diff --git a/.gitignore b/.gitignore
index 13cf148097e976bcd75db46f2ada28fcbd2cd064..8fd6d88b5ed5509ee059aa5605929ee9e78450d3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
+*gcov
 **/*~
 **/*.bak
 **/*log
diff --git a/CMakeLists.txt b/CMakeLists.txt
index e7c9c1ffabb4892e480c5248bf3ef28b65521520..2ede077a11af116101664d48f760b500198705c9 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -48,11 +48,10 @@ set(CMAKE_CXX_FLAGS "-Wall -pedantic -Wextra -Wno-ignored-qualifiers")
 set(CMAKE_CXX_FLAGS_DEBUG "-O0 -g")
 set(CMAKE_CXX_FLAGS_RELEASE "-O3 -g") # -O2 would not trade speed for size, neither O2/3 use fast-math
 set(CMAKE_Fortran_FLAGS "-std=legacy")
-if(COVERAGE)
-  set(CMAKE_CXX_FLAGS "--coverage")
-  set(CMAKE_EXE_LINKER_FLAGS "--coverage")
-  set(CMAKE_SHARED_LINKER_FLAGS "--coverage")
-endif()
+
+# setup coverage target
+set(CMAKE_CXX_FLAGS_COVERAGE "${CMAKE_CXX_FLAGS_DEBUG} --coverage")
+set(CMAKE_EXE_LINKER_FLAGS_COVERAGE "${CMAKE_EXE_LINKER_FLAGS_DEBUG} --coverage")
 
 # clang produces a lot of unecessary warnings without this:
 add_compile_options("$<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>>:-Wno-nonportable-include-path>")
diff --git a/ThirdParty/lcov/.version b/ThirdParty/lcov/.version
new file mode 100644
index 0000000000000000000000000000000000000000..ad1509c1e5fe761b2edd59aa9b46c4b9320e104f
--- /dev/null
+++ b/ThirdParty/lcov/.version
@@ -0,0 +1,3 @@
+VERSION=1.14
+RELEASE=1
+FULL=1.14
diff --git a/ThirdParty/lcov/CHANGES b/ThirdParty/lcov/CHANGES
new file mode 100644
index 0000000000000000000000000000000000000000..2e7086b994138249ee8f24b93a297015fc4fb245
--- /dev/null
+++ b/ThirdParty/lcov/CHANGES
@@ -0,0 +1,3454 @@
+commit 6c7ad581ab9ed66f80050970c0d559c6684613b7 (HEAD, tag: v1.14, origin/master, origin/HEAD, master)
+Author: Peter Oberparleiter <oberpar@linux.ibm.com>
+Date:   Thu Feb 28 18:01:39 2019 +0100
+
+    lcov: Finalize release 1.14
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.ibm.com>
+
+commit 29814f18ec207ebaefa7b41f6e5acc4eca6d7a7a
+Author: Peter Oberparleiter <oberpar@linux.ibm.com>
+Date:   Thu Feb 28 17:31:17 2019 +0100
+
+    geninfo: Fix missing FN: entries in result files
+    
+    geninfo sometimes fails to correctly collect function starting lines for
+    some source files, resulting in output files with missing FN: lines.
+    Also such functions are missing from the function list in HTML output.
+    
+    The problem occurs when
+      a) multiple source files contribute to a function implementation (e.g.
+         via including code), and
+      b) the source file that contains the initial function definition
+         is not the source file that contains the most function
+         definitions
+    
+    The problem occurs due to a heuristic in function graph_find_base() that
+    incorrectly determines the source file for a function in this situation.
+    
+    Fix this by using the first file that contributes to a function as the
+    base source file for that function. Only apply this change to data
+    collected using GCC versions 4 and above since earlier versions did not
+    produce stable file orders in graph files.
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.ibm.com>
+    Reported-by: Joshua Cranmer
+
+commit 74bae96e8ef724eb9dbdf126adad17505375e149
+Author: Peter Oberparleiter <oberpar@linux.ibm.com>
+Date:   Thu Feb 28 16:15:22 2019 +0100
+
+    Makefile: Make Perl path install-time configurable
+    
+    Add support for specifying the Perl interpreter path used in installed
+    Perl scripts. If no path is specified, the default '/usr/bin/perl' is
+    used.
+    
+    Set variable LCOV_PERL_PATH to specify a different path, for example:
+    
+      make install LCOV_PERL_PATH=/usr/local/bin/perl
+    
+    Unset this variable to keep the current path:
+    
+      make install LCOV_PERL_PATH=
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.ibm.com>
+
+commit 0b378cba2c0f93d728627aa8750849d3c33de0e1
+Author: Peter Oberparleiter <oberpar@linux.ibm.com>
+Date:   Thu Feb 28 14:21:18 2019 +0100
+
+    bin,test: Use /usr/bin/env to locate script interpreters
+    
+    Make use of the /usr/bin/env tool to locate script interpreters. This is
+    needed to locate the correct interpreter in non-standard environments.
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.ibm.com>
+    Suggested-by: Bjørn Forsman <bjorn.forsman@gmail.com>
+    Suggested-by: Mario Costa
+
+commit 2ff99aefbd0c80fe0cfddf1e09a596d7344533e1
+Author: Peter Oberparleiter <oberpar@linux.ibm.com>
+Date:   Thu Feb 28 14:09:42 2019 +0100
+
+    bin/*: Remove '-w' from interpreter specifications
+    
+    Replace '-w' flag from Perl interpreter specifications with 'use strict'
+    directive. This is done in preparation of using a more flexible
+    interpreter specification.
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.ibm.com>
+
+commit 3b378b0e76be95971680056d864d0e13f4a08557
+Author: Peter Oberparleiter <oberpar@linux.ibm.com>
+Date:   Wed Feb 27 16:33:42 2019 +0100
+
+    geninfo: Fix errors while resolving /././ path components
+    
+    Trying to collect code coverage data for source code that contains
+    repeat ./ references in a path components fails with the following
+    error message:
+    
+      geninfo: WARNING: cannot find an entry for <name>.gcov in .gcno file,
+               skipping file!
+    
+    This is caused by a bug in path normalization function
+    solve_relative_path() that does not correctly process adjacent ./
+    references.
+    
+    Fix this by repeating the resolution of ./ references in path
+    components.
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.ibm.com>
+    Reported-by: Joshua Cranmer
+
+commit 42b55f5a497d2286566d0dd411e3e52fd4d50469
+Author: iignatyev <igor.v.ignatiev@gmail.com>
+Date:   Wed Feb 6 11:35:02 2019 -0800
+
+    geninfo: preserve-paths makes gcov to fail for long pathnames
+    
+    geninfo uses '--preserve-paths' gcov option whenever gcov supports it, this
+    forces gcov to use a whole pathname as a filename for .gcov files. So in cases
+    of quite large pathnames, gcov isn't able to create .gcov files and hence
+    geninfo can't get any data. The fix replaces usage '--preserve-paths' with
+    '--hash-filenames' when it is available.
+    
+    Signed-off-by: Igor Ignatev <igor.v.ignatiev@gmail.com>
+
+commit 04335632c371b5066e722298c9f8c6f11b210201
+Author: Peter Oberparleiter <oberpar@linux.ibm.com>
+Date:   Fri Jan 11 13:53:33 2019 +0100
+
+    geninfo: Fix "Can't use an undefined value" error
+    
+    When run on data for source code that causes gcc 8 to generate
+    artificial functions, geninfo emits warnings and eventually aborts
+    processing:
+    
+      geninfo: Use of uninitialized value in hash element at
+               /usr/local/bin/geninfo line 3001.
+      geninfo: Can't use an undefined value as an ARRAY reference at
+               /usr/local/bin/geninfo line 2889.
+    
+    This problem was introduced by commit 9aa0d14a ("geninfo: Ignore
+    artificial functions during --initial"). It is the result of an
+    incomplete removal of artificial functions from internal data.
+    
+    Fix this by explicitcly removing known artificial functions after
+    parsing of graph files completes.
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.ibm.com>
+    Reported-by: Steven Peters <scpeters@osrfoundation.org>
+
+commit 9aa0d14af4446ef46d80356849a97bc961a91f97
+Author: Peter Oberparleiter <oberpar@linux.ibm.com>
+Date:   Thu Jan 10 13:20:15 2019 +0100
+
+    geninfo: Ignore artificial functions during --initial
+    
+    Graph files generated by gcc 8 may contain "artifical" functions that do
+    not exist in a source file. geninfo incorrectly generates coverage data
+    for these functions when run with option --initial.
+    
+    Fix this by filtering out artifical functions when generating initial
+    coverage data.
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.ibm.com>
+    Reported-by: Marcin Konarski <marcin.konarski@codestation.org>
+
+commit 1e0df571198229b4701100ce5f596cf1658ede4b
+Author: Peter Oberparleiter <oberpar@linux.ibm.com>
+Date:   Thu Jan 10 11:39:07 2019 +0100
+
+    geninfo: Fix data collection for files containing templates
+    
+    When using gcc 8, lcov/geninfo produces corrupt coverage output for
+    source code that contains templates or other constructs that cause gcov
+    to produce multiple versions of output for some lines and branches.
+    
+    This is caused by an incorrect check for duplicate output in function
+    read_gcov_file() that is triggered when a template consists of multiple
+    lines, or contains branches.
+    
+    Fix this by ensuring that duplicate lines in per-instance gcov output are
+    correctly ignored. Only the initial occurrence of each line containing
+    the combined coverage of all instances will be processed by geninfo.
+    
+    Note that for branch coverage, gcov doesn't provide a combined view and
+    geninfo processes all branches provided. This should not be a problem
+    though as genhtml will combine the branch data when generating HTML
+    output.
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.ibm.com>
+
+commit abd8bed2b013334d4ef978abadbfff6cc6f3d55d
+Author: MarcoFalke <falke.marco@gmail.com>
+Date:   Tue Jan 8 12:49:00 2019 +0100
+
+    genhtml: Unconditionally include anchor for each named line
+    
+    This helps with referencing the line in the html when sharing links.
+    
+    Signed-off-by: MarcoFalke <falke.marco@gmail.com>
+
+commit 28675dc7564aaa1ad231a7ac23106512a3956d68
+Author: Peter Oberparleiter <oberpar@linux.ibm.com>
+Date:   Tue Dec 18 13:07:58 2018 +0100
+
+    genhtml: Use gmtime for SOURCE_DATE_EPOCH conversion
+    
+    By changing that localtime to gmtime the "Known bug" section of the
+    commit message can be removed.
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.ibm.com>
+    Suggested-by: Bjørn Forsman <bjorn.forsman@gmail.com>
+
+commit 180286bec651928c41de4d6ce3a8760678b38f60
+Author: Bjørn Forsman <bjorn.forsman@gmail.com>
+Date:   Tue Dec 4 14:30:28 2018 +0100
+
+    genhtml: honor the SOURCE_DATE_EPOCH variable
+    
+    Implement the SOURCE_DATE_EPOCH specification[1] for reproducible
+    builds. If SOURCE_DATE_EPOCH is set, use it as timestamp instead of the
+    current time.
+    
+    In this context, reproducible builds means reproducible HTML coverage
+    reports.
+    
+    Known bug: the specification[1] says to defer converting the timestamp
+    to local timezone at presentation time. This is currently not happening;
+    it's converted at build time.
+    
+    [1] https://reproducible-builds.org/specs/source-date-epoch/
+    
+    Signed-off-by: Bjørn Forsman <bjorn.forsman@gmail.com>
+
+commit 41e07cadeeae3054ac22202d5b0b0f0ef6e26467
+Author: Bjørn Forsman <bjorn.forsman@gmail.com>
+Date:   Tue Dec 4 14:30:27 2018 +0100
+
+    Tolerate CDPATH being set
+    
+    If CDPATH is set, cd will print the path it enters, resulting in TOOLDIR
+    containing the path twice, separated by a newline.
+    
+    Signed-off-by: Bjørn Forsman <bjorn.forsman@gmail.com>
+
+commit a3bbe8f0398a3c36b4228cc173e4739d27a863e1
+Author: Peter Oberparleiter <oberpar@linux.ibm.com>
+Date:   Mon Dec 10 13:58:10 2018 +0100
+
+    CONTRIBUTING: Clarify patch format requirements
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.ibm.com>
+
+commit e6750800fe4cb89eda1ff80b7a5fe70fe87ede36
+Author: Peter Oberparleiter <oberpar@linux.ibm.com>
+Date:   Tue Nov 13 17:28:17 2018 +0100
+
+    geninfo: Fix accounting of basic blocks in exceptional paths
+    
+    Basic blocks that are not executed and are only reachable via
+    exceptional paths are marked with a '%%%%%' marker in the GCOV output of
+    current GCC versions. Fix geninfo to also recognize this marker.
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.ibm.com>
+    Reported-by: trotux (github user)
+
+commit 94eac0ee870e58630d8052dca1181b0cf802525f
+Author: Peter Oberparleiter <oberpar@linux.ibm.com>
+Date:   Mon Jul 16 13:24:58 2018 +0200
+
+    lcov: Fix branch coverage summary
+    
+    When combining two data files A (without branch coverage data) and B
+    (with branch coverage data), lcov will incorrectly report no branch
+    coverage data for the resulting file in program output, even though the
+    resulting file contains branch coverage data. This only happens when A
+    is specified first during the add operation.
+    
+    This is due to a bug in lcov that loses the correctly combined branch
+    coverage data internally in function brcount_db_combine() when its first
+    parameter is undefined. Fix this by ensuring that the first parameter is
+    an empty hash reference instead.
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.ibm.com>
+
+commit a5dd9529f9232b8d901a4d6eb9ae54cae179e5b3
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Wed Mar 7 14:18:55 2018 +0100
+
+    geninfo: Add gcc 8 support
+    
+    Fix errors and incorrect data when trying to collect coverage data
+    for programs compiled with gcc 8.
+    
+    Covers the following gcov-related changes in gcc:
+    
+    .gcov-file format:
+      - Line coverage data can appear multiple times for the same line
+      - Line coverage count can be suffixed by '*' to indicated unexecuted
+        basic blocks in that line
+    
+    .gcno-file format:
+      - new header field 'support unexecuted blocks flag'
+      - new function record fields 'column number', 'ending line number',
+        and 'compiler-generated entity flag'
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit c30d88a3a8096dbb3f968de999480c3dc2dedb5f
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Tue Jan 30 15:12:09 2018 +0100
+
+    genhtml: Implement option to show miss counts
+    
+    Add new command line option --missed that can be used to show the
+    number of missed lines, functions, or branches.
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit 999abf2447b4df373b135dc3f8ee317350bd95f8
+Author: Benoit Belley <Benoit.Belley@autodesk.com>
+Date:   Fri Oct 6 10:01:28 2017 -0400
+
+    Adding the --include and --exclude options to lcov and geninfo
+    
+    * The --include command-line option allows the user to specify a regular
+      expression for the source files to be included. The command-line
+      option can be repeated to specify multiple patterns. The coverage
+      information is only included for the source files matching at least
+      one of the patterns.
+    
+      The "lcov --capture --include" (or "geninfo --include") option is
+      similar in functionality to the "lcov --extract" command-line
+      option. But, by directly using applying the pattern while capturing
+      coverage data one can often avoid having to run "lcov --extract" as a
+      second pass.
+    
+    * The --exclude command-line option allows the user to specify a regular
+      expression for the source files to be excluded. The command-line
+      option can be repeated to specify multiple patterns. The coverage
+      information is excluded for source files matching at least one of the
+      patterns.
+    
+      The "lcov --capture --exclude" (or "geninfo --exclude") option is
+      similar in functionality to the "lcov --extract" command-line
+      option. But, by directly using applying the pattern while capturing
+      coverage data one can often avoid having to run "lcov --remove" as a
+      second pass.
+    
+    * On one of our code base at Autodesk, this speeds-up the generation of
+      HTML code coverage reports by a factor of 3X.
+    
+    Signed-off-by: Benoit Belley <benoit.belley@autodesk.com>
+
+commit b6a11368c3cdc86c4e147ccd8e539918dfe37900
+Author: Ziqian SUN (Zamir) <zsun@redhat.com>
+Date:   Wed Jul 19 10:58:24 2017 +0800
+
+    Resolve some rpmlint issue in SPEC.
+    
+    Following messages reported by rpmlint on RHEL is fixed by this patch:
+    lcov.src: W: invalid-license GPL
+    lcov.src:9: W: hardcoded-path-in-buildroot-tag
+    /var/tmp/%{name}-%{version}-root
+    lcov.src: E: specfile-error warning: bogus date in %changelog: Fri Oct 8
+    2002 Peter Oberparleiter (Peter.Oberparleiter@de.ibm.com)
+    lcov.noarch: W: non-conffile-in-etc /etc/lcovrc
+    
+    Signed-off-by: Ziqian SUN (Zamir) <zsun@redhat.com>
+    [oberpar@linux.vnet.ibm.com: Corrected license to GPLv2+]
+
+commit a77a7628ef5377c525a0d4904cc0b73eeede4d7c
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Fri Apr 7 15:43:28 2017 +0200
+
+    genhtml: Reduce path resolution overhead
+    
+    Reduce overhead when reading coverage data files by consolidating
+    calls to Cwd:cwd().
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit 526c5148ac0add40ef1224d2cdabdec73ce3f899
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Fri Apr 7 15:37:52 2017 +0200
+
+    genhtml: Reduce load times for complex coverage data files
+    
+    genhtml uses a significant amount of time loading coverage data files
+    containing complex branch coverage data (i.e. data with a large number
+    of branches per basic block). Most of this time is spent storing
+    branch coverage data in a vector-based data representation, with an
+    unnecessary amount of cross-checking being done for existing branch
+    data.
+    
+    Fix this by replacing the vector based data representation by two
+    separate representations, scalar for storage and hash for processing,
+    and by moving cross-checking out of the hot path. This results in a
+    significant speedup at the cost of a minor increase in memory usage.
+    
+    Test results for "make -C genhtml_output/ SIZE=large":
+    
+    Original:
+      6 tests executed, 6 passed, 0 failed, 0 skipped (time 768.4s, mem
+      893.8MB)
+    
+    Patched:
+      6 tests executed, 6 passed, 0 failed, 0 skipped (time 202.3s, mem
+      908.10MB)
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit 0f07133f184af6670bdf1edf39fca9d2e90e9ad2
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Fri Apr 7 14:38:22 2017 +0200
+
+    test: Add self-tests for genhtml
+    
+    Add some tests for checking basic functionality of genhtml.
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit 544a6951db25679792bb0648006a897ea564d883
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Fri Apr 7 14:32:47 2017 +0200
+
+    genhtml: Ensure stable block order in branch output
+    
+    Sort order of basic blocks in output of branch coverage data. This
+    allows for a better comparison of output between test cases.
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit 477957fa4c6c104d5842911682ec17d6ad2d2980
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Thu Apr 6 12:28:11 2017 +0200
+
+    lcov: Reduce load times for complex coverage data files
+    
+    lcov uses a significant amount of time loading coverage data files
+    containing complex branch coverage data (i.e. data with a large number
+    of branches per basic block). Most of this time is spent storing
+    branch coverage data in a vector-based data representation, with an
+    unnecessary amount of cross-checking being done for existing branch
+    data.
+    
+    Fix this by replacing the vector based data representation by two
+    separate representations, scalar for storage and hash for processing,
+    and by moving cross-checking out of the hot path. This results in a
+    significant speedup at the cost of a minor increase in memory usage.
+    
+    Test results for "make test SIZE=large":
+    
+    Original:
+      17 tests executed, 17 passed, 0 failed, 0 skipped (time 1883.9s, mem
+      2459.0MB)
+    
+    Patched:
+      17 tests executed, 17 passed, 0 failed, 0 skipped (time 283.6s, mem
+      2544.2MB)
+    
+    Note that this fix only applies to the lcov tool. The same work is
+    necessary for genhtml.
+    
+    This approach was inspired by a patch by creich.3141592@gmail.com.
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit 3b397a3f3acdb62080e8366130758cb34703cfbf
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Thu Apr 6 09:01:36 2017 +0200
+
+    test: Improve test framework
+    
+    Various improvements to lcov's self-test framework:
+     - Add test case for lcov --diff
+     - Add new verbosity level
+     - Enable normalization of coverage data files from stdin
+     - Fix lcov_add_concatenated4 test name
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit 53a6ce8ef604173b6de874a534a30121392d7cd0
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Thu Mar 30 15:42:56 2017 +0200
+
+    lcov: Add self-tests
+    
+    Add some tests for checking basic functionality of lcov. To run these
+    tests, type:
+    
+    	make test
+    
+    in either the top-level directory, or the test/ sub-directory.
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit 9753d5c0da107919537e91e504551e4ab3bccc2f
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Thu Mar 30 15:31:34 2017 +0200
+
+    lcov: Fix output on stderr for --summary
+    
+    Some functions of lcov erroneously print informational output to stderr
+    instead of stdout as expected. Fix this by inverting the "to_file" logic
+    in lcov to a "data_stdout" logic. Affected functions are --summary,
+    --reset and --list.
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit 25f5d38abad20eeaa407f62f53c3c00dfbbd0bf3
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Mon Mar 6 09:51:00 2017 +0100
+
+    lcovrc.5: Add genhtml_demangle_cpp default and CLI reference
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit 66db744a1d63c5d3b1dee2d8a2ce76e6e06c7255
+Author: Katsuhiko Nishimra <ktns.87@gmail.com>
+Date:   Fri Mar 3 17:47:48 2017 +0900
+
+    Support passing demangle-cpp option via lcovrc
+    
+    This patch allows users to passing the demangle-cpp option to genhtml
+    via lcovrc, alongside with CUI.
+    
+    Signed-off-by: Katsuhiko Nishimra <ktns.87@gmail.com>
+
+commit b6fb452addaa6a33dcb37c101879b8b5e1e0c34c (tag: v1.13)
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Mon Dec 19 15:20:40 2016 +0100
+
+    lcov: Finalize release 1.13
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit daca8d9febe52ccf1976240a3b48ffc350dec902
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Mon Dec 19 14:36:00 2016 +0100
+
+    geninfo: Fix 'unexpected end of file' error
+    
+    Use the compiler version as stored in the .gcno file to determine if
+    the file contains function records with split checksums. This fixes
+    the following problem that can occur when lcov is run using a gcov
+    tool of GCC version 4.7 and above on .gcno files compiled with a
+    version below 4.7:
+    
+      # lcov -c -d . -o test.info --initial
+      [...]
+      geninfo: ERROR: test.gcno: reached unexpected end of file
+    
+    Also add missing lcov version to --debug output.
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit a90d50e97cb49ea712c94d91cdef1cc21a3c7986
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Wed Dec 14 11:00:08 2016 +0100
+
+    lcov: Remove use of install -D option
+    
+    Some versions of the install tool don't support the -D option, causing
+    a 'make install' call to fail. Fix this by replacing the -D option with
+    two calls to install, first to create all target directory components,
+    then to install the actual files.
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+    Reported-by: <deanyang@tencent.com>
+
+commit 6ec3f2398d22e605c1a8019541fb32d26d18044b
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Fri Oct 7 09:47:35 2016 +0200
+
+    genhtml: Fix warning with small genhtml_line_field_width
+    
+    On systems with Perl versions 5.21 and above, genhtml prints a warning
+    similar to the following during processing:
+    
+      genhtml: Negative repeat count does nothing at bin/genhtml line 3854,
+               <SOURCE_HANDLE> line 4.
+    
+    This is due to size calculations resulting in a negative number of
+    padding characters when genhtml_line_field_width is lower than the size
+    of the strings to pad (9). Fix this by disabling padding in these cases.
+    
+    Reported-by: xaizek@openmailbox.org
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit d7cc7591b3a7cc1ec95371d04e4fc46f10b3fd54
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Tue Oct 4 09:50:52 2016 +0200
+
+    geninfo: Fix gcov version detection for XCode 8.0
+    
+    The LLVM gcov version included in XCode 8.0 reports its version in a
+    format that is not understood by geninfo, resulting in the wrong format
+    of coverage data files being expected. Fix this by reworking gcov
+    version detection in geninfo to be more robust.
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit 68320d932c5ee5537ae1c287fe52603ae2fecf8c
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Mon Aug 22 15:54:56 2016 +0200
+
+    lcov: Update installation mechanism
+    
+    Change default installation location to /usr/local to prevent
+    conflicts with files installed by package managers (reported by
+    Gregory Fong). To achieve this, rename PREFIX to DESTDIR and
+    introduce actual PREFIX Makefile variable and update spec file
+    to install packaged files to previous locations.
+    
+    Also fix spec file to not announce ownership of system directories
+    (reported by and based on patch by Jiri Kastner <jkastner@redhat.com>).
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit 04a3c0ed1b4b9750b2ac5060aac0e6d5a3b9da7f
+Author: Benoit Belley <benoit.belley@autodesk.com>
+Date:   Mon Apr 4 18:16:54 2016 -0400
+
+    Pass --no-strip-underscore to c++filt on OS X
+    
+    * The --no-strip-underscope flag is necessary on OS X so that symbols
+      listed by gcov get demangled properly.
+    
+       From the c++filt man page: "On some systems, both the C and C++
+       compilers put an underscore in front of every name.  For example, the
+       C name "foo" gets the low-level name "_foo". This option tells c++filt
+       not to remove the initial underscore.  Whether c++filt removes the
+       underscore by default is target dependent."
+    
+    Signed-off-by: Benoit Belley <benoit.belley@autodesk.com>
+
+commit 632c25a0d1f5e4d2f4fd5b28ce7c8b86d388c91f
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Tue Mar 8 10:51:51 2016 +0100
+
+    lcov: Fix output files being created in / directory
+    
+    When a warning is emitted by lcov before creating an output file,
+    e.g. when a negative coverage count was found while combining
+    tracefiles, lcov tries to create the output file in the root
+    directory (/) instead of the current working directory.
+    
+    This is a result of lcov's warn handler calling a temp file cleanup
+    routine that changes directories to / before trying to remove its
+    temporary directory.
+    
+    Fix this by removing the temp cleanup call from the warn handler.
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit e32aab1b4c85503a6592a91326c4b362613e1d66
+Author: Gabriel Laskar <gabriel@lse.epita.fr>
+Date:   Wed Feb 10 09:56:18 2016 +0100
+
+    lcov: Fix --remove pattern matching
+    
+    The --remove option of lcov now consider the pattern passed as parameter
+    as a full path, and not only a part of the filename.
+    
+    This behavior was discovered by using AX_CODE_COVERAGE[1] m4 macro from
+    a directory in $HOME/tmp. The macro itself calls lcov with
+    `--remove "/tmp/*"`.
+    
+    [1]: https://www.gnu.org/software/autoconf-archive/ax_code_coverage.html
+    
+    Signed-off-by: Gabriel Laskar <gabriel@lse.epita.fr>
+
+commit 79e9f281ea893b2f6498b4bad79173b1414aa055
+Author: Reiner Herrmann <reiner@reiner-h.de>
+Date:   Fri Oct 30 20:26:59 2015 +0100
+
+    lcov: use UTC to get timezone-independent date
+    
+    The date is used for updating the time inside manpages.
+    If localtime is used, the date could vary depending on the user's
+    timezone. To enable reproducible builds, UTC is used instead.
+    
+    Signed-off-by: Reiner Herrmann <reiner@reiner-h.de>
+
+commit de33f51b49dc6d01a285aa73990f03e7d982beb2 (tag: v1.12)
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Mon Oct 5 17:37:40 2015 +0200
+
+    lcov: Finalize release 1.12
+    
+     - Use full git describe output as tool version
+     - Update version numbers and last-changed-dates in man pages,
+       spec and README file
+     - Replace static CHANGES file with git log
+     - Switch Makefile logic to use mktemp for generating a temporary
+       directory
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit 1ad4f7779b7721e311e552209e110e08bbf18fa1
+Author: Denis Abramov <abramov.denis@gmail.com>
+Date:   Mon Sep 21 09:29:20 2015 +0200
+
+    geninfo: Added support for Xcode 7.0 gcov version handling
+    
+    With Xcode 7.0 LLVM gcov keeps version information on the first line.
+    E.g. gcov --version yields: Apple LLVM 7.0.0 (clang-700.0.65)
+    
+    Signed-off-by: Denis Abramov <abramov.denis@gmail.com>
+
+commit c3602ea8e598deda4afff603bb123caa98eef159
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Mon Aug 3 11:05:51 2015 +0200
+
+    genhtml: Allow prefix paths with spaces
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit a3572971367198ef0febe476052640bd09bec931
+Author: Gilles Gouaillardet <gilles@rist.or.jp>
+Date:   Thu Jul 30 14:11:57 2015 +0900
+
+    genhtml: support a comma separated list of prefixes
+    
+    the --prefix option of genhtml now takes a comma separated list of prefixes
+    instead of a single prefix.
+    this can be required when running lcov vs projects configure'd with VPATH
+    and in which source files are both in the source and build directories.
+    
+    Signed-off-by: Gilles Gouaillardet <gilles@rist.or.jp>
+
+commit 997f32ae85717cd47d2305d7cd7ccce3ffa1abe6
+Author: Gilles Gouaillardet <gilles@rist.or.jp>
+Date:   Tue Jun 23 14:28:22 2015 +0900
+
+    Fix find command line
+    
+    find xxx -name \*.gcda -type f -o type l
+    does return :
+    - all files with the .gcda suffix
+    - all symbolic links
+    
+    the updated command line now returns
+    - all files with the .gcda suffix
+    - all symbolic links with the .gcda suffix
+    
+    Signed-off-by: Gilles Gouaillardet <gilles@rist.or.jp>
+
+commit 533db4e78b54ae01e023d00c1fec5dddaaaf37e6
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Wed Jun 17 17:54:20 2015 +0200
+
+    lcov: Fix capture for package files containing graph files
+    
+    Depending on whether package files contain graph files, data should be
+    collected from the unpacked package file directly, or from the build
+    directory after linking data files. This approach fixes problems when
+    capturing coverage data via a package from a directory containing graph
+    files.
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit a2a8b376ec5e9e5082a0cbb935137d6a8f526870
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Wed Jun 17 17:34:33 2015 +0200
+
+    lcov: Fix .build_directory file not being deleted
+    
+    Using option --to-package while capturing coverage data creates a
+    temporary file named ".build_directory". Currently this file is not
+    properly removed at the end of processing due to a changed CWD. This
+    patch fixes this problem by reverting to the original CWD before trying
+    to remove the temporary file.
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit b9de825f1fe018f381c8859ee0f3f4af15122c7a
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Tue Jun 16 13:53:00 2015 +0200
+
+    lcov: Enable userspace package capture with only data files
+    
+    Previously lcov's --from-package capture mechanism required
+    that .gcno files and source were present on the test machine.
+    
+    This patch modifies --from-package capturing to work when
+    only .gcda files are present in the package captured on the
+    test machine. It works by linking the .gcda files collected
+    on the test machine into their natural location on the build
+    machine. This requires existing .gcda files to be removed.
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit 0e4f0908aed3e1a071d5435c36c18cd493f0c309
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Tue Jun 16 13:33:54 2015 +0200
+
+    lcov: Make package handling more robust
+    
+    Apply some changes to --from-package and --to-package handling
+    to better handle failures:
+    
+     - Abort if tar tool is not available
+     - Abort if no data file is found in package file
+     - Ensure that temporary directories can be deleted
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit f87d980929a5a06d49d0a6856f6c3314418c27ef
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Tue May 12 17:28:44 2015 +0200
+
+    genhtml: Rework c++filt name demangling
+    
+    When running genhtml with command line option --demangle-cpp, do not
+    merge function call data based on demangled function names. Instead mark
+    duplicate function entries in the function view with a version suffix
+    (.<number>). This resolves problems with entries for functions that
+    demangle to the same name, but begin on different lines according to GCC
+    (e.g. virtual destructors).
+    
+    Reported-by: Lukasz Czajczyk <lukasz.czajczyk@gmail.com>
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit 2e872175cbba2c09c9025da2660edf0b4abb55cb
+Author: Daniel Fahlgren <daniel@fahlgren.se>
+Date:   Wed Apr 22 15:17:10 2015 +0200
+
+    geninfo: make line exclusion markers configurable
+    
+    This patch exposes the variable $excl_line and $excl_br_line so they can
+    be set in the configuration file. It is not always possible to add the
+    exclusion markers to the code with reasons like third party code,
+    company policy, legacy code, no commit access etc.
+    
+    One obvious use case is to exclude assert() from the branch coverage and
+    abort() from line coverage. They are never meant to be triggered unless
+    something is wrong. Other use cases can be custom error handling macros
+    or macros that depend on endianness, like htons().
+    
+    Signed-off-by: Daniel Fahlgren <daniel@fahlgren.se>
+
+commit 10b11eaa178976d1433007adb2188d05b8605be6
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Mon Nov 10 17:17:23 2014 +0100
+
+    geninfo: Ignore empty .gcno files with --initial
+    
+    Some versions of GCC create empty .gcno files which cause geninfo
+    to abort processing with an error message:
+    
+      geninfo: ERROR: dummy.gcno: reached unexpected end of file
+    
+    Fix this problem by skipping empty .gcno files.
+    
+    Reported-by: Maarten Hoes <hoes.maarten@gmail.com>
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit f9d8079646aa906518c4ab7d326504e6837532a7
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Mon Nov 10 16:54:08 2014 +0100
+
+    lcov: Fix warning when specifying --rc
+    
+    Current Perl versions report the following warning when using the --rc
+    option of lcov:
+    
+      lcov: Use of each() on hash after insertion without resetting hash
+      iterator results in undefined behavior
+    
+    Fix this warning by not modifying the hash variable that is being
+    iterated on. Also add the missing whitespace fix-up of --rc parameters
+    to genhtml.
+    
+    Reported-by: Maarten Hoes <hoes.maarten@gmail.com>
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit 2a634f6caa98f979606189ec3ee98f4cac270b97
+Author: Philip Withnall <philip.withnall@collabora.com>
+Date:   Mon Nov 10 14:58:34 2014 +0000
+
+    genhtml: Support relative source filenames in SF keys
+    
+    Some tools which generate .info files generate relative filenames for
+    the ‘SF’ keys. For example, nodeunit’s lcov output does. When genhtml is
+    run with --output-directory, it calls chdir() which breaks relative
+    lookup of the source files. Fix that by resolving all source filenames
+    to absolute paths when loading an info file, resolving any relative ones
+    using the info file’s path as a base.
+    
+    Signed-off-by: Philip Withnall <philip.withnall@collabora.co.uk>
+
+commit b4344c6a5d3c434ca0d801c197a09cfdeecb3f32
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Fri Sep 26 13:11:18 2014 +0200
+
+    man: Add description for --precision and genhtml_precision
+    
+    Add man page sections for genhtml's command-line option --precision
+    and lcovrc configuration setting genhtml_precision. Also add an
+    example configuration setting in lcovrc.
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit aa1217412f1e8b540010fea5ca9844b9e4699e54
+Author: Euccas Chen <euchen@qti.qualcomm.com>
+Date:   Fri Sep 26 12:53:29 2014 +0200
+
+    genhtml: Implement option to specify coverage rate precision
+    
+    Add command line support and config file support for specifying the
+    coverage rate precision, valid precision range: [1,4].
+    
+    Signed-off-by: Euccas Chen <euchen@qti.qualcomm.com>
+
+commit 4d4eba1a8b5e7d2a6c5e93c0a50264da1a5c5540
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Wed Jun 25 09:41:59 2014 +0200
+
+    get_version.sh: Remove - characters from release string
+    
+    Replace - with . in release strings to fix the following build
+    error in the dist Makefile target:
+    
+      error: line 4: Illegal char '-' in: Release: 4-g1d44b2a
+      make: *** [rpms] Error 1
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit ffbd3e08cc0871842b2205b0b73c2ae8f3ad02e8
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Wed Jun 25 09:25:50 2014 +0200
+
+    genhtml: Improve demangle error message
+    
+    Improve error message that is shown when there are mangled function name
+    entries on different lines that demangle to the same clear text function
+    name.
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit 1d44b2a090aa933b15e4cafc1a440ccb390df92e
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Tue Jun 24 17:45:34 2014 +0200
+
+    geninfo: Fix error when using --demangle-cpp
+    
+    Using genhtml's --demangle-cpp option on data produced with recent GCC
+    versions (at least 4.8 and 4.9) can result in an error message similar
+    to the following:
+    
+      genhtml: ERROR: Demangled function name _ZN3subD2Ev  maps to different
+               lines (5 vs 4)
+    
+    The reason for this error is an unexpected sequence of lines records
+    in a .gcno file. These records mention line numbers as belonging to a
+    function which occur before the initial line number of that function
+    as reported by the corresponding function record.
+    
+    Fix this problem by retaining the order of lines belonging to a function
+    as found in the .gcno file. This way geninfo will consistently use the
+    initial line number as reported by the function record when merging
+    function data during the demangling process.
+    
+    Reported-by: Alexandre Duret-Lutz <adl@lrde.epita.fr>
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit 566e5ec7e69a03612e1ed4961779d939af180d66
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Wed Jun 18 16:05:29 2014 +0200
+
+    lcov: Remove unused files
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit c76172bfe630520e217ecc0bca8f18481c4c33b0
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Wed Jun 18 16:01:05 2014 +0200
+
+    README: Fix typo
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit a6b10a41056cd10c7b735e259fee81f1865c2109
+Author: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Date:   Wed Jun 18 15:50:04 2014 +0200
+
+    lcov: Remove CVS artifacts
+    
+    Replace CVS specifics in the build environment and tools source with
+    Git mechanisms:
+     * CONTRIBUTING and README file now refer to github for the primary
+       source location
+     * When run from a Git repository, the tools dynamically determine the
+       Git version using 'git describe'
+     * When installed into the file system, the version information is
+       fixed with the current Git version
+     * When preparing distribution files, the version at the time of
+       preparing the files is written to file ".version"
+    
+    Also add a .gitignore file to filter out the most frequent temporary
+    file types.
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit fa2a991cf6fad37fec7650b95be705df143e058a (tag: v1.11)
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri May 23 08:56:17 2014 +0000
+
+    lcov: finalizing release 1.11
+
+commit e2729beea0d7769ef0e992c27a294b0742a6ac77
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri May 23 08:47:10 2014 +0000
+
+    CHANGES: update
+
+commit 866d187602bfc2e3a8199f4e9e9430ef38f106a8
+Author: Jeffrey Hutzelman <jhutz@cmu.edu>
+Date:   Tue May 20 14:12:55 2014 +0000
+
+    lcov: Sort branches in unnamed blocks first
+    
+    When processing branch coverage data, consider branches in "unnamed"
+    blocks to come before other blocks on the same line, so that they
+    appear in the correct order in HTML output.
+    
+    This is accomplished by using block number -1 for unnamed blocks,
+    instead of 9999 as was previously done.  In branch data vectors, this
+    is represented by the value $BR_VEC_MAX, which is defined to be the
+    largest value representable in the field width used.  This same value
+    is also used in .info files, for backward-compatibility with regular
+    expressions used to parse these files.  As a result, .info files
+    generated by versions of lcov with this change can be read by older
+    versions, though branch results will still appear out of order.
+    
+    Signed-off-by: Jeffrey Hutzelman <jhutz@cmu.edu>
+
+commit 17c0edec32193b9e8058908447d3eb403d76c8de
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Thu May 15 10:23:45 2014 +0000
+
+    lcov: Update man page
+    
+    Add missing description for command line parameter value.
+    
+    Reported-by: sylvestre@mozilla.com
+
+commit c0958139e015805cce15b60b740c735690ad4002
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Apr 14 12:14:55 2014 +0000
+
+    genhtml: Implement option to allow HTML in test description
+    
+    Add lcovrc directive genhtml_desc_html to allow using HTML markup in
+    test case description text.
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit 4f2c3aefcfcf816806da83a8609bd743eb227d37
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Apr 14 11:24:05 2014 +0000
+
+    genhtml: Check for proper description file format
+    
+    Ensure that description files contain test name lines before test
+    description lines. This fixes a "use of uninitialized value" warning
+    in genhtml.
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit 3a68239905c28a7c3bfac52172a254872d6a7aa7
+Author: Jonah Petri <jonah@petri.us>
+Date:   Mon Apr 14 11:06:21 2014 +0000
+
+    lcov: make geninfo compatible with LLVM's gcov
+    
+    These changes are needed to make geninfo compatible with LLVM's gcov:
+    * Use --version rather than -v to probe version info
+    * Convert LLVM gcov version numbers to the GCC gcov version they emulate
+    * Translate short options into their equivalent long option capabilities
+    
+    Signed-off-by: Jonah Petri <jonah@petri.us>
+
+commit a74bdeeae0383b197b1dafa44d01a54129fb3d7c
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Jan 8 13:26:04 2014 +0000
+
+    genhtml: Reduce hash copying while adding up files
+    
+    Reduce copying effort and memory usage. Based on similar patch for
+    lcov by olly@survex.com.
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit c6b4d91fdf667cfca17213742e2e04f6281ebed4
+Author: Olly Betts <olly@survex.com>
+Date:   Wed Jan 8 13:14:05 2014 +0000
+
+    lcov: Avoiding copying hashes passed to add_counts function
+    
+    This patch reduces memory usage - without it lcov was failing every time
+    for me with out of memory errors in a VM with 1GB of RAM and 1GB of
+    swap, but with it lcov completes every time.
+    
+    It's presumably also faster to avoid these copies.
+    
+    Signed-off-by: Olly Betts <olly@survex.com>
+
+commit cf6f2e685510da62bd2eb1f386f71d57c41f4594
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Dec 13 16:09:05 2013 +0000
+
+    geninfo: Tolerate garbage at end of gcno file
+    
+    Some versions of gcc produce garbage at the end of a gcno file
+    when recompiling a source code file after removing some lines.
+    
+    This patch makes geninfo's gcno file parser more robust by assuming
+    end-of-file when it finds a record that extends beyond the end-of-file.
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit 14286b29d076208452da6021c792ebf43552ac2c
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Dec 13 15:23:27 2013 +0000
+
+    geninfo: make gcov tool version detection more robust
+    
+    Don't consider gcov tool version information in parenthesis when
+    determining the gcov tool version. This fixes problems where the
+    version string contains a different version number in parenthesis
+    before the actual gcov version.
+    
+    Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+
+commit 0bde87338cd155af46804d77701c93ef263c3d53
+Author: Sebastian Stigler <s_stigler@gmx.de>
+Date:   Fri Dec 13 15:09:58 2013 +0000
+
+    geninfo: add exclude marker for branch coverage
+    
+    Sometimes it can be helpful to generally use branch coverage but to
+    disable it for some lines of code without excluding the line or function
+    coverage too.
+    
+    For example if you make heavily use of assertions in your code (which is
+    generally a good idea) you will see that for each 'assert(...)' exist
+    one branch which is taken and one that is not. Similarly you can see the
+    same phenomenon for 'delete' in C++ code.
+    
+    If you use the 'LCOV_EXCL_LINE' marker in such a situation both of these
+    branches will be omitted from the output. But in doing so, you lose the
+    ability to determine if this piece of code is genuine 'dead code' or not
+    because the line coverage is omitted too.
+    
+    The newly introduced 'LCOV_EXCL_BR_LINE', 'LCOV_EXCL_BR_START' and
+    'LCOV_EXCL_BR_STOP' markers address this problem. The usage is similar to
+    the 'LCOV_EXCL_LINE' etc. markers.
+    
+    Signed-off-by: Sebastian Stigler <s_stigler@gmx.de>
+
+commit 119be727596f567e83b03de384b4150b926911a3
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Thu Dec 12 14:58:44 2013 +0000
+
+    geninfo: Fix handling of non-english locales
+    
+    geninfo expects gcov output in the default C locale. This isn't always
+    given, for example when running in an environment where variable
+    LANGUAGE is set to a non-english locale. In such cases gcov output
+    cannot be correctly parsed, resulting for example in the absence of
+    branch coverage data.
+    
+    gcov uses gettext() for writing internationalized messages. The info
+    page for gettext mentions the order in which locale-defining
+    environment variables are evaluated:
+    
+    LANGUAGE
+    LC_ALL
+    LC_MESSAGES
+    LANG
+    
+    In addition, gettext implements special handling where LC_ALL=C takes
+    precedence over LANGUAGE.
+    
+    geninfo currently only specifies LANG=C. Fix the issue by specifying
+    LC_ALL=C instead.
+    
+    Based on fix suggestion by Sebastian Stigler.
+    
+    Reported-by: Sebastian Stigler <s_stigler@gmx.de>
+
+commit 0f7bb3ebc8487b83ce9b7047c81a3655135876ea
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Dec 9 15:49:35 2013 +0000
+
+    lcov: Added contribution guidelines
+
+commit f83688fe27f133ef02e9ab47a435d6a5d2074932
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Aug 2 07:29:20 2013 +0000
+
+    geninfo: fix --no-external not working with --initial
+    
+    When running lcov --capture --initial together with --no-external,
+    the --no-external option has no effect. Fix this by applying the external
+    filtering also for graph files.
+    
+    Reported-by: malcolm.parsons@gmail.com
+
+commit 6a8a678046bd75aa81d30484b1817425022d71e5
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Jul 1 11:49:46 2013 +0000
+
+    lcov: fix --config-file not being passed to geninfo
+    
+    Calling lcov to capture coverage data while specifying --config-file
+    will result in the configuration directives of that file not being
+    used during data collection.
+    
+    Fix this by ensuring that --config-file is passed on to geninfo.
+    
+    Reported-by: liuyhlinux@gmail.com
+
+commit c3be5b6859ef280b469b6b75cf4709fc35f91ced
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Thu May 2 11:02:24 2013 +0000
+
+    lcov: fix whitespace handling in --rc command line option
+    
+    Specifying blanks around --rc options results in the options not
+    being correctly recognized, for example:
+    
+    This doesn't work:
+    geninfo . -o - --rc="geninfo_adjust_src_path = /tmp => /usr"
+    
+    This works:
+    geninfo . -o - --rc="geninfo_adjust_src_path=/tmp => /usr"
+    
+    Fix this by automatically removing whitespaces at the start and end
+    of --rc options and values.
+
+commit 4699f8d391325335777ed234e388be2e2f87478c
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Apr 12 07:51:34 2013 +0000
+
+    README: improve usage hint
+
+commit 36e0539737198ad1bee51103f47842f13c575239
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Mar 13 10:28:07 2013 +0000
+
+    genhtml: add time to date string
+    
+    Add the current time to the date information in the HTML output
+    generated by genhtml. This way users can differentiate results when
+    creating HTML output multiple times a day.
+    
+    Based on patch by sylvestre@debian.org.
+
+commit 38fbe93c8cd8402be8e4821825fdeeaa23e8367c
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Feb 22 14:09:08 2013 +0000
+
+    geninfo: don't warn about missing .gcov files
+    
+    Newer versions of gcc remove .gcov files for source files that do
+    not contribute instrumented lines. Remove the
+    
+      WARNING: no data found for file.c
+    
+    warning that geninfo issues in this case.
+
+commit 29346542c30af221a2ffdfe097fbd858044b712a
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Feb 1 11:44:03 2013 +0000
+
+    genhtml: fix handling of user-specified prefixes with trailing /
+    
+    A trailing / in a user-specified prefix is not correctly recognized.
+    Fix this by removing any number of trailing / in a user-specified
+    prefix. Reported by ahmed_osman@mentor.com.
+
+commit 5241e2afadca5f172bd0b8cafe61e20d2153f0bf
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Jan 30 11:46:42 2013 +0000
+
+    lcov: fix bug when converting function data in --diff operation
+    
+    When a patch is applied to a tracefile using command line option --diff
+    and the patch changes the list of functions, the operation aborts with
+    the following error:
+    
+      lcov: Use of freed value in iteration at lcov line 3718.
+    
+    Fix by applying missing calls to keys() when iterating function data
+    hashes. Reported by Nasir.Amanullah@us.fujitsu.com.
+
+commit 9ce8d8cb4f978eb80fb88ecafd52e869fab75d8f
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Jan 25 16:30:25 2013 +0000
+
+    lcov/genhtml: fix outdated comment regarding data structure
+
+commit c85e73a36e3f8c4e7fab888ac1536bee94a6fe56
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Jan 25 16:29:30 2013 +0000
+
+    genhtml: merge function data during demangling
+    
+    Merge function execution counts when multiple function names demangle
+    to the same name.
+
+commit 2dfafc99c1eccbb81066436845e06a868eb3c434
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Jan 25 11:46:28 2013 +0000
+
+    genhtml: improve function table sorting
+    
+    In the function table view, the initial view should show the functions
+    sorted by execution count because - unlike with file names - the function
+    name is not a natural order for functions (the line number would be,
+    but that is not available). Also sort functions with the same execution
+    count alphabetically for a stable order.
+    
+    Based on a suggestion by paul.bignier@hotmail.fr.
+
+commit 331a29011709a27d2ec11c6cbd6ac51dfdaf70c6
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Jan 23 16:52:06 2013 +0000
+
+    genhtml: consolidate calls to c++filt
+    
+    When using --demanglecpp, call c++filt only once instead of per
+    function. This approach can reduce the run-time for source files
+    with a lot of overloaded functions significantly. Based on idea
+    by olly@survex.com.
+
+commit 49b877160b1d28cd6c3d8332d5d47c9c74420070
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Thu Jan 10 09:02:32 2013 +0000
+
+    geninfo: make empty data directories non-fatal
+    
+    Emit a warning when no data file is found in a data directory
+    to allow processing of additional directories.
+    
+    Based on suggestion by rich_drake@yahoo.com.
+
+commit 3836c162c2864ed180df7d80fa03c70d17102edc
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Tue Nov 13 09:58:53 2012 +0000
+
+    geninfo: fix parsing of gcc 4.7 gcov format
+    
+    GCC 4.7 changes the gcov format for lines which can only be reached
+    by exceptions to "=====" instead of "#####". This results in the
+    following warning:
+    
+    geninfo: Argument "=====" isn't numeric in numeric gt (>) at geninfo
+    line 1281.
+    
+    Fix this by handling "=====" correctly.
+
+commit b932f94cc83c3df169c76689533336bba4de4dba
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Oct 10 09:14:17 2012 +0000
+
+    lcov.spec: back to CVS version
+
+commit 6af00fa26e1a91a39c873ff9fa6df7fb8830ec42
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Oct 10 09:12:42 2012 +0000
+
+    lcov.spec: fix Perl dependency
+
+commit 4eac16e93db328e86e44da40e3d5e96a0301d361
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Oct 10 08:36:16 2012 +0000
+
+    lcov: update CVS version to 1.11-pre1
+
+commit b5c1bdddd1380be3ad12952ed2747df3744e227e
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Oct 10 08:20:21 2012 +0000
+
+    lcov: finalizing release 1.10
+
+commit 089861768a94d0f6e827539c828f19141092f529
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Oct 10 08:07:54 2012 +0000
+
+    CHANGES: update
+
+commit 9037de17458c5d9767d201bd0599d40347a9bc41
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Oct 10 08:07:01 2012 +0000
+
+    genhtml: handle source files in root directory gracefully
+
+commit 68dd0f19da0d8d6e82375e09b97f7ffc22847db4
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Tue Oct 9 13:58:22 2012 +0000
+
+    geninfo: add automatic detection of base directory
+    
+    Add a heuristic to automatically determine the base directory
+    when collecting coverage data. This heuristic should cover many,
+    if not most cases of build systems moving files around during
+    compilation (e.g. libtool, automake, etc.). The heuristic can be
+    enabled or disabled using the configuration file directive
+    'geninfo_auto_base'.
+
+commit 631d2b11bfde56ffca4568382abf5d90653c4141
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Oct 8 15:03:23 2012 +0000
+
+    geninfo: fix missing line data after last commit
+
+commit b1e14c4a1a0f3ccaad0c665f439624cf4588a68d
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Oct 8 13:02:45 2012 +0000
+
+    lcov: add missing help text for option --rc
+
+commit a432efff6ee8485ec0724aca4eae79a4c390a328
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Oct 5 15:53:09 2012 +0000
+
+    lcov: updated CHANGES file and copyright years
+
+commit 897322ecdb858f18e4a12f4716bbb08c067b6c9c
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Oct 5 15:20:41 2012 +0000
+
+    geninfo: fix warning about unhandled .gcov files
+    
+    gcov will sometimes create .gcov files that contain no instrumented
+    line. When geninfo reads .gcno files it filters out such files,
+    resulting in the following warning:
+    
+    geninfo: WARNING: cannot find an entry for #path#to#file.gcov in
+             .gcno file, skipping file!
+    
+    Avoid this warning by not filtering out non-instrumented lines.
+
+commit 37d381ae99a66f59ea55d966f1da13a726d2efe8
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Oct 5 15:09:58 2012 +0000
+
+    genhtml: fix source path prefix calculation
+    
+    Fix the following problems of the algorithm used to identify an
+    optimal source path prefix:
+    - the last two path components (filename and first parent
+      directory) are ignored when trying to identify the optimal
+      prefix
+    - if a path prefix matches a longer path prefix, the weight
+      of the filenames associated with the latter is incorrectly
+      attributed to the former
+
+commit 263de2b40e21193ef8d11e899eb55aa52b17225d
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Oct 5 12:00:57 2012 +0000
+
+    lcov: set default for branch coverage data to disabled
+    
+    Collecting branch coverage data can significantly slow down
+    coverage data collection and processing of data files. Assuming
+    that most users are more interested in line/function coverage,
+    change defaults to not collect/process branch coverage data.
+    
+    Users can still override this default using lcov_branch_coverage=1
+    in the lcovrc file or command line option --rc lcov_branch_coverage=1
+
+commit 7e04a152683ff66e24b87f2125474c6765d4524b
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Oct 5 11:49:30 2012 +0000
+
+    geninfo: fix problems with adjust_src_path option
+    
+    Fix the following problems with adjust_src_path:
+    
+     * specifying --compat libtool=on and geninfo_adjust_src_path
+       unexpectedly sets --compat libtool=off
+     * path components that are assembled from sub-directory names are
+       not correctly adjusted
+
+commit 74e4296b6e2a0b0f164c6828c28cc82449344f08
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Oct 5 08:23:06 2012 +0000
+
+    lcov: add setting to disable function and branch coverage
+    
+    Add two new configuration file settings:
+    
+     * lcov_function_coverage and
+     * lcov_branch_coverage
+    
+    When set to zero, lcov will skip the corresponding coverage data
+    type from being collected or processed, resulting in reduced
+    memory and CPU time consumption and smaller data files.
+
+commit 37bc1a1a5f721c6b88fff4c63121c1cbb794c14f
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Tue Oct 2 14:29:57 2012 +0000
+
+    lcovrc: clarify meaning of geninfo_external in man page
+
+commit fc4b9e21efe8f3409d9b0b90cfe7a3e8bc59a74c
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Tue Oct 2 09:12:38 2012 +0000
+
+    geninfo: fix processing of pre-3.3 gcov files
+    
+    When trying to collect coverage data for programs compiled with
+    GCC versions prior to 3.3, geninfo skips each data file with the
+    following warning:
+    
+    geninfo: WARNING: cannot find an entry for test.c.gcov in .bb file,
+    skipping file!
+    
+    Fix this by deriving the source code filename from the gcov filename
+    in case the gcov files do not follow the GCC 3.3 format.
+    
+    Reported-by: georgysebastian@gmail.com
+
+commit d1014dfcabfee2f305278a14ec8e5343e3889139
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Aug 24 11:43:18 2012 +0000
+
+    lcov: fix problems with --rc option
+    
+    Fix error "Invalid option linkage for \"rc=s%\"" when running lcov
+    with an older version of the Getopt::Long module. Also pass --rc
+    options through lcov to geninfo.
+
+commit a9f08b79e2e7ec2b4a5c9ad27a077df8dfb46890
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Tue Jul 24 15:41:38 2012 +0000
+
+    geninfo: implement rc option geninfo_adjust_src_path
+    
+    Provide a new lcovrc file option geninfo_adjust_src_path that users
+    can use to change incorrect source paths.
+    
+    Inspired by patch by ammon.riley@gmail.com.
+
+commit 108f805788590defda99fdf252bfb71cb749f31e
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Thu Jul 19 13:12:35 2012 +0000
+
+    lcov: implement command line option --rc
+    
+    Users can now use command line option --rc to override configuration
+    file directives.
+
+commit eeeeeca74706e88a9b8ecfef2bb3451957512e20
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Jul 18 12:56:21 2012 +0000
+
+    lcovrc: add description for geninfo_compat setting
+
+commit f842e46149b48ff316e80f68f630bf94085e4d19
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Jul 18 12:40:56 2012 +0000
+
+    lcov: improve --compat description
+
+commit 392a690ba31092857f7d21d0008783d87954ebce
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Jul 18 12:13:00 2012 +0000
+
+    lcov: add missing documentation for --compat option
+    
+    Add missing sections in the geninfo and lcov man-pages for the
+    newly introduced command line option --compat. Also set the
+    default value for the hammer compatibility mode to 'auto' to
+    keep the behavior of previous releases.
+
+commit 691cab3e3aaebc295c2cfe91c43c6a7c48f1ec2b
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Jul 18 10:40:12 2012 +0000
+
+    lcov: fix extra closing parenthesis in comment
+
+commit cef6f0ff8baa9b2b3dfb437463e7a88d3380b555
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Tue Jul 17 11:37:13 2012 +0000
+
+    lcov: make 0%/100% exclusive to no/full coverage rate
+    
+    Ensure that coverage rates 0% and 100% are only used when no or all
+    lines/functions/branches are hit respectively. This approach is
+    implemented to allow better identification of boundary cases, and
+    to be in accordance with the behavior of the gcov tool.
+    
+    Based on suggestions by: Paul.Zimmermann@loria.fr and
+    vincent@vinc17.net
+
+commit 9cec8f7e332258c9128f1c53d61acb9f0bc17085
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Jul 11 14:09:27 2012 +0000
+
+    geninfo: more improvements to the .gcno format auto-detection
+    
+    Suggestions by garnold@google.com:
+    - rename command line setting
+    - simplify logic
+
+commit 0bbca3bd0c1ad3e3d3fd0b6eebfc3afbbc212a85
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Jul 6 14:29:27 2012 +0000
+
+    geninfo: rename compatibility setting to compatibility mode
+
+commit f30fb978662996e29517c733218292a91f5fd12b
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Jul 6 09:03:27 2012 +0000
+
+    geninfo: improve detection of gcc 4.7 function records
+    
+    Suggestions by garnold@google.com:
+    - perform detection only once
+    - add warning in case detection is off but overlong strings are found
+    
+    Misc:
+    - add help text for --compat
+    - isolate detection heuristic into separate function
+    - rename corresponding compatibility setting to "gcc_4_7"
+    - allow "android_4_4_0" as alias for "gcc_4_7"
+
+commit 01321c3f170e5d24ffb3bb998441c99f5b775836
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Jul 4 16:06:10 2012 +0000
+
+    geninfo: enable auto-detection of gcc-4.7 function record format
+    
+    gcc-4.7 introduced a modified function record format. This format
+    is in use by android toolchains and has also been ported to some
+    pre-4.7 versions of gcc. Introduce a heuristic-based auto-detection
+    to correctly handle .gcno files in these cases.
+
+commit d929600a0e2133168085e5ddea7ee832afd902b7
+Author: Martin Hopfeld <martin.hopfeld@sse-erfurt.de>
+Date:   Fri Jun 8 14:19:49 2012 +0000
+
+    geninfo: Make geninfo work more reliably on MSYS
+    
+    Using the lcov tools on Win7 with MSYS and MinGW 4.5.1/4.5.2
+    raised some issues for us:
+    
+    geninfo created, in the 'SF:' line for one source file, paths
+    starting with a lowercase drive letter and sometimes starting
+    with uppercase drive letters.
+    
+    This led to inaccurate coverage results on the MSYS platform.
+    
+    This patch fixes this issue.
+
+commit 5b2751854aa19e6443fdc5fecc139595988d1e99
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon May 7 16:04:49 2012 +0000
+
+    lcov: add perl version dependency to RPM spec file
+    
+    lcov CVS (1.10 pre) seems to be broken on MSYS with perl 5.6.1.
+    The issue is the following:
+    
+      genhtml: Unknown open() mode '>>&' at /usr/bin/genhtml line 5512.
+    
+      $> perl genhtml --version
+      genhtml: LCOV version 1.10 pre (CVS 1.58)
+    
+      $> perl --version
+      This is perl, v5.6.1 built for msys
+    
+    Fortunately perl v5.8.8 is available for MSYS and genhtml works like a
+    charm with that 'new' version.
+    
+    Reported-by: Martin Hopfeld <martin.hopfeld@sse-erfurt.de>
+
+commit 83957a145d243cad0f8060e4a9ccc6cb8ed8fc09
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Tue Apr 10 11:48:52 2012 +0000
+
+    geninfo: add support for gcc 4.7 .gcno file format
+    
+    Based on patch by berrange@redhat.com.
+
+commit 91c91dbc63d1e880d106919300c2fb37737697b0
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Jan 20 11:53:57 2012 +0000
+
+    lcov: add new command line option --compat
+    
+    Add new option to lcov and geninfo to specify compatibility settings.
+    
+    Supported settings:
+      libtool: same as --compat-libtool
+      hammer: gcc3.3 hammer patch compatibility
+      android_4_4_0: android toolchain 4_4_0 compatibility
+
+commit 9588355790a302da680eff2f664058f78439a03e
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Thu Aug 11 08:29:21 2011 +0000
+
+    lcov: fix problem with Objective-C functions
+    
+    Fix geninfo not recognizing function entries for Objective-C functions.
+    
+    Based on patch by abrahamh@web.de:
+    the current version of lcov unfortunately does not support Objective-C
+    files. In detail, the count of tested functions is always zero and the
+    annotated lines have an offset of one if the Objective-C method has one
+    or more arguments.
+
+commit e1acd78d1e88fe51aad96badf32555c470ee029b
+Author: Martin Hopfeld <martin.hopfeld@sse-erfurt.de>
+Date:   Mon May 23 08:03:13 2011 +0000
+
+    geninfo: Make geninfo handle MinGW output on MSYS.
+    
+    This patch converts path mixtures from MinGW when running on MSYS to
+    correct MSYS paths.
+    
+    In solve_relative_path() an additional conversion step will be inserted
+    when running on MSYS. This will extract the drive letter and convert the
+    remaining path from Windows pathnames to Unix Paths, which are used by
+    MSYS.
+    
+    Additionally, if no drive letter is found, the (relative) path is
+    converted to Unix style. There may be the case where Windows and Unix
+    path separators are intermixed within one path string.
+
+commit ed161e3db5cd5a7c6c8b2113930c729f001cdd4e
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Thu Dec 16 08:11:22 2010 +0000
+
+    genpng: handle empty source files
+    
+    Generating an overview PNG image for an empty source code file fails.
+    Handle this case by assuming a single empty line when run for an empty
+    source code file.
+    
+    Reported by: sylvestre@debian.org
+
+commit 95e2c5c337d281b4e88144d95d29bbec183c8728
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Tue Dec 7 08:40:09 2010 +0000
+
+    genhtml: add note to further explain branch coverage output
+
+commit b1c66916151dd4b20998c79f81edf174659ebb14
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Tue Dec 7 08:29:45 2010 +0000
+
+    genhtml: fixed incorrect description of default coverage rates
+
+commit 1994be7d8ed472772b884063af74235f2f25ab39
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Nov 19 16:33:25 2010 +0000
+
+    geninfo: add missing man page sections
+    
+    Add sections describing options --derive-func-data and --no-markers to
+    the geninfo man page.
+
+commit 01a393ef76092e43ebd2d8bf7892ebf375481a84
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Nov 19 16:15:27 2010 +0000
+
+    geninfo: remove help text for unimplemented parameter
+    
+    Parameter --function-coverage was removed but the help text still
+    mentions it. Fix this by removing the option from the help text as
+    well.
+
+commit b92f99d9db0af131080c462300dc9baf292a8ff6
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Nov 19 16:00:22 2010 +0000
+
+    genhtml: handle special characters in file and directory names
+    
+    HTML special characters (e.g. '<') found in file or directory names are
+    not correctly shown in HTML output. Fix this by correctly escaping such
+    characters.
+
+commit 17e158d4569d25218e79901e2d8cd03bfc7752fc
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Nov 19 15:45:01 2010 +0000
+
+    gendesc/genhtml/geninfo/genpng/lcov: handle '<' in filenames
+    
+    Use 3-arg open mode to prevent a special character (e.g. '<')
+    found in a user-specified filename from interfering with the
+    required open mode for that file.
+
+commit b87e40e475c560bdc88206df4de6dc8cf094d91f
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Nov 19 15:11:53 2010 +0000
+
+    geninfo: ignore <built-in>.gcov files
+    
+    The gcov tool will sometimes create a file <built-in>.gcov for code
+    which was added by gcc itself during compilation. Since there isn't
+    any source available for such code, geninfo will fail. Fix this
+    by skipping these files while capturing code coverage data.
+
+commit 398d8f385423927b5675c1429f58c67b6a89a1a8
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Thu Oct 28 14:17:57 2010 +0000
+
+    geninfo: add function comment
+    
+    Add comment explaining data structures used by function derive_data.
+
+commit f5c2072e0e7195d35455db50705884e7f6c5fbe5
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Thu Oct 28 14:16:34 2010 +0000
+
+    geninfo: apply exclusion marker to derived function data
+    
+    When option --derive-func-data is used together with exclusion markers,
+    function data for excluded lines is still included. Fix this by
+    only deriving function data for lines which are instrumented and not
+    excluded.
+    
+    Reported by: bettse@gmail.com
+
+commit 82280b8a5a78e8a147c333c8850a556729d9d96d
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Tue Aug 31 08:19:03 2010 +0000
+
+    geninfo: improve --debug output
+
+commit 6375a03010cb1bb22490b9d19a176188940e2f8b
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Tue Aug 31 08:17:23 2010 +0000
+
+    gcov: add configuration file option to not use gcov's -a option
+    
+    lcov calls gcov while specifying its --all-blocks option to get more
+    detailed branch coverage data per line. It turns out that this option
+    is broken on many versions of gcov, resulting in an endless loop while
+    processing some gcov data files. There's also a slight performance
+    penalty when specifying -a.
+    
+    lcov users can opt to not use the -a option by setting configuration
+    option geninfo_gcov_all_blocks to 0 in the lcovrc file.
+
+commit 7706fb73ebef8060fbbd92c0e08b5d68a2cd284e
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Tue Aug 24 16:15:53 2010 +0000
+
+    lcov: add option to specify a configuration file
+    
+    Provide an option for users to specify a configuration file to lcov.
+    This option may be useful when there is a need to run several instances
+    of a tool with different configuration file options in parallel.
+
+commit a404dafc2da12608a936afeb095d68410fa49b0a
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Aug 23 16:14:37 2010 +0000
+
+    lcov: add option to display summary coverage information
+    
+    Provide an option for users to determine the summary coverage
+    information of one or more tracefiles. Example output:
+    
+    Summary coverage rate:
+      lines......: 26.0% (78132 of 300355 lines)
+      functions..: 34.9% (8413 of 24081 functions)
+      branches...: 16.9% (32610 of 193495 branches)
+
+commit 526b5b6a43f2b29f11eb02c1dd8f645293d8c295
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Aug 23 14:47:43 2010 +0000
+
+    lcov: add option to exclude external files
+    
+    Implement an option for users to specify that external source files
+    should be excluded when capturing coverage data. External source files
+    are files which are not located in the directories specified by the
+    --directory and --base-directory options of lcov/geninfo.
+
+commit c2255a0344648dc6eaef0189c53f345fdc70ed4e
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Aug 20 14:58:48 2010 +0000
+
+    lcov: pass --no-recursion to geninfo
+    
+    When specifying --no-recursion, make sure that the option is also passed
+    to the helper tool geninfo.
+
+commit 83543f3d21b5a5496b57c8d73e8e9c1819f82f34
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Aug 20 14:31:59 2010 +0000
+
+    genhtml: fix HTML page title for directory pages
+
+commit b77df8ef1a69de3809e0b0bfa5cbbe5a84f313ae
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Aug 20 14:27:19 2010 +0000
+
+    genhtml: make HTML charset specification customizable
+    
+    Provide a configuration file setting to adjust the charset specification
+    used by all generated HTML pages. Also change the default charset to
+    UTF-8.
+
+commit 1ff260462a67c440dc709d34c1fadf7d64760120
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Aug 20 13:14:50 2010 +0000
+
+    lcov: follow Perl naming guidelines
+
+commit f637eb8c6ecb793b64eeb6bea57c6be8501d1484
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Aug 20 13:08:25 2010 +0000
+
+    genhtml: add --ignore-errors option
+    
+    Provide a means for users to specify that genhtml should not abort if
+    it cannot read a source code file. Also make handling of --ignore-errors
+    parameter consistent across lcov, geninfo and genhtml.
+
+commit 617bced393d5bb97e3409ec140768d9c8a2e2bfb
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Aug 6 11:25:12 2010 +0000
+
+    lcov: update CVS version to 1.10
+
+commit 4dcb4f0ed014ca0f49859ef84fc9ced650f6deb8
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Aug 6 11:14:38 2010 +0000
+
+    lcov: finalizing release 1.9
+
+commit 594779e047eed2f534905ac40912969955d3797f
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Thu Aug 5 16:17:44 2010 +0000
+
+    lcov: update CHANGES file in preparation of new release
+
+commit fbbd9034e7a4ea4bc59342b22bfbe9612dd4bdb8
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Thu Aug 5 15:11:56 2010 +0000
+
+    lcov: introduce configuration file parameters for list output
+    
+    Make some aspects of list output customizable via configuration
+    file parameters. Also introduce special handling, if the root
+    directory is chosen as prefix.
+
+commit c6e783c1a1d3fb6db7419af95f9e2dcb89836fe9
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Thu Aug 5 14:07:35 2010 +0000
+
+    lcov: switch coverage rate and number columns in list view
+    
+    To be more consistent with the order of output in the "Overall
+    coverage rate" case, rates are now shown first in the list output.
+
+commit 3c87b66c68c2e06811c9be479c6813cb409e5461
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Thu Aug 5 11:22:12 2010 +0000
+
+    lcov: fix display of total line coverage rate in list view
+
+commit 3cb6bc4ae0ef34aa63931d63f659f1ef43804c77
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Aug 4 16:15:19 2010 +0000
+
+    lcov: more lcov --list improvement
+    
+    Further improve list output to increase readability.
+
+commit dd98ff68ad143b985a728fc585c86d69e6027bd8
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Jul 28 14:49:47 2010 +0000
+
+    lcov: minor list improvement
+
+commit d4778c75ce8cf3c9d44607b6fd0e385db71126dd
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Jul 28 14:48:25 2010 +0000
+
+    geninfo: remove unneeded functions
+
+commit 65a15afef3430c49c9c7c0d151cc2afec5fc83cc
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Jul 28 14:19:09 2010 +0000
+
+    geninfo: print note on branch coverage data only once
+
+commit bd8ab633298ec27acf5f7db4b2cc4766baf1f153
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Jul 28 14:17:59 2010 +0000
+
+    geninfo: remove incorrect overall coverage rate calculation
+    
+    geninfo output showing the overall coverage rate of its current
+    operation is incorrect since it may count lines, functions and
+    branches for included files multiple times. Remove the output
+    and associated code until a fixed version is available.
+
+commit 8c54de96a1326b7ee0632773816c52eda43393e8
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Jul 28 13:56:26 2010 +0000
+
+    lcov: more list output fixes
+
+commit 7e5fa9900d991320677c381db747c764495b2cc2
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Jul 28 13:52:01 2010 +0000
+
+    lcov: fix list output
+    
+    Fix list output for directories with short filenames.
+
+commit badd4790c70bd8ef8b991a9d56d0e062b28006a8
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Jul 26 13:33:18 2010 +0000
+
+    lcov: fix problem when using --initial and --kernel-directory
+    
+    Fix a problem in lcov that resulted in --kernel-directory options
+    being ignored when specifying --initial at the same time.
+    
+    Reported by hjia@redhat.com.
+
+commit a06c2038babb2f6d3e0a634cd298b0434041f834
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Jul 19 16:06:15 2010 +0000
+
+    genhtml: change wording for branches which were not executed
+    
+    Since gcov sometimes reports both branches which were never executed
+    as well as branches which were executed in a single block, the wording
+    of the HTML alt text needs to be adjusted accordingly.
+
+commit e6b2491823ffd84c85406145031646af675170ee
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Jul 19 15:50:02 2010 +0000
+
+    geninfo: handle branches in unnamed blocks
+    
+    gcov will sometimes report branches outside of a block. In that case,
+    account these branches to a special block so that they are not
+    accidentally merged with subsequently reported blocks.
+
+commit d6c82edf2117ce8b6232c998baf06c7a87269081
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Jul 19 15:23:10 2010 +0000
+
+    genhtml: fix branch formatting code
+    
+    Fix the vertical alignment of the HTML representation of branches in
+    the source code view.
+
+commit 44ac74a47e25064ad1b421f65a28d057fdb9925d
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Jul 19 14:27:08 2010 +0000
+
+    lcov: improve list output
+    
+    Improve list output by separating directory and file names. Also provide
+    an option to show full path names.
+
+commit 0ab6f7507f3c4f074bec41e571ff1afbeb943185
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Jul 19 12:12:43 2010 +0000
+
+    genhtml: fix large numbers being shown as negative in html output
+    
+    genhtml uses a "%d" format string for printing execution counts. For
+    counts exceeding integer range, the output becomes negative. Fix this
+    by using the "%.0f" format string instead.
+    
+    Reported by kkyriako@yahoo.com.
+
+commit bbf0ef40a51dd716c544f91576cffde7986bb6ec
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Jun 7 12:22:18 2010 +0000
+
+    geninfo: ensure that exclusion markers apply to --initial
+    
+    Fix a problem where exclusion markers are ignored when gathering
+    initial coverage data.
+    
+    Problem was reported by ahmed_osman@mentor.com.
+
+commit b371fc59fa52f7176f62f382457fba498f39f4b2
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Tue Jun 1 13:48:29 2010 +0000
+
+    lcov: fix problem with relative path names
+    
+    Fix a problem where coverage data is missing because gcov produces
+    output files starting with a dot.
+    
+    Problem reported by weston_schmidt@open-roadster.com.
+
+commit 93c70ddd0edbc2b0addf9d135dfd76871cc7a160
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Sun Feb 28 20:57:37 2010 +0000
+
+    lcov: fix problem with relative paths in build paths
+    
+    When binaries are built using relative paths, lcov cannot find any
+    coverage data. Instead, warnings similar to the following are printed:
+    
+    geninfo: WARNING: cannot find an entry for ^#src#test.c.gcov in .gcno
+    file, skipping file!
+    
+    The reason for this is that File::Spec::rel2abs does not remove ../ from
+    paths which results in lcov not being able to match the relative and
+    absolute versions of the corresponding filenames. Fix this by using the
+    internal function solve_relative_path instead.
+
+commit fad24a75cc69364d002d40e4fb75736b0efbdb37
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Sun Feb 21 14:57:52 2010 +0000
+
+    geninfo: write all debugging output to STDERR
+
+commit c0943385fa0acb927f63f9f78c9aeaebe3a8ece1
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Sun Feb 21 14:56:46 2010 +0000
+
+    geninfo: fix problem with some .gcno files
+    
+    Some .gcno files contain more data in a line record than
+    expected. Skip unhandled bytes of a .gcno file record.
+    This prevents the following unexpected error message:
+    
+    geninfo: ERROR: file.gcno: reached unexpected end of file
+
+commit 4b9ee7598e991b503425148eb43a35de2702aded
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Sun Feb 7 13:07:09 2010 +0000
+
+    lcov: add COPYING file
+
+commit de0e43a098ade45d6624ea43a53e6fad9a176469
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Jan 29 11:07:25 2010 +0000
+
+    lcov: update CVS version to 1.9
+
+commit 4a33269fa3a73ea2577f7616d90bd3f1d14ae460
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Jan 29 10:09:53 2010 +0000
+
+    lcov: finalizing release 1.8
+
+commit 310ffb28d8847f96e02b5a5db3d16bdcb406a876
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Jan 29 10:01:35 2010 +0000
+
+    lcov: updated CHANGES file
+
+commit 9e12808e6108e05dca42b5e682bd8be121f3608d
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Jan 29 09:21:22 2010 +0000
+
+    genhtml: use sans-serif font for function table
+
+commit 71baabb6a1c15228213f8b25359346ee202300ce
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Jan 29 09:12:55 2010 +0000
+
+    lcov: improve list output
+
+commit cc61a28dbc3c46ac84340141fafbfa559e1bf318
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Jan 29 08:56:19 2010 +0000
+
+    lcov: fix overall rate display for tracefiles with more than one testcase
+
+commit b89028529db5110b3b76d117df788768a593d7dd
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Jan 29 08:44:47 2010 +0000
+
+    lcov/genhtml: fix warning while merging branch data
+
+commit b7c69f31d9b1bfbd4bfc0fcb880cb8e514bcdb3f
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Thu Jan 28 15:59:23 2010 +0000
+
+    lcov: fix branch coverage related issues
+    
+    - warnings when combining certain combinations of branch data
+    - branches are not merged correctly when multiple input files are specified
+      to genhtml or when lcov -a is used
+
+commit 817875459df122fa3536a5e57c05ddfae19a089e
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Jan 27 16:37:50 2010 +0000
+
+    gendesc: fix problem with single word descriptions
+
+commit 33f60f48747b5ba12a6fdfb505bb662c922496bd
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Jan 27 12:10:04 2010 +0000
+
+    lcov: remove temporary files when creating a package
+
+commit 6775457cbd3fa86acba4655d77b4ba2054b13253
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Jan 27 12:00:05 2010 +0000
+
+    lcov: correctly retain information about converted test data
+
+commit f4d13eccc54f31a53ad109c3c4b86e4b52d6dfcb
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Jan 27 10:17:43 2010 +0000
+
+    lcov: fixed overview output for function data
+
+commit aa00c65b7514c93320c1c787b848c8277593dcb0
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Tue Jan 26 09:36:19 2010 +0000
+
+    genhtml: don't use too much gcc-specific terms (basic block -> block)
+
+commit 3562f60b9500d8ad167c4629e9d95485308aa665
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Jan 22 16:17:37 2010 +0000
+
+    lcov: consolidate coverage rate classification limits
+    
+    Classifying coverage rates per coverage type (line, function or branch
+    coverage) is not useful in most cases. Also the respective
+    color legend takes up too much space in the HTML output. Remove
+    function and branch coverage rates from the documentation and from
+    the color legend. Instead the original limits will be applied to those
+    coverage types as well. The per type rates can still be used if required
+    but it is recommended to only use one rate set.
+
+commit d77dc6a0adf259e322ac9f35c93241d446269a5b
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Jan 22 16:16:47 2010 +0000
+
+    lcov: minor code cleanup
+    
+    - remove unused function definitions and declarations
+    - remove unused CSS declarations
+    - add missing function declarations
+    - fix function prototypes
+
+commit b3243d1fdc17571ca9b1ed6a1ea975a9b3f1b86b
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Jan 22 16:16:15 2010 +0000
+
+    geninfo: consolidate similar functions
+
+commit 739e2bca054c69975594c2570049e8aa9ae1b5ce
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Jan 22 16:15:35 2010 +0000
+
+    lcov: add coverage result output to more operations
+
+commit 0a31d3c0696015c5e4878e821529eba45451c3dd
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Jan 22 16:14:57 2010 +0000
+
+    lcov: minor cosmetic HTML changes
+    
+    - top level view is now named "top-level"
+    - use sans-serif font for coverage values in file list
+    - use smaller font for show/hide details link
+    - use smaller font for function/source view link
+    - use smaller font for show descriptions link
+
+commit b631fa0cb9aabdf18f9365423f0b0bf85d6b8e16
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Jan 22 16:14:27 2010 +0000
+
+    lcov: improve color legend
+    
+    Move color legend closer to the table containing coverage rates.
+
+commit 2aeeeafb31c36ccd1a51051f040e29a9fcf59df2
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Jan 22 16:13:58 2010 +0000
+
+    lcov: implement branch coverage
+
+commit 49dfe22f41b6c3edcb774dfb89b1a807ce7aee6c
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Jan 22 16:13:34 2010 +0000
+
+    genhtml: implement branch coverage
+
+commit 6aa2422401bb854c9710f5ed2936f06e487848c5
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Jan 22 16:13:07 2010 +0000
+
+    geninfo: implement branch coverage
+
+commit ca2c9781b0a512bd6789eac2b6840405e2d87330
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Jan 22 16:12:27 2010 +0000
+
+    geninfo: consolidate handling of extra gcov parameters
+
+commit 9d9c964eb6ece00b15ef068f176c68cb0eedfda0
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Thu Jan 21 11:26:34 2010 +0000
+
+    lcov: minor fix for lcov --diff
+
+commit 4306f81d1e8446a89fe83d20cd71abe075a3cd61
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Thu Jan 21 10:23:35 2010 +0000
+
+    lcov: improve lcov --list output
+
+commit 3242ce1bae94cfd859c3bc964fab11f85bd7d1ed
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Jan 20 17:13:28 2010 +0000
+
+    lcov: unify data order in tracefiles
+
+commit 8f53b2e8dbbe5580050fbe0c604bd9a9322735a7
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Jan 20 16:05:56 2010 +0000
+
+    lcov: fix bug when applying baseline files without function coverage data
+    
+    Fix the following error that occurs when genhtml's --baseline-file option
+    is used on files which do not contain any function data:
+    
+    genhtml: Can't use an undefined value as a HASH reference at ./lcov/bin/genhtml line 4441.
+
+commit 96fcd676d5ac9c1eb9f83f3dc4c3089ba478baad
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Jan 20 15:28:21 2010 +0000
+
+    lcov: resolve short-name option ambiguities
+
+commit f1d34d49b394a13c33c7a5b51f04e5dfbded5d26
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Jan 20 14:47:50 2010 +0000
+
+    lcov: fix error messages
+
+commit 89ff61aa7cd2ca23b8cacd649288ecf7f67746de
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Jan 20 08:35:20 2010 +0000
+
+    lcov: fix bug when using genhtml's --baseline-file option
+    
+    Fix the following error message when trying to use genhtml's
+    --baseline-file option:
+    
+    genhtml: Undefined subroutine &main::add_fnccounts called at
+    /home/oberpar/bin/genhtml line 4560.
+    
+    Reported by Brian DeGeeter <sixarm@gmail.com>
+
+commit c3df3a8504b06ca32b9863fdb2abb8cf0ce62251
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Jan 18 09:17:17 2010 +0000
+
+    lcov: ensure LANG=C before calling gcov
+    
+    Fix problem calling lcov when LANG is not set to an english locale.
+    Reported by benoit_belbezet@yahoo.fr.
+
+commit d945f23345e02ca535d740782e7ae10cb3396b8c
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Nov 18 09:39:21 2009 +0000
+
+    lcov: more version fixup
+
+commit 413249e6336cff432083954e6ed47236dd35f647
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Nov 18 09:38:03 2009 +0000
+
+    lcov: fix version fixup
+
+commit d0b7148e2d76164e5ea091fe56035c24f7dce22a
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Nov 18 09:34:45 2009 +0000
+
+    lcov: add more CVS versioning
+
+commit 4e0219f918a15cbc9ff40d0e0e4dab91ac073f72
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Nov 18 09:14:56 2009 +0000
+
+    lcov: add CVS revision number to version output
+
+commit 34154c2d48497d9aad41ec1452ba94dd4cbce881
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Oct 30 14:18:45 2009 +0000
+
+    lcov: further clarification in the README
+
+commit 7a4ab1340dd7f88ba0fb56a7b0eb368bf2d0112e
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Oct 30 13:58:56 2009 +0000
+
+    lcov: update README to mention required -lgcov switch during linking
+
+commit 3fa5b311b123af84debbd774baa4a1cd30e7085b
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Tue Oct 27 16:54:41 2009 +0000
+
+    lcov: remove further unneeded warning
+    
+    ... + use correct source for list of filenames
+
+commit cd4051719e72129f4abf1ad177269bf14031f83a
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Tue Oct 27 16:19:05 2009 +0000
+
+    lcov: fix problem with matching filename
+    
+    - used correct source for filenames
+    - converted match_filenames to portable version
+
+commit 0d0ff8a9945260eebed6d316aa08c0021faf3549
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Tue Oct 27 15:29:41 2009 +0000
+
+    lcov: remove unnecessary warning
+
+commit 6c711d664c38d18f788ee8a5239586cd4a5b77d9
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Oct 26 14:21:40 2009 +0000
+
+    lcov: improve derive-func-data option
+    
+    - rewrite graph file handling
+    - make derive data look at all lines belonging to a function to find
+      out whether it has been hit or not
+    - introduce --debug option to better debug problems with graph files
+
+commit 214cda20c4b591a823045f35b73f2a16221c9aa1
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Thu Oct 1 15:26:58 2009 +0000
+
+    lcov: introduce new options --derive-func-data
+    
+    When using a gcov version that does not provide function data,
+    this option will attempt to guess the function coverage data
+    for a function by looking at the number of times that the first
+    line of that function was called.
+
+commit 9a75125895fd07a775a2a25f2cbe66b9fbf332d6
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Thu Oct 1 11:49:53 2009 +0000
+
+    lcov: ignore incomplete function names in .bb files
+    
+    - don't abort processing when an incomplete function name is
+      encountered in a .bb file (gcc 2.95.3 adds those)
+    - fix filename prefix detection
+
+commit d5ab6076a0bfc5ad80652ba592583f7fc7946dc6
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Sep 28 12:27:09 2009 +0000
+
+    lcov: improve detection of gcov-kernel support
+
+commit 3cca782fcac9c4ea54adcebe75e1f047a8dca636
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Tue Sep 22 13:44:04 2009 +0000
+
+    lcov: fix problem with CONFIG_MODVERSIONS
+    
+    Make geninfo work with Linux 2.6.31 and CONFIG_MODVERSIONS.
+
+commit 8af873f44c104cd214b796e13b916718fc8f6f99
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Sep 16 15:24:51 2009 +0000
+
+    lcov: remove default for gcov_dir so that auto-sensing works
+    
+    Fix problem with lcov not finding kernel coverage data at
+    /sys/kernel/debug/gcov because the default system-wide
+    lcovrc file contained a specification for the gcov directory
+    which prevented auto-detection from working.
+
+commit 50f90681af4d105a52b5b0dbf4f0bfd04369ffd2
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Thu Aug 27 10:14:23 2009 +0000
+
+    lcov: apply excluded lines also to function coverage data
+
+commit 4aeb840d25c85a419171970e1a445aeb81079e53
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Thu Aug 27 09:23:13 2009 +0000
+
+    lcov: fix help text typo
+
+commit c17a783f87aa8e42949131d2fbc1c540bb3751a3
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Thu Aug 27 09:22:43 2009 +0000
+
+    lcov: add exclusion markers
+    
+    Users can exclude lines of code from coverage reports by adding keywords
+    to the source code.
+
+commit 445715c88337c13ce496bd05423ee5e58d84705c
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Aug 14 08:19:26 2009 +0000
+
+    lcov: ignore gcov errors for unnamed source files
+    
+    When specifying "--ignore-errors gcov", lcov/geninfo should not abort when
+    they cannot read a .gcov file. Fix this by introducing warnings in the
+    respective places.
+
+commit 0e23f03a9ce130e8ebec679fb5a9a6f854efbee5
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Thu Aug 6 12:34:04 2009 +0000
+
+    lcov: improvements
+    
+    - added --from-package and --to-package options
+    - improved gcov-kernel handling
+
+commit 17a05bdf646870cd61794274c7165211c93c82f9
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Thu Jul 23 12:45:15 2009 +0000
+
+    lcov: fix kernel capture for new gcov-kernel version
+    
+    - fix problems when compiling without O=
+
+commit 64e302b9134b6098852cad2e6180e0722f2dea41
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Tue Jul 21 15:42:44 2009 +0000
+
+    lcov: improve lcov -l output
+
+commit cea6941ef36d0860330b6e94f8c6096dca78ca58
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Tue Jul 21 09:10:49 2009 +0000
+
+    lcov: add support for the linux-2.6.31 upstream gcov kernel support
+
+commit 04470d2b25808f195d338112155b9f7db405d902
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Apr 22 09:13:12 2009 +0000
+
+    genhtml: fix warning about undefined value used
+    
+    nikita@zhuk.fi:
+    genhtml.patch checks that $funcdata->{$func} is defined before using
+    it - I got few "undefined value used" warnings without this check.
+
+commit a12d4f9a5d36232b928be12b7cbfaa9a00b3a923
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Apr 22 09:08:19 2009 +0000
+
+    genpng: fix runtime-warning
+    
+    - when called from within genhtml, genpng would warn about warn_handler
+      being redefined
+
+commit d0b5641c62bbdac89757b9ff185a7aa3f38fc0bb
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Mar 13 09:58:00 2009 +0000
+
+    lcov: improve function name filtering
+    
+    Only remove those characters from function names which would conflict
+    with internal delimiters.
+
+commit fbafa4a5628a639544e83f88083082c685677c36
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Feb 13 15:04:40 2009 +0000
+
+    genhtml: minor man page update
+
+commit 085a2150e38a3c1bdadb5af23c0a8a8a79dc4b0d
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Feb 13 14:56:45 2009 +0000
+
+    genhtml: added --demangle-cpp option
+    
+    - used to convert C++ internal function names to human readable format
+    - based on a patch by slava.semushin@gmail.com
+
+commit 53f3ed4afb45a2a4248314b677d36377598cc73c
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Feb 13 14:07:46 2009 +0000
+
+    genhtml: update comment
+
+commit 3c2b2e8541387506fd514d183f9a4a63c07c0aa4
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Thu Feb 12 17:01:19 2009 +0000
+
+    genhtml: fix error when combining tracefiles without function data
+    
+    - genhtml: Can't use an undefined value as a HASH reference at genhtml
+      line 1506.
+    - bug reported by richard.corden@gmail.com
+
+commit 22397370ada6893b6e9a1c3f6ad0aba7f4864f81
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Feb 11 09:31:24 2009 +0000
+
+    lcov: fix error when combining tracefiles without function data
+    
+    - lcov: Can't use an undefined value as a HASH reference at lcov line
+      1341.
+    - bug reported by richard.corden@gmail.com
+
+commit 24ec53ae83acdd35682ba757adae23750bd4c623
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Feb 9 16:15:49 2009 +0000
+
+    lcov: fix warning when $HOME is not set
+    
+    - based on patch by acalando@free.fr
+
+commit 5da3521d5a438db0a21e93b0d14ea5a3cdab14d9
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Feb 9 12:41:44 2009 +0000
+
+    lcov: use install -pD -m <mode> for file installation
+
+commit bdce1bda2ac1a86aa6dfefae8e18353ba57afe4b
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Feb 9 09:46:00 2009 +0000
+
+    lcov: fix double-counting of function data
+
+commit ea62c4e701abb05dd560ef22b52a4a72c17660e8
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Jan 21 16:33:29 2009 +0000
+
+    geninfo: need to add CR removal to geninfo as well
+    
+    ... or checksumming will fail
+
+commit 70be5df7d58a393e27cee178df669c12ec670c5a
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Jan 21 16:24:01 2009 +0000
+
+    lcov: modify end-of-line CR removal
+    
+    - s///g is 10% slower than s/// - \r may be 0x10 or 0x13 (see man
+      perlport)
+
+commit d8df4b0f83ff175f1a06afb693903ee1a93ec377
+Author: Michael Knigge <michael.knigge@set-software.de>
+Date:   Tue Jan 20 11:41:39 2009 +0000
+
+    lcov: remove CRLF line breaks in source code when generating html output
+    
+    - added patch by michael.knigge@set-software.de
+
+commit 442cca7e69356e7f8ba03bd95f7813576bd197cc
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Nov 17 14:11:20 2008 +0000
+
+    lcov: updated CVS version to 1.8
+
+commit 5c5c85a1c090360facd50cb089b8af98f0b37c47
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Nov 17 13:55:52 2008 +0000
+
+    lcov: version + date updates
+
+commit 9f6a735809c23559b861e97a20af55a66b6b96bb
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Nov 17 13:49:43 2008 +0000
+
+    lcov: fix spec file bug
+
+commit 11483dc0b56d326718edcd31d06458143add858f
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Nov 17 13:44:38 2008 +0000
+
+    lcov: update error and warning messages
+
+commit 4dd11b80d14e34fee2e75b3fe8c7aa163f61ad1d
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Nov 17 12:48:03 2008 +0000
+
+    lcov: preparations for release 1.7
+
+commit b847ed6f3103a4c9f0a48417b9c3f160b9e00557
+Author: Jeff Connelly <jeffconnelly@users.sourceforge.net>
+Date:   Fri Oct 10 07:54:47 2008 +0000
+
+    lcov: geninfo chokes on spaces in the directory name
+    
+    In lcov 1.6, geninfo fails to find gcno/gcda files if the source directory
+    has spaces in the name, because it uses backticks to shell out to "find",
+    passing $directory on the command-line.
+    
+    Attached is a patch that double-quotes the variable, allowing geninfo to
+    operate on directories with spaces in their name. The fix isn't perfect; it
+    won't work on directories with a " character, but it works fine for my
+    purposes (I don't have any directories with quotes). A better fix would be
+    to use IPC::System::Simple's capturex from
+    http://search.cpan.org/~pjf/IPC-System-Simple-0.15/lib/IPC/System/Simple.pm
+    #runx(),_systemx()_and_capturex(). capturex() is a multiple-argument form
+    of the backticks, so it avoids any interpolation errors.
+
+commit ee3cdd554ee4e6d3ef5bdc9c5dcfee50de6375a7
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Aug 18 07:12:33 2008 +0000
+
+    lcov: change sorting order to low-to-high coverage
+
+commit fe665ca5ccf9d73d9ebdae17de88e181c1b9b0eb
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Aug 15 08:38:21 2008 +0000
+
+    lcov: several changes
+    
+    - update download link
+    - unify webpage links
+    - provide --sort and --function-coverage switch + documentation
+
+commit 14137c5456f307982fed418e1e8fac65d7f086c3
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Aug 13 15:57:23 2008 +0000
+
+    lcov: fix function view page creation when --no-func is specified
+
+commit e59f7d15ffc7f1b3794a4212c53d0fb97ac7fb2a
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Aug 13 15:35:48 2008 +0000
+
+    lcov: updated versioning mechanism
+    
+    ... + fixed some man page bugs
+
+commit e933698b31bc2fb4a750d89a5755bb8155313da2
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Aug 13 14:08:23 2008 +0000
+
+    lcov: updated rpm description
+    
+    ... + summary and version strings
+
+commit 5a9660585ce39a77fa38607d0c2d2440955e7242
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Aug 13 13:53:50 2008 +0000
+
+    lcov: integrated function coverage patch
+    
+    ... by Tom Zoernen + sorting function
+
+commit d10ede8179747cfd675a3989578350c710e9bdd5
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed May 7 15:08:12 2008 +0000
+
+    lcov: --norecursion becomes --no-recursion
+    
+    + added docs
+
+commit 4096130608b9faf74c5b5feac554a10b5d9f83ce
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Thu Feb 21 10:20:33 2008 +0000
+
+    lcov: fix error when trying to use genhtml -b
+    
+    genhtml fails when the data file contains an entry which is not
+    found in the base file.
+
+commit 9578099e13388344a6179c7cce54bfa094fd9b08
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Feb 20 17:21:51 2008 +0000
+
+    lcov: fixed problem with pre gcc-3.3 versions
+    
+    read_gcov_headers does not return valid results for pre gcc-3.3 versions.
+    Due to an unnecessary check, parsing of gcov files was aborted. Fix
+    by removing check.
+
+commit 16ec76b48fbc50c32890919e5bd0c30653719af9
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Tue Feb 5 09:18:50 2008 +0000
+
+    lcov: adding support for gzipped html
+    
+    ... based on patch by dnozay@vmware.com
+    
+    dnozay@vmware.com: genhtml is a great tool to generate html, but the more
+    files, the more space it takes (here I have over 113MB of html generated),
+    add to that I need to have different sets, and space usage increases
+    dramatically (2.7GB). we are using browsers with htmlz support, so it would
+    be nice to have support for that in genhtml, relying on 'gzip -S z' to do
+    the job.
+
+commit f2c98a8c8581180533508eb4af41720d8566049e
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Jan 7 16:33:57 2008 +0000
+
+    Filter non-word characters in function name
+    
+    ... as they would break our file format which uses comma and '=' as
+    field separator.
+
+commit 37725fc78fcacaf06e6240971edc3bdd7fe3d142
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Thu Nov 1 16:29:39 2007 +0000
+
+    lcov: fix for problem resulting in lcov aborting with "ERROR: reading string"
+
+commit 48f13fcec1b521d2daba6202ccd7ec0ec8c5ece9
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Thu Oct 4 08:18:07 2007 +0000
+
+    lcov: workaround for gcc 4.1.0 .gcno file oddness
+    
+    scott.heavner@philips.com:
+    I'm trying to use lcov 1.6 with gcov/gcc 4.1.0. The geninfo parser was
+    aborting on a small number of .gcno files. I've patched my local copy so
+    that geninfo prints out the offset of the error and skips the remainder of
+    the problem file
+
+commit 1a805ea068db29b63a83c801f3bb1840fda8dd35
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Aug 24 08:50:26 2007 +0000
+
+    lcov: add experimental option "--norecursion"
+
+commit 194de5071db1d9903d22164432448b73c1ec6cd0
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Thu Aug 23 11:08:39 2007 +0000
+
+    lcov: Makefile for post-release
+
+commit 0750f8a3e5235833711d616a3763c04103cf55a5
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Thu Aug 23 11:04:30 2007 +0000
+
+    lcov: Makefile for release 1.6
+
+commit cb911f7a79593c89a730dc93fa54179fbf1df363
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Aug 20 10:29:35 2007 +0000
+
+    lcov: fixed spec file
+
+commit 62cefebdda87784140eb5f997ae4e575d2338298
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Jul 6 07:38:47 2007 +0000
+
+    lcov: add new option --initial to get zero coverage data from graph files
+
+commit f0b6927f1ab1052b00081c662ced614a6e5f9ed7
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Jul 4 14:38:59 2007 +0000
+
+    lcov: fixed bug that would not delete .gcda files when using -z
+
+commit 13941c3a159caf7dc6ba18a5b13e43c20fc18f2b
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Jul 4 14:18:26 2007 +0000
+
+    lcov: another update in preparation for a new release
+
+commit d25e630a77ef2d0f69139058322269387866e414
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Jul 4 13:13:22 2007 +0000
+
+    lcov: man page update
+
+commit 7844b915af5402441df9ab0423e4c20ef9a2632f
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Tue Jul 3 16:43:05 2007 +0000
+
+    lcov: update manpage
+
+commit 5adaa72bfb32737d18c328492777c1c6116d4a9e
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Jul 2 15:29:02 2007 +0000
+
+    lcov: preparations for new release
+    
+    - updated CHANGES file
+    - added compat-libtool + no-compat-libtool option
+    - changed libtool default to on (due to popular request)
+    - added checksum option
+    - changed checksum default to off (to reduce cpu time + file size)
+    - added geninfo_checksum option to lcovrc, deprecated
+      geninfo_no_checksum
+    - added geninfo_compat_libtool option to lcovrc
+    - minor update of README file
+
+commit 6cbfd5022703a6198e1a1e2a2ddddcc0b90f5334
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Tue May 22 08:11:44 2007 +0000
+
+    lcov: minor help text update
+
+commit 2416ed02ba299c4d0bceb1e47c214b7dec066d7a
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Mar 7 14:59:25 2007 +0000
+
+    lcov
+    
+    - add --ignore-errors option to lcov/geninfo
+    - add --gcov-tool option to lcov/geninfo
+    - remove s390 test case modification in geninfo
+    - restructured help text for lcov/geninfo
+
+commit a13375811717d3ada718e6f52364e4344a7e3187
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Jan 8 17:07:21 2007 +0000
+
+    lcov
+    
+    - re-added libtool compatibility workaround patch by
+      thomas@apestaart.org
+    - added new lcov/geninfo-option --compat_libtool to activate libtool
+      compatibility patch
+
+commit 14871d7b097282819db60266d8b8a38506d7b14a
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Tue Nov 14 11:45:17 2006 +0000
+
+    lcov
+    
+    Fix for problem found by Joerg Hohwieler: lcov -k doesn't work if -k is
+    specified more than once.
+
+commit 43b52b37006822c0fca12548bc72fecc957342ca
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Jun 26 15:48:52 2006 +0000
+
+    lcov: new version for prerelease rpms
+
+commit 89e9d59709c9d9d8722170c86251090adc3b96c9
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Jun 7 09:31:57 2006 +0000
+
+    lcov: removed autoupdate of copyright date (second thoughts)
+
+commit bb0cf1c9d0ed58b37c1551fea765fb1622bcacde
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Jun 7 09:20:37 2006 +0000
+
+    lcov: minor cleanup (release preparations)
+
+commit 527693d753d11ac2b59fe26b923662c99e6e3715
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Apr 5 10:10:05 2006 +0000
+
+    lcov
+    
+    - added base-directory documentation
+    - updated CHANGES file
+
+commit 11ef9338cc4124801c8b61e3edd51a02e50b4c68
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Mar 20 17:09:50 2006 +0000
+
+    genhtml: added html-extension option
+
+commit 93d22308ffb410327248059b7dcdb592f85e249e
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Mar 20 16:39:25 2006 +0000
+
+    genhtml
+    
+    - adding html-prolog and html-epilog options (based on patch by Marcus
+      Boerger)
+    - specified behavior when both --no-prefix and --prefix options where
+      provided
+    - small whitespace diff
+
+commit dcac095cdc00cc65930285bb6fc01d0f257ee4ed
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Feb 15 16:02:07 2006 +0000
+
+    lcov: added check for invalid characters in test names
+
+commit d89e561dfd9c5fde43350af1b145b1892d0710d0
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Dec 2 06:38:16 2005 +0000
+
+    lcov
+    
+    - updated Makefile so that people building RPMs from the CVS version get
+      a correct build version. Note: this needs to be adjusted after each
+      release!
+
+commit 1960123050f9098690768d10cd2490dd49b995f7
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Thu Nov 10 13:10:09 2005 +0000
+
+    lcov
+    
+    - fixed bug: .info file generation with new gcc 4.x compilers may fail
+      for programming languages that allow ':' in function names (c++,
+      objective c)
+    - removed special handling for libtool .libs files
+    - libtool should work with currently undocumented option --base-directory
+
+commit 479d446d3bf20a84c2933100ead279c79eeaf5c4
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Sep 7 16:24:39 2005 +0000
+
+    lcov
+    
+    - implementation of new option --base-directory (untested, undocumented)
+    - minor fix for link-traversal when looking for object directory
+    
+    TODO: document option (man page, online help), add to config file, check
+    whether libtool fix still works
+
+commit 770b94a3172f206de7f194c7497ebae14348b521
+Author: Robert Williamson <robbiew@users.sourceforge.net>
+Date:   Mon Jul 11 17:54:25 2005 +0000
+
+    Applied patch from Stefan Kost
+    
+    when running lcov over an uninstalled user-space apps tests, it finds
+    the .da file in the .libs directories, but does not look for the sources
+    one hierarchy up. Libtool places the object in the .libs dirs. when
+    running gcov manually one can specify -o.libs/ to produce a source.c.gov
+    file. I now have attached a patch that fixes the problem for me. please
+    do not just ignore this report. the lcov tool is so nice and it would be
+    a shame if it can not be used for normal apps.
+
+commit 79f2ff2c168150e7532046c2cdbc1e42c8b4708f
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Tue Jun 14 11:34:59 2005 +0000
+
+    lcov
+    
+    - renamed support for modified compilers (gcc 3.3 hammer patch)
+    - fixed bugs in the support for modified compilers
+
+commit fb7dab3494fdd8b093e6a84f088f6ea07fcefe6e
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Tue Mar 15 18:02:54 2005 +0000
+
+    lcov
+    
+    Emil.Jansson@oss.teleca.se:
+    lcov 1.4 does not work with the gcc version in Mandrake Linux 10.0
+    
+    >> gcc --version
+    
+    gcc (GCC) 3.3.2 (Mandrake Linux 10.0 3.3.2-6mdk)
+    
+    This patch for geninfo fixes the problem:
+
+commit ae3fe899d824e8af8a16736a0c8104c903565a56
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Tue Mar 8 14:23:06 2005 +0000
+
+    lcov
+    
+    - added optional legend to HTML output
+    - changed background color for "good coverage" entries to green for
+      consistency reasons
+
+commit 18b73d39fd9d6bc8829395baa612a6ed98b89efe
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Mar 2 14:49:47 2005 +0000
+
+    lcov
+    
+    - fixed rpm build process to exclude unnecessary directories in RPM
+
+commit ef6ee74df5bf1d1d104322f8fff36b5c6fda34b4
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Mar 2 12:48:29 2005 +0000
+
+    lcov
+    
+    - added man page for configuration file lcovrc
+
+commit be3afe2626d6bc72256e1873d409c737ac4391c9
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Feb 28 16:31:51 2005 +0000
+
+    lcov
+    
+    - Updated CHANGES file in preparation for a new release
+
+commit dc68ce9c804ef21bc8e149d9b468e18c1619bb54
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Tue Nov 2 15:48:45 2004 +0000
+
+    lcov
+    
+    - temporary fix for a problem which occurs when trying to parse C++
+      coverage data generated with vanilla gcc 3.3.3
+
+commit efedc5b930ab6743ea9f47ce4ea4a1a75bd739ff
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Sep 27 13:13:51 2004 +0000
+
+    lcov
+    
+    - fix for minor bug in geninfo (access to uninitialized variable)
+      related to SLES9 compatibility test and test for existing source code
+      files
+
+commit 47943eedfbec7a12c52e7a8ccbcfaf8d0706f142
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Sep 20 14:11:16 2004 +0000
+
+    lcov
+    
+    - minor fix for regular expression used to parse .gcov files - caused
+      problems when parsing branch coverage data and when using custom
+      gcov versions
+
+commit ce6335ebd92ce017b75ee3e194e9e3ca7bc7e1f3
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Tue Sep 14 15:52:38 2004 +0000
+
+    lcov
+    
+    - fixed bug in geninfo which would not report any FN: data for data
+      generated with gcc versions 3.4.0 and above
+
+commit 58df8af3a62fa4e60569ef300e0ddd0073bf109e
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Tue Aug 31 15:57:41 2004 +0000
+
+    lcov
+    
+    - added support for modified GCC version provided by SUSE SLES9
+
+commit 69f3bc3a0c59b35eb6882205286a68b04a8a8d22
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Tue Aug 31 15:48:32 2004 +0000
+
+    lcov
+    
+    - fixed bug in lcov RPM spec file which would not include the global
+      config file in the package list
+
+commit 5d10ca22144ad2be885405c3683b20c0976f7562
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Aug 9 14:32:23 2004 +0000
+
+    lcov
+    
+    - fixed a bug which would cause generation of incorrect line checksums
+      when source code is not available while capturing coverage data
+    - changed default directory for temporary files from . to /tmp
+
+commit 8ee3061f23f17a5074deda0777c66c3e82b5d852
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Aug 9 11:15:02 2004 +0000
+
+    lcov
+    
+    - added configuration file support
+    - fixed Makefile error for target "uninstall"
+
+commit 58af07f0b0ca1af8c9f2b90ad1683447bb560165
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Aug 6 11:36:33 2004 +0000
+
+    lcov
+    
+    - fixed bug which would cause an error when lcov was used on a source
+      directory which contained perl regular expression special characters
+    - simplified regular expression character escaping
+    - removed unnecessary function escape_shell from lcov
+
+commit 69a6918d4cd386aff2fbff093a6e0b5ddcc46602
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Tue Mar 30 13:27:55 2004 +0000
+
+    lcov: - added --path option to fix --diff functionality
+
+commit cbc6cb11b532e525ae8b0c0742a4fd41189ca7c2
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Mar 29 12:56:08 2004 +0000
+
+    lcov
+    
+    - Added compatibility for gcc-3.4
+    - Modified --diff function to better cope with ambiguous entries in
+      patch files
+    - Modified --capture option to use modprobe before insmod (needed for
+      2.6)
+
+commit 1cf9a02c3ea0e98cc1d8b626eaa0a2a1cbd96cf1
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Jan 30 09:42:13 2004 +0000
+
+    lcov
+    
+    - updated CHANGES file
+    - changed Makefile install path (/usr/local/bin -> /usr/bin)
+
+commit c60f0668059032cf4dc5f6c556fd6117925f535f
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Jan 14 10:14:10 2004 +0000
+
+    lcov-patch by Laurent Deniel
+    
+    avoids aborting the geninfo processing when an empty .bb file is
+    encountered (e.g. source code with no profiled statement)
+
+commit 7f2966f8f874a6c905b4d31e5aaf0f4654929044
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Dec 19 16:22:52 2003 +0000
+
+    lcov: updated references to lcov webpage to reflect recent site changes
+
+commit a3893f4eb2b4fadc4d7350324d74fa453a5ba0f3
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Dec 19 12:50:28 2003 +0000
+
+    Added changes by Laurent Deniel
+    
+    - a small patch to lcov 1.1 that introduces the --follow option (in
+      lcov & geninfo) to control whether or not links should be followed
+      while searching for .da files.
+    - a workaround for a gcov (3.2) bug which aborts with empty .da files
+      (gcov 3.3 is fixed but many distributions include gcc 3.2)
+
+commit d44f2f8e8672e31cc104c0598b0556a5949dc067
+Author: Paul Larson <plars@users.sourceforge.net>
+Date:   Fri Nov 21 19:34:59 2003 +0000
+
+    Fixed two buglets that caused geninfo to break with some versions of gcov.
+    
+    1. Return value for gcov --help might not be 0, expect -1 when it
+       doesn't exist
+    2. use -b instead of expanded (--branch-coverage or whatever it was)
+
+commit 5a1a33a840a665c77409f799be91cc2dce5cd3b2
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Tue Nov 18 14:06:47 2003 +0000
+
+    lcov
+    
+    - fixed function which interprets branch possibility data in geninfo
+      (branch x taken = y% would not be interpreted correctly)
+    - deactivated function which would add 'uname -a' output to testname
+      in geninfo (output in genhtml/showdetails looked unreadable, there
+      needs to be some better solution)
+
+commit e0ea03fedf43a3232c35708f882d7058998b2b3d
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Oct 10 14:18:32 2003 +0000
+
+    New function and bug fix update.
+    
+    Makefile:
+    - Added rule to build source rpm
+    
+    lcov.spec:
+    - Modified to support building source rpms
+    
+    genhtml:
+    - Fixed bug which would not correctly associate data sets with an empty
+      test name (only necessary when using --show-details in genhtml)
+    - Added checksumming mechanism: each tracefile now contains a checksum for
+      each instrumented line to detect incompatible data
+    - Implemented new command line option '--nochecksum' to suppress generation
+      of checksums
+    - Implemented new command line option '--highlight' which highlights lines of
+      code which were only covered in converted tracefiles (see '--diff' option of
+      lcov)
+    
+    geninfo:
+    - Added checksumming mechanism: each tracefile now contains a checksum for
+      each instrumented line to detect incompatible data
+    - Implemented new command line option '--nochecksum' to suppress generation
+      of checksums
+    - Added function to collect branch coverage data
+    
+    lcov:
+    - Fixed bug which would not correctly associate data sets with an empty
+      test name (only necessary when using --show-details in genhtml)
+    - Cleaned up internal command line option check
+    - Added info() output when reading tracefiles
+    - Added checksumming mechanism: each tracefile now contains a checksum for
+      each instrumented line to detect incompatible data
+    - Implemented new command line option '--nochecksum' to suppress generation
+      of checksums
+    - Implemented new command line option '--diff' which allows converting
+      coverage data from an older source code version by using a diff file
+      to map line numbers
+    
+    genpng:
+    - Added support for the highlighting option of genhtml
+    - Corrected tab to spaces conversion
+
+commit c17af02b4a856d8733a763e6c0685c31f3c7fb74
+Author: Nigel Hinds <nhinds@users.sourceforge.net>
+Date:   Fri Sep 19 21:51:06 2003 +0000
+
+    capture branch coverage data from GCOV.
+
+commit e2fc88f85254017bcf1fb04a3c935395a9b7a4a1
+Author: James M Kenefick Jr <parseexception@users.sourceforge.net>
+Date:   Thu Sep 4 16:56:10 2003 +0000
+
+    Initial checking of the galaxy map
+
+commit dfec606f3b30e1ac0f4114cfb98b29f91e9edb21
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Sat Jul 5 13:48:45 2003 +0000
+
+    LCOV: Fixed negative count handling
+    
+    - Negative counts are treated as zero
+    - Warning is issued when encountering negative counts
+
+commit a2ee105a07b19c52efe7a3e6e5b11a27b4b60ef8
+Author: Paul Larson <plars@users.sourceforge.net>
+Date:   Wed Jul 2 19:37:50 2003 +0000
+
+    Small fixes before the release
+
+commit 72860625dd904f84909253b20a7fc024b4e3377e
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon May 5 08:32:04 2003 +0000
+
+    Adjusted example program and README file
+    
+    ... to reflect renaming of lcov option '--reset' to '--zerocounters'.
+
+commit cbd9e315832960604d2949439326b30f4061e512
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Apr 30 15:47:51 2003 +0000
+
+    Renamed lcov option '--reset' to '--zerocounters'
+    
+    - Included '--remove' in help text of lcov
+    - Adjusted man pages to include option changes
+    - Extended info() change to geninfo and genhtml (infos are now printed
+      to STDERR)
+
+commit 8155960cb5db0359470d2a5f652bdc744e9ecfcd
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Wed Apr 16 15:43:31 2003 +0000
+
+    Modified read_gcov so that it can also parse the new gcov format which is to be introduced in gcc 3.3.
+
+commit 382440f781b12ade8f1f7962a0eb1cfc0525f2a5
+Author: Paul Larson <plars@users.sourceforge.net>
+Date:   Tue Apr 15 16:06:59 2003 +0000
+
+    Added --remove option info() now prints to stderr
+
+commit 62760fa1840326e849c7e58892ce671f510bb0af
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Mon Apr 14 09:31:51 2003 +0000
+
+    Check-in of updated LCOV version (to be released as 1.1).
+    
+    Includes fixes and modifications by Mike Kobler, Paul Larson and
+    myself.
+    
+    A quote from the CHANGS file:
+    - Added CHANGES file
+    - Added Makefile implementing the following targets:
+      * install    : install LCOV scripts and man pages
+      * uninstall  : revert previous installation
+      * dist       : create lcov.tar.gz file and lcov.rpm file
+      * clean      : clean up example directory, remove .tar and .rpm files
+    - Added man pages for all scripts
+    - Added example program to demonstrate the use of LCOV with a userspace
+      application
+    - Implemented RPM build process
+    - New directory structure:
+      * bin        : contains all executables
+      * example    : contains a userspace example for LCOV
+      * man        : contains man pages
+      * rpm        : contains files required for the RPM build process
+    - LCOV-scripts are now in bin/
+    - Removed .pl-extension from LCOV-script files
+    - Renamed readme.txt to README
+    
+    README:
+    - Adjusted mailing list address to ltp-coverage@lists.sourceforge.net
+    - Fixed incorrect parameter '--output-filename' in example LCOV call
+    - Removed tool descriptions and turned them into man pages
+    - Installation instructions now refer to RPM and tarball
+    
+    descriptions.tests:
+    - Fixed some spelling errors
+    
+    genhtml:
+    - Fixed bug which resulted in an error when trying to combine .info files
+      containing data without a test name
+    - Fixed bug which would not correctly handle data files in directories
+      with names containing some special characters ('+', etc.)
+    - Added check for empty tracefiles to prevent division-by-zeros
+    - Implemented new command line option --num-spaces / the number of spaces
+      which replace a tab in source code view is now user defined
+    - Fixed tab expansion so that in source code view, a tab doesn't produce a
+      fixed number of spaces, but as many spaces as are needed to advance to the
+      next tab position
+    - Output directory is now created if it doesn't exist
+    - Renamed "overview page" to "directory view page"
+    - HTML output pages are now titled "LCOV" instead of "GCOV"
+    
+    geninfo:
+    - Fixed bug which would not allow .info files to be generated in directories
+      with names containing some special characters
+    
+    lcov:
+    - Fixed bug which would cause lcov to fail when the tool is installed in
+      a path with a name containing some special characters
+    - Implemented new command line option '--add-tracefile' which allows the
+      combination of data from several tracefiles
+    - Implemented new command line option '--list' which lists the contents
+      of a tracefile
+    - Implemented new command line option '--extract' which allows extracting
+      data for a particular set of files from a tracefile
+    - Fixed name of gcov kernel module (new package contains gcov-prof.c)
+    - Changed name of gcov kernel directory from /proc/gcov to a global constant
+      so that it may be changed easily when required in future versions
+
+commit ec94ed71838a9780e82ea8bd67742bde2f4eeb47
+Author: Paul Larson <plars@users.sourceforge.net>
+Date:   Fri Mar 7 20:28:15 2003 +0000
+
+    Fix lcov.pl to work with the new gcov-kernel module
+    
+    ... ,documentation fixes in readme.txt
+
+commit e70d9abdb60b83de7174815371259c63fa75bf76
+Author: Robert Williamson <robbiew@users.sourceforge.net>
+Date:   Tue Feb 18 20:05:09 2003 +0000
+
+    Applied patch from Mike Kobler:
+    
+    One of my source file paths includes a "+" in the directory name.  I found
+    that genhtml.pl died when it encountered it. I was able to fix the problem
+    by modifying the string with the escape character before parsing it.
+
+commit 69ef6f1b607670589aae1ae1e6c78ef1b5d204e3
+Author: Peter Oberparleiter <oberpapr@users.sourceforge.net>
+Date:   Fri Sep 6 09:04:34 2002 +0000
+
+    Replaced reference to "cat" cvs directory
+    
+    ... and to .zip package.
+
+commit c641f6e694e2bebf9ef0a507091460026463d169
+Author: Manoj Iyer <iyermanoj@users.sourceforge.net>
+Date:   Thu Sep 5 19:14:51 2002 +0000
+
+    Coverage analysis files.
+    
+    Peter worked on this version.
diff --git a/ThirdParty/lcov/CONTRIBUTING b/ThirdParty/lcov/CONTRIBUTING
new file mode 100644
index 0000000000000000000000000000000000000000..6890789bd140c4443e89b6d3412a362860ebffda
--- /dev/null
+++ b/ThirdParty/lcov/CONTRIBUTING
@@ -0,0 +1,93 @@
+Contributing to LCOV
+====================
+
+Please read this document if you would like to help improving the LTP GCOV
+extension (LCOV). In general, all types of contributions are welcome, for
+example:
+
+ * Fixes for code or documentation
+ * Performance and compatibility improvements
+ * Functional enhancements
+
+There are some rules that these contributions must follow to be acceptable for
+inclusion:
+
+ 1. The contribution must align with the project goals of LCOV.
+ 2. The contribution must follow a particular format.
+ 3. The contribution must be signed.
+
+Once you have made sure that your contribution follows these rules, send it via
+e-mail to the LTP coverage mailing list [1].
+
+
+Signing your work
+=================
+
+All contributions to LCOV must be signed by putting the following line at the
+end of the explanation of a patch:
+
+  Signed-off-by: Your Name <your.email@example.org>
+
+By signing a patch, you certify the following:
+
+  By making a contribution to the LTP GCOV extension (LCOV) on
+  http://ltp.sourceforge.net, I certify that:
+
+  a) The contribution was created by me and I have the right to submit it
+     under the terms and conditions of the open source license
+     "GNU General Public License, version 2 or later".
+     (http://www.gnu.org/licenses/old-licenses/gpl-2.0.html).
+
+  b) The contribution is made free of any other party's intellectual property
+     claims or rights.
+
+  c) I understand and agree that this project and the contribution are public
+     and that a record of the contribution (including all personal information
+     I submit with it, including my sign-off) is maintained indefinitely and
+     may be redistributed consistent with this project or the open source
+     license(s) involved.
+
+
+Project goals
+=============
+
+The goal of LCOV is to provide a set of command line tools that can be used to
+collect, process and visualize code coverage data as produced by the gcov tool
+that is part of the GNU Compiler Collection (GCC) [2].
+
+If you have an idea for a contribution but are unsure if it aligns with the
+project goals, feel free to discuss the idea on the LTP coverage mailing
+list [1].
+
+
+Contribution format
+===================
+
+To contribute a change, please create a patch using 'git format-patch'.
+Alternatively you can use the diff utility with the following command line
+options:
+
+  diff -Naurp
+
+Please base your changes on the most current version of LCOV. You can use the
+following command line to obtain this version from the lcov Git repository:
+
+ git clone https://github.com/linux-test-project/lcov.git
+
+Add a meaningful description of the contribution to the top of the patch. The
+description should follow this format:
+
+  component: short description
+
+  detailed description
+
+  Signed-off-by: Your Name <your.email@example.org>
+
+With your Signed-off-by, you certify the rules stated in section
+"Signing your work".
+
+
+-- 
+
+[1] ltp-coverage@lists.sourceforge.net
+[2] http://gcc.gnu.org
diff --git a/ThirdParty/lcov/COPYING b/ThirdParty/lcov/COPYING
new file mode 100644
index 0000000000000000000000000000000000000000..d511905c1647a1e311e8b20d5930a37a9c2531cd
--- /dev/null
+++ b/ThirdParty/lcov/COPYING
@@ -0,0 +1,339 @@
+		    GNU GENERAL PUBLIC LICENSE
+		       Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+		    GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+			    NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
+
+	    How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) year name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/ThirdParty/lcov/Makefile b/ThirdParty/lcov/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..1207cb19add633f0b5f99f2e7c0c4ce1b95bed95
--- /dev/null
+++ b/ThirdParty/lcov/Makefile
@@ -0,0 +1,122 @@
+#
+# Makefile for LCOV
+#
+# Make targets:
+#   - install:   install LCOV tools and man pages on the system
+#   - uninstall: remove tools and man pages from the system
+#   - dist:      create files required for distribution, i.e. the lcov.tar.gz
+#                and the lcov.rpm file. Just make sure to adjust the VERSION
+#                and RELEASE variables below - both version and date strings
+#                will be updated in all necessary files.
+#   - clean:     remove all generated files
+#
+
+VERSION := $(shell bin/get_version.sh --version)
+RELEASE := $(shell bin/get_version.sh --release)
+FULL    := $(shell bin/get_version.sh --full)
+
+# Set this variable during 'make install' to specify the Perl interpreter used in
+# installed scripts, or leave empty to keep the current interpreter.
+export LCOV_PERL_PATH := /usr/bin/perl
+
+PREFIX  := /usr/local
+
+CFG_DIR := $(PREFIX)/etc
+BIN_DIR := $(PREFIX)/bin
+MAN_DIR := $(PREFIX)/share/man
+TMP_DIR := $(shell mktemp -d)
+FILES   := $(wildcard bin/*) $(wildcard man/*) README Makefile \
+	   $(wildcard rpm/*) lcovrc
+
+.PHONY: all info clean install uninstall rpms test
+
+all: info
+
+info:
+	@echo "Available make targets:"
+	@echo "  install   : install binaries and man pages in DESTDIR (default /)"
+	@echo "  uninstall : delete binaries and man pages from DESTDIR (default /)"
+	@echo "  dist      : create packages (RPM, tarball) ready for distribution"
+	@echo "  test      : perform self-tests"
+
+clean:
+	rm -f lcov-*.tar.gz
+	rm -f lcov-*.rpm
+	make -C example clean
+	make -C test -s clean
+
+install:
+	bin/install.sh bin/lcov $(DESTDIR)$(BIN_DIR)/lcov -m 755
+	bin/install.sh bin/genhtml $(DESTDIR)$(BIN_DIR)/genhtml -m 755
+	bin/install.sh bin/geninfo $(DESTDIR)$(BIN_DIR)/geninfo -m 755
+	bin/install.sh bin/genpng $(DESTDIR)$(BIN_DIR)/genpng -m 755
+	bin/install.sh bin/gendesc $(DESTDIR)$(BIN_DIR)/gendesc -m 755
+	bin/install.sh man/lcov.1 $(DESTDIR)$(MAN_DIR)/man1/lcov.1 -m 644
+	bin/install.sh man/genhtml.1 $(DESTDIR)$(MAN_DIR)/man1/genhtml.1 -m 644
+	bin/install.sh man/geninfo.1 $(DESTDIR)$(MAN_DIR)/man1/geninfo.1 -m 644
+	bin/install.sh man/genpng.1 $(DESTDIR)$(MAN_DIR)/man1/genpng.1 -m 644
+	bin/install.sh man/gendesc.1 $(DESTDIR)$(MAN_DIR)/man1/gendesc.1 -m 644
+	bin/install.sh man/lcovrc.5 $(DESTDIR)$(MAN_DIR)/man5/lcovrc.5 -m 644
+	bin/install.sh lcovrc $(DESTDIR)$(CFG_DIR)/lcovrc -m 644
+	bin/updateversion.pl $(DESTDIR)$(BIN_DIR)/lcov $(VERSION) $(RELEASE) $(FULL)
+	bin/updateversion.pl $(DESTDIR)$(BIN_DIR)/genhtml $(VERSION) $(RELEASE) $(FULL)
+	bin/updateversion.pl $(DESTDIR)$(BIN_DIR)/geninfo $(VERSION) $(RELEASE) $(FULL)
+	bin/updateversion.pl $(DESTDIR)$(BIN_DIR)/genpng $(VERSION) $(RELEASE) $(FULL)
+	bin/updateversion.pl $(DESTDIR)$(BIN_DIR)/gendesc $(VERSION) $(RELEASE) $(FULL)
+	bin/updateversion.pl $(DESTDIR)$(MAN_DIR)/man1/lcov.1 $(VERSION) $(RELEASE) $(FULL)
+	bin/updateversion.pl $(DESTDIR)$(MAN_DIR)/man1/genhtml.1 $(VERSION) $(RELEASE) $(FULL)
+	bin/updateversion.pl $(DESTDIR)$(MAN_DIR)/man1/geninfo.1 $(VERSION) $(RELEASE) $(FULL)
+	bin/updateversion.pl $(DESTDIR)$(MAN_DIR)/man1/genpng.1 $(VERSION) $(RELEASE) $(FULL)
+	bin/updateversion.pl $(DESTDIR)$(MAN_DIR)/man1/gendesc.1 $(VERSION) $(RELEASE) $(FULL)
+	bin/updateversion.pl $(DESTDIR)$(MAN_DIR)/man5/lcovrc.5 $(VERSION) $(RELEASE) $(FULL)
+
+uninstall:
+	bin/install.sh --uninstall bin/lcov $(DESTDIR)$(BIN_DIR)/lcov
+	bin/install.sh --uninstall bin/genhtml $(DESTDIR)$(BIN_DIR)/genhtml
+	bin/install.sh --uninstall bin/geninfo $(DESTDIR)$(BIN_DIR)/geninfo
+	bin/install.sh --uninstall bin/genpng $(DESTDIR)$(BIN_DIR)/genpng
+	bin/install.sh --uninstall bin/gendesc $(DESTDIR)$(BIN_DIR)/gendesc
+	bin/install.sh --uninstall man/lcov.1 $(DESTDIR)$(MAN_DIR)/man1/lcov.1
+	bin/install.sh --uninstall man/genhtml.1 $(DESTDIR)$(MAN_DIR)/man1/genhtml.1
+	bin/install.sh --uninstall man/geninfo.1 $(DESTDIR)$(MAN_DIR)/man1/geninfo.1
+	bin/install.sh --uninstall man/genpng.1 $(DESTDIR)$(MAN_DIR)/man1/genpng.1
+	bin/install.sh --uninstall man/gendesc.1 $(DESTDIR)$(MAN_DIR)/man1/gendesc.1
+	bin/install.sh --uninstall man/lcovrc.5 $(DESTDIR)$(MAN_DIR)/man5/lcovrc.5
+	bin/install.sh --uninstall lcovrc $(DESTDIR)$(CFG_DIR)/lcovrc
+
+dist: lcov-$(VERSION).tar.gz lcov-$(VERSION)-$(RELEASE).noarch.rpm \
+      lcov-$(VERSION)-$(RELEASE).src.rpm
+
+lcov-$(VERSION).tar.gz: $(FILES)
+	mkdir $(TMP_DIR)/lcov-$(VERSION)
+	cp -r * $(TMP_DIR)/lcov-$(VERSION)
+	bin/copy_dates.sh . $(TMP_DIR)/lcov-$(VERSION)
+	make -C $(TMP_DIR)/lcov-$(VERSION) clean
+	bin/updateversion.pl $(TMP_DIR)/lcov-$(VERSION) $(VERSION) $(RELEASE) $(FULL)
+	bin/get_changes.sh > $(TMP_DIR)/lcov-$(VERSION)/CHANGES
+	cd $(TMP_DIR) ; \
+	tar cfz $(TMP_DIR)/lcov-$(VERSION).tar.gz lcov-$(VERSION)
+	mv $(TMP_DIR)/lcov-$(VERSION).tar.gz .
+	rm -rf $(TMP_DIR)
+
+lcov-$(VERSION)-$(RELEASE).noarch.rpm: rpms
+lcov-$(VERSION)-$(RELEASE).src.rpm: rpms
+
+rpms: lcov-$(VERSION).tar.gz
+	mkdir $(TMP_DIR)
+	mkdir $(TMP_DIR)/BUILD
+	mkdir $(TMP_DIR)/RPMS
+	mkdir $(TMP_DIR)/SOURCES
+	mkdir $(TMP_DIR)/SRPMS
+	cp lcov-$(VERSION).tar.gz $(TMP_DIR)/SOURCES
+	cd $(TMP_DIR)/BUILD ; \
+	tar xfz $(TMP_DIR)/SOURCES/lcov-$(VERSION).tar.gz \
+		lcov-$(VERSION)/rpm/lcov.spec
+	rpmbuild --define '_topdir $(TMP_DIR)' \
+		 -ba $(TMP_DIR)/BUILD/lcov-$(VERSION)/rpm/lcov.spec
+	mv $(TMP_DIR)/RPMS/noarch/lcov-$(VERSION)-$(RELEASE).noarch.rpm .
+	mv $(TMP_DIR)/SRPMS/lcov-$(VERSION)-$(RELEASE).src.rpm .
+	rm -rf $(TMP_DIR)
+
+test:
+	@make -C test -s all
diff --git a/ThirdParty/lcov/README b/ThirdParty/lcov/README
new file mode 100644
index 0000000000000000000000000000000000000000..ad53c3cbcbf06c6cd326b2bac49cd973ebf6bb8b
--- /dev/null
+++ b/ThirdParty/lcov/README
@@ -0,0 +1,135 @@
+-------------------------------------------------
+- README file for the LTP GCOV extension (LCOV) -
+- Last changes: 2019-02-28                      -
+-------------------------------------------------
+
+Description
+-----------
+  LCOV is an extension of GCOV, a GNU tool which provides information about
+  what parts of a program are actually executed (i.e. "covered") while running
+  a particular test case. The extension consists of a set of Perl scripts
+  which build on the textual GCOV output to implement the following enhanced
+  functionality:
+
+    * HTML based output: coverage rates are additionally indicated using bar
+      graphs and specific colors.
+
+    * Support for large projects: overview pages allow quick browsing of
+      coverage data by providing three levels of detail: directory view,
+      file view and source code view.
+
+  LCOV was initially designed to support Linux kernel coverage measurements,
+  but works as well for coverage measurements on standard user space
+  applications.
+
+
+Further README contents
+-----------------------
+  1. Included files
+  2. Installing LCOV
+  3. An example of how to access kernel coverage data
+  4. An example of how to access coverage data for a user space program
+  5. Questions and Comments
+
+
+
+1. Included files
+------------------
+  README             - This README file
+  CHANGES            - List of changes between releases
+  bin/lcov           - Tool for capturing LCOV coverage data
+  bin/genhtml        - Tool for creating HTML output from LCOV data
+  bin/gendesc        - Tool for creating description files as used by genhtml
+  bin/geninfo        - Internal tool (creates LCOV data files)
+  bin/genpng         - Internal tool (creates png overviews of source files)
+  bin/install.sh     - Internal tool (takes care of un-/installing)
+  man                - Directory containing man pages for included tools
+  example            - Directory containing an example to demonstrate LCOV
+  lcovrc             - LCOV configuration file
+  Makefile           - Makefile providing 'install' and 'uninstall' targets
+
+
+2. Installing LCOV
+------------------
+The LCOV package is available as either RPM or tarball from:
+
+  http://ltp.sourceforge.net/coverage/lcov.php
+
+To install the tarball, unpack it to a directory and run:
+
+  make install
+
+Use Git for the most recent (but possibly unstable) version:
+
+  git clone https://github.com/linux-test-project/lcov.git
+
+Change to the resulting lcov directory and type:
+
+  make install
+
+
+3. An example of how to access kernel coverage data
+---------------------------------------------------
+Requirements: get and install the gcov-kernel package from
+
+  http://sourceforge.net/projects/ltp
+
+Copy the resulting gcov kernel module file to either the system wide modules
+directory or the same directory as the Perl scripts. As root, do the following:
+
+  a) Resetting counters
+
+     lcov --zerocounters
+
+  b) Capturing the current coverage state to a file
+
+     lcov --capture --output-file kernel.info
+
+  c) Getting HTML output
+
+     genhtml kernel.info
+
+Point the web browser of your choice to the resulting index.html file.
+
+
+4. An example of how to access coverage data for a user space program
+---------------------------------------------------------------------
+Requirements: compile the program in question using GCC with the options
+-fprofile-arcs and -ftest-coverage. During linking, make sure to specify
+-lgcov or -coverage.
+
+Assuming the compile directory is called "appdir", do the following:
+
+  a) Resetting counters
+
+     lcov --directory appdir --zerocounters
+
+  b) Capturing the current coverage state to a file
+
+     lcov --directory appdir --capture --output-file app.info
+
+     Note that this step only works after the application has
+     been started and stopped at least once. Otherwise lcov will
+     abort with an error mentioning that there are no data/.gcda files.
+
+  c) Getting HTML output
+
+     genhtml app.info
+
+Point the web browser of your choice to the resulting index.html file.
+
+Please note that independently of where the application is installed or
+from which directory it is run, the --directory statement needs to
+point to the directory in which the application was compiled.
+
+For further information on the gcc profiling mechanism, please also
+consult the gcov man page.
+
+
+5. Questions and comments
+-------------------------
+See the included man pages for more information on how to use the LCOV tools.
+
+Please email further questions or comments regarding this tool to the
+LTP Mailing list at ltp-coverage@lists.sourceforge.net  
+
diff --git a/ThirdParty/lcov/bin/copy_dates.sh b/ThirdParty/lcov/bin/copy_dates.sh
new file mode 100755
index 0000000000000000000000000000000000000000..aef5f5ed36cfce1d36186425b2260c5fee94a51a
--- /dev/null
+++ b/ThirdParty/lcov/bin/copy_dates.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+#
+# Usage: copy_dates.sh SOURCE TARGET
+#
+# For each file found in SOURCE, set the modification time of the copy of that
+# file in TARGET to either the time of the latest Git commit (if SOURCE contains
+# a Git repository and the file was not modified after the last commit), or the
+# modification time of the original file.
+
+SOURCE="$1"
+TARGET="$2"
+
+if [ -z "$SOURCE" -o -z "$TARGET" ] ; then
+	echo "Usage: $0 SOURCE TARGET" >&2
+	exit 1
+fi
+
+[ -d "$SOURCE/.git" ] ; NOGIT=$?
+
+echo "Copying modification/commit times from $SOURCE to $TARGET"
+
+cd "$SOURCE" || exit 1
+find * -type f | while read FILENAME ; do
+	[ ! -e "$TARGET/$FILENAME" ] && continue
+
+	# Copy modification time
+	touch -m "$TARGET/$FILENAME" -r "$FILENAME"
+
+	[ $NOGIT -eq 1 ] && continue				# No Git
+	git diff --quiet -- "$FILENAME" || continue		# Modified
+	git diff --quiet --cached -- "$FILENAME" || continue	# Modified
+
+	# Apply modification time from Git commit time
+	TIME=$(git log --pretty=format:%cd -n 1 --date=iso -- "$FILENAME")
+	[ -n "$TIME" ] && touch -m "$TARGET/$FILENAME" --date "$TIME"
+done
diff --git a/ThirdParty/lcov/bin/gendesc b/ThirdParty/lcov/bin/gendesc
new file mode 100755
index 0000000000000000000000000000000000000000..334ee7892372935d48dc349ce2fb3cc373af3080
--- /dev/null
+++ b/ThirdParty/lcov/bin/gendesc
@@ -0,0 +1,226 @@
+#!/usr/bin/env perl
+#
+#   Copyright (c) International Business Machines  Corp., 2002
+#
+#   This program is free software;  you can redistribute it and/or modify
+#   it under the terms of the GNU General Public License as published by
+#   the Free Software Foundation; either version 2 of the License, or (at
+#   your option) any later version.
+#
+#   This program is distributed in the hope that it will be useful, but
+#   WITHOUT ANY WARRANTY;  without even the implied warranty of
+#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+#   General Public License for more details.                 
+#
+#   You should have received a copy of the GNU General Public License
+#   along with this program;  if not, write to the Free Software
+#   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#
+# gendesc
+#
+#   This script creates a description file as understood by genhtml.
+#   Input file format:
+#
+#   For each test case:
+#     <test name><optional whitespace>
+#     <at least one whitespace character (blank/tab)><test description>
+#   
+#   Actual description may consist of several lines. By default, output is
+#   written to stdout. Test names consist of alphanumeric characters
+#   including _ and -.
+#
+#
+# History:
+#   2002-09-02: created by Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
+#
+
+use strict;
+use warnings;
+use File::Basename; 
+use Getopt::Long;
+use Cwd qw/abs_path/;
+
+
+# Constants
+our $tool_dir		= abs_path(dirname($0));
+our $lcov_version	= "LCOV version 1.14";
+our $lcov_url		= "http://ltp.sourceforge.net/coverage/lcov.php";
+our $tool_name		= basename($0);
+
+
+# Prototypes
+sub print_usage(*);
+sub gen_desc();
+sub warn_handler($);
+sub die_handler($);
+
+
+# Global variables
+our $help;
+our $version;
+our $output_filename;
+our $input_filename;
+
+
+#
+# Code entry point
+#
+
+$SIG{__WARN__} = \&warn_handler;
+$SIG{__DIE__} = \&die_handler;
+
+# Parse command line options
+if (!GetOptions("output-filename=s" => \$output_filename,
+		"version" =>\$version,
+		"help|?" => \$help
+		))
+{
+	print(STDERR "Use $tool_name --help to get usage information\n");
+	exit(1);
+}
+
+$input_filename = $ARGV[0];
+
+# Check for help option
+if ($help)
+{
+	print_usage(*STDOUT);
+	exit(0);
+}
+
+# Check for version option
+if ($version)
+{
+	print("$tool_name: $lcov_version\n");
+	exit(0);
+}
+
+
+# Check for input filename
+if (!$input_filename)
+{
+	die("No input filename specified\n".
+	    "Use $tool_name --help to get usage information\n");
+}
+
+# Do something
+gen_desc();
+
+
+#
+# print_usage(handle)
+#
+# Write out command line usage information to given filehandle.
+#
+
+sub print_usage(*)
+{
+	local *HANDLE = $_[0];
+
+	print(HANDLE <<END_OF_USAGE)
+Usage: $tool_name [OPTIONS] INPUTFILE
+
+Convert a test case description file into a format as understood by genhtml.
+
+  -h, --help                        Print this help, then exit
+  -v, --version                     Print version number, then exit
+  -o, --output-filename FILENAME    Write description to FILENAME
+
+For more information see: $lcov_url
+END_OF_USAGE
+	;
+}
+
+
+#
+# gen_desc()
+#
+# Read text file INPUT_FILENAME and convert the contained description to a
+# format as understood by genhtml, i.e.
+#
+#    TN:<test name>
+#    TD:<test description>
+#
+# If defined, write output to OUTPUT_FILENAME, otherwise to stdout.
+#
+# Die on error.
+#
+
+sub gen_desc()
+{
+	local *INPUT_HANDLE;
+	local *OUTPUT_HANDLE;
+	my $empty_line = "ignore";
+
+	open(INPUT_HANDLE, "<", $input_filename)
+		or die("ERROR: cannot open $input_filename!\n");
+
+	# Open output file for writing
+	if ($output_filename)
+	{
+		open(OUTPUT_HANDLE, ">", $output_filename)
+			or die("ERROR: cannot create $output_filename!\n");
+	}
+	else
+	{
+		*OUTPUT_HANDLE = *STDOUT;
+	}
+
+	# Process all lines in input file
+	while (<INPUT_HANDLE>)
+	{
+		chomp($_);
+
+		if (/^(\w[\w-]*)(\s*)$/)
+		{
+			# Matched test name
+			# Name starts with alphanum or _, continues with
+			# alphanum, _ or -
+			print(OUTPUT_HANDLE "TN: $1\n");
+			$empty_line = "ignore";
+		}
+		elsif (/^(\s+)(\S.*?)\s*$/)
+		{
+			# Matched test description
+			if ($empty_line eq "insert")
+			{
+				# Write preserved empty line
+				print(OUTPUT_HANDLE "TD: \n");
+			}
+			print(OUTPUT_HANDLE "TD: $2\n");
+			$empty_line = "observe";
+		}
+		elsif (/^\s*$/)
+		{
+			# Matched empty line to preserve paragraph separation
+			# inside description text
+			if ($empty_line eq "observe")
+			{
+				$empty_line = "insert";
+			}
+		}
+	}
+
+	# Close output file if defined
+	if ($output_filename)
+	{
+		close(OUTPUT_HANDLE);
+	}
+
+	close(INPUT_HANDLE);
+}
+
+sub warn_handler($)
+{
+	my ($msg) = @_;
+
+	warn("$tool_name: $msg");
+}
+
+sub die_handler($)
+{
+	my ($msg) = @_;
+
+	die("$tool_name: $msg");
+}
diff --git a/ThirdParty/lcov/bin/genhtml b/ThirdParty/lcov/bin/genhtml
new file mode 100755
index 0000000000000000000000000000000000000000..2352300c11403acdc39f4b27582d68f6387ac685
--- /dev/null
+++ b/ThirdParty/lcov/bin/genhtml
@@ -0,0 +1,5974 @@
+#!/usr/bin/env perl
+#
+#   Copyright (c) International Business Machines  Corp., 2002,2012
+#
+#   This program is free software;  you can redistribute it and/or modify
+#   it under the terms of the GNU General Public License as published by
+#   the Free Software Foundation; either version 2 of the License, or (at
+#   your option) any later version.
+#
+#   This program is distributed in the hope that it will be useful, but
+#   WITHOUT ANY WARRANTY;  without even the implied warranty of
+#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+#   General Public License for more details. 
+#
+#   You should have received a copy of the GNU General Public License
+#   along with this program;  if not, write to the Free Software
+#   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#
+# genhtml
+#
+#   This script generates HTML output from .info files as created by the
+#   geninfo script. Call it with --help and refer to the genhtml man page
+#   to get information on usage and available options.
+#
+#
+# History:
+#   2002-08-23 created by Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
+#                         IBM Lab Boeblingen
+#        based on code by Manoj Iyer <manjo@mail.utexas.edu> and
+#                         Megan Bock <mbock@us.ibm.com>
+#                         IBM Austin
+#   2002-08-27 / Peter Oberparleiter: implemented frame view
+#   2002-08-29 / Peter Oberparleiter: implemented test description filtering
+#                so that by default only descriptions for test cases which
+#                actually hit some source lines are kept
+#   2002-09-05 / Peter Oberparleiter: implemented --no-sourceview
+#   2002-09-05 / Mike Kobler: One of my source file paths includes a "+" in
+#                the directory name.  I found that genhtml.pl died when it
+#                encountered it. I was able to fix the problem by modifying
+#                the string with the escape character before parsing it.
+#   2002-10-26 / Peter Oberparleiter: implemented --num-spaces
+#   2003-04-07 / Peter Oberparleiter: fixed bug which resulted in an error
+#                when trying to combine .info files containing data without
+#                a test name
+#   2003-04-10 / Peter Oberparleiter: extended fix by Mike to also cover
+#                other special characters
+#   2003-04-30 / Peter Oberparleiter: made info write to STDERR, not STDOUT
+#   2003-07-10 / Peter Oberparleiter: added line checksum support
+#   2004-08-09 / Peter Oberparleiter: added configuration file support
+#   2005-03-04 / Cal Pierog: added legend to HTML output, fixed coloring of
+#                "good coverage" background
+#   2006-03-18 / Marcus Boerger: added --custom-intro, --custom-outro and
+#                overwrite --no-prefix if --prefix is present
+#   2006-03-20 / Peter Oberparleiter: changes to custom_* function (rename
+#                to html_prolog/_epilog, minor modifications to implementation),
+#                changed prefix/noprefix handling to be consistent with current
+#                logic
+#   2006-03-20 / Peter Oberparleiter: added --html-extension option
+#   2008-07-14 / Tom Zoerner: added --function-coverage command line option;
+#                added function table to source file page
+#   2008-08-13 / Peter Oberparleiter: modified function coverage
+#                implementation (now enabled per default),
+#                introduced sorting option (enabled per default)
+#
+
+use strict;
+use warnings;
+use File::Basename;
+use File::Temp qw(tempfile);
+use Getopt::Long;
+use Digest::MD5 qw(md5_base64);
+use Cwd qw/abs_path cwd/;
+
+
+# Global constants
+our $title		= "LCOV - code coverage report";
+our $tool_dir		= abs_path(dirname($0));
+our $lcov_version	= "LCOV version 1.14";
+our $lcov_url		= "http://ltp.sourceforge.net/coverage/lcov.php";
+our $tool_name		= basename($0);
+
+# Specify coverage rate default precision
+our $default_precision = 1;
+
+# Specify coverage rate limits (in %) for classifying file entries
+# HI:   $hi_limit <= rate <= 100          graph color: green
+# MED: $med_limit <= rate <  $hi_limit    graph color: orange
+# LO:          0  <= rate <  $med_limit   graph color: red
+
+# For line coverage/all coverage types if not specified
+our $hi_limit = 90;
+our $med_limit = 75;
+
+# For function coverage
+our $fn_hi_limit;
+our $fn_med_limit;
+
+# For branch coverage
+our $br_hi_limit;
+our $br_med_limit;
+
+# Width of overview image
+our $overview_width = 80;
+
+# Resolution of overview navigation: this number specifies the maximum
+# difference in lines between the position a user selected from the overview
+# and the position the source code window is scrolled to.
+our $nav_resolution = 4;
+
+# Clicking a line in the overview image should show the source code view at
+# a position a bit further up so that the requested line is not the first
+# line in the window. This number specifies that offset in lines.
+our $nav_offset = 10;
+
+# Clicking on a function name should show the source code at a position a
+# few lines before the first line of code of that function. This number
+# specifies that offset in lines.
+our $func_offset = 2;
+
+our $overview_title = "top level";
+
+# Width for line coverage information in the source code view
+our $line_field_width = 12;
+
+# Width for branch coverage information in the source code view
+our $br_field_width = 16;
+
+# Internal Constants
+
+# Header types
+our $HDR_DIR		= 0;
+our $HDR_FILE		= 1;
+our $HDR_SOURCE		= 2;
+our $HDR_TESTDESC	= 3;
+our $HDR_FUNC		= 4;
+
+# Sort types
+our $SORT_FILE		= 0;
+our $SORT_LINE		= 1;
+our $SORT_FUNC		= 2;
+our $SORT_BRANCH	= 3;
+
+# Fileview heading types
+our $HEAD_NO_DETAIL	= 1;
+our $HEAD_DETAIL_HIDDEN	= 2;
+our $HEAD_DETAIL_SHOWN	= 3;
+
+# Additional offsets used when converting branch coverage data to HTML
+our $BR_LEN	= 3;
+our $BR_OPEN	= 4;
+our $BR_CLOSE	= 5;
+
+# Branch data combination types
+our $BR_SUB = 0;
+our $BR_ADD = 1;
+
+# Error classes which users may specify to ignore during processing
+our $ERROR_SOURCE	= 0;
+our %ERROR_ID = (
+	"source" => $ERROR_SOURCE,
+);
+
+# Data related prototypes
+sub print_usage(*);
+sub gen_html();
+sub html_create($$);
+sub process_dir($);
+sub process_file($$$);
+sub info(@);
+sub read_info_file($);
+sub get_info_entry($);
+sub set_info_entry($$$$$$$$$;$$$$$$);
+sub get_prefix($@);
+sub shorten_prefix($);
+sub get_dir_list(@);
+sub get_relative_base_path($);
+sub read_testfile($);
+sub get_date_string();
+sub create_sub_dir($);
+sub subtract_counts($$);
+sub add_counts($$);
+sub apply_baseline($$);
+sub remove_unused_descriptions();
+sub get_found_and_hit($);
+sub get_affecting_tests($$$);
+sub combine_info_files($$);
+sub merge_checksums($$$);
+sub combine_info_entries($$$);
+sub apply_prefix($@);
+sub system_no_output($@);
+sub read_config($);
+sub apply_config($);
+sub get_html_prolog($);
+sub get_html_epilog($);
+sub write_dir_page($$$$$$$$$$$$$$$$$);
+sub classify_rate($$$$);
+sub combine_brcount($$$;$);
+sub get_br_found_and_hit($);
+sub warn_handler($);
+sub die_handler($);
+sub parse_ignore_errors(@);
+sub parse_dir_prefix(@);
+sub rate($$;$$$);
+
+
+# HTML related prototypes
+sub escape_html($);
+sub get_bar_graph_code($$$);
+
+sub write_png_files();
+sub write_htaccess_file();
+sub write_css_file();
+sub write_description_file($$$$$$$);
+sub write_function_table(*$$$$$$$$$$);
+
+sub write_html(*$);
+sub write_html_prolog(*$$);
+sub write_html_epilog(*$;$);
+
+sub write_header(*$$$$$$$$$$);
+sub write_header_prolog(*$);
+sub write_header_line(*@);
+sub write_header_epilog(*$);
+
+sub write_file_table(*$$$$$$$);
+sub write_file_table_prolog(*$@);
+sub write_file_table_entry(*$$$@);
+sub write_file_table_detail_entry(*$@);
+sub write_file_table_epilog(*);
+
+sub write_test_table_prolog(*$);
+sub write_test_table_entry(*$$);
+sub write_test_table_epilog(*);
+
+sub write_source($$$$$$$);
+sub write_source_prolog(*);
+sub write_source_line(*$$$$$);
+sub write_source_epilog(*);
+
+sub write_frameset(*$$$);
+sub write_overview_line(*$$$);
+sub write_overview(*$$$$);
+
+# External prototype (defined in genpng)
+sub gen_png($$$@);
+
+
+# Global variables & initialization
+our %info_data;		# Hash containing all data from .info file
+our @opt_dir_prefix;	# Array of prefixes to remove from all sub directories
+our @dir_prefix;
+our %test_description;	# Hash containing test descriptions if available
+our $date = get_date_string();
+
+our @info_filenames;	# List of .info files to use as data source
+our $test_title;	# Title for output as written to each page header
+our $output_directory;	# Name of directory in which to store output
+our $base_filename;	# Optional name of file containing baseline data
+our $desc_filename;	# Name of file containing test descriptions
+our $css_filename;	# Optional name of external stylesheet file to use
+our $quiet;		# If set, suppress information messages
+our $help;		# Help option flag
+our $version;		# Version option flag
+our $show_details;	# If set, generate detailed directory view
+our $no_prefix;		# If set, do not remove filename prefix
+our $func_coverage;	# If set, generate function coverage statistics
+our $no_func_coverage;	# Disable func_coverage
+our $br_coverage;	# If set, generate branch coverage statistics
+our $no_br_coverage;	# Disable br_coverage
+our $sort = 1;		# If set, provide directory listings with sorted entries
+our $no_sort;		# Disable sort
+our $frames;		# If set, use frames for source code view
+our $keep_descriptions;	# If set, do not remove unused test case descriptions
+our $no_sourceview;	# If set, do not create a source code view for each file
+our $highlight;		# If set, highlight lines covered by converted data only
+our $legend;		# If set, include legend in output
+our $tab_size = 8;	# Number of spaces to use in place of tab
+our $config;		# Configuration file contents
+our $html_prolog_file;	# Custom HTML prolog file (up to and including <body>)
+our $html_epilog_file;	# Custom HTML epilog file (from </body> onwards)
+our $html_prolog;	# Actual HTML prolog
+our $html_epilog;	# Actual HTML epilog
+our $html_ext = "html";	# Extension for generated HTML files
+our $html_gzip = 0;	# Compress with gzip
+our $demangle_cpp = 0;	# Demangle C++ function names
+our @opt_ignore_errors;	# Ignore certain error classes during processing
+our @ignore;
+our $opt_config_file;	# User-specified configuration file location
+our %opt_rc;
+our $opt_missed;	# List/sort lines by missed counts
+our $charset = "UTF-8";	# Default charset for HTML pages
+our @fileview_sortlist;
+our @fileview_sortname = ("", "-sort-l", "-sort-f", "-sort-b");
+our @funcview_sortlist;
+our @rate_name = ("Lo", "Med", "Hi");
+our @rate_png = ("ruby.png", "amber.png", "emerald.png");
+our $lcov_func_coverage = 1;
+our $lcov_branch_coverage = 0;
+our $rc_desc_html = 0;	# lcovrc: genhtml_desc_html
+
+our $cwd = cwd();	# Current working directory
+
+
+#
+# Code entry point
+#
+
+$SIG{__WARN__} = \&warn_handler;
+$SIG{__DIE__} = \&die_handler;
+
+# Check command line for a configuration file name
+Getopt::Long::Configure("pass_through", "no_auto_abbrev");
+GetOptions("config-file=s" => \$opt_config_file,
+	   "rc=s%" => \%opt_rc);
+Getopt::Long::Configure("default");
+
+{
+	# Remove spaces around rc options
+	my %new_opt_rc;
+
+	while (my ($key, $value) = each(%opt_rc)) {
+		$key =~ s/^\s+|\s+$//g;
+		$value =~ s/^\s+|\s+$//g;
+
+		$new_opt_rc{$key} = $value;
+	}
+	%opt_rc = %new_opt_rc;
+}
+
+# Read configuration file if available
+if (defined($opt_config_file)) {
+	$config = read_config($opt_config_file);
+} elsif (defined($ENV{"HOME"}) && (-r $ENV{"HOME"}."/.lcovrc"))
+{
+	$config = read_config($ENV{"HOME"}."/.lcovrc");
+}
+elsif (-r "/etc/lcovrc")
+{
+	$config = read_config("/etc/lcovrc");
+} elsif (-r "/usr/local/etc/lcovrc")
+{
+	$config = read_config("/usr/local/etc/lcovrc");
+}
+
+if ($config || %opt_rc)
+{
+	# Copy configuration file and --rc values to variables
+	apply_config({
+		"genhtml_css_file"		=> \$css_filename,
+		"genhtml_hi_limit"		=> \$hi_limit,
+		"genhtml_med_limit"		=> \$med_limit,
+		"genhtml_line_field_width"	=> \$line_field_width,
+		"genhtml_overview_width"	=> \$overview_width,
+		"genhtml_nav_resolution"	=> \$nav_resolution,
+		"genhtml_nav_offset"		=> \$nav_offset,
+		"genhtml_keep_descriptions"	=> \$keep_descriptions,
+		"genhtml_no_prefix"		=> \$no_prefix,
+		"genhtml_no_source"		=> \$no_sourceview,
+		"genhtml_num_spaces"		=> \$tab_size,
+		"genhtml_highlight"		=> \$highlight,
+		"genhtml_legend"		=> \$legend,
+		"genhtml_html_prolog"		=> \$html_prolog_file,
+		"genhtml_html_epilog"		=> \$html_epilog_file,
+		"genhtml_html_extension"	=> \$html_ext,
+		"genhtml_html_gzip"		=> \$html_gzip,
+		"genhtml_precision"		=> \$default_precision,
+		"genhtml_function_hi_limit"	=> \$fn_hi_limit,
+		"genhtml_function_med_limit"	=> \$fn_med_limit,
+		"genhtml_function_coverage"	=> \$func_coverage,
+		"genhtml_branch_hi_limit"	=> \$br_hi_limit,
+		"genhtml_branch_med_limit"	=> \$br_med_limit,
+		"genhtml_branch_coverage"	=> \$br_coverage,
+		"genhtml_branch_field_width"	=> \$br_field_width,
+		"genhtml_sort"			=> \$sort,
+		"genhtml_charset"		=> \$charset,
+		"genhtml_desc_html"		=> \$rc_desc_html,
+		"genhtml_demangle_cpp"		=> \$demangle_cpp,
+		"genhtml_missed"		=> \$opt_missed,
+		"lcov_function_coverage"	=> \$lcov_func_coverage,
+		"lcov_branch_coverage"		=> \$lcov_branch_coverage,
+		});
+}
+
+# Copy related values if not specified
+$fn_hi_limit	= $hi_limit if (!defined($fn_hi_limit));
+$fn_med_limit	= $med_limit if (!defined($fn_med_limit));
+$br_hi_limit	= $hi_limit if (!defined($br_hi_limit));
+$br_med_limit	= $med_limit if (!defined($br_med_limit));
+$func_coverage	= $lcov_func_coverage if (!defined($func_coverage));
+$br_coverage	= $lcov_branch_coverage if (!defined($br_coverage));
+
+# Parse command line options
+if (!GetOptions("output-directory|o=s"	=> \$output_directory,
+		"title|t=s"		=> \$test_title,
+		"description-file|d=s"	=> \$desc_filename,
+		"keep-descriptions|k"	=> \$keep_descriptions,
+		"css-file|c=s"		=> \$css_filename,
+		"baseline-file|b=s"	=> \$base_filename,
+		"prefix|p=s"		=> \@opt_dir_prefix,
+		"num-spaces=i"		=> \$tab_size,
+		"no-prefix"		=> \$no_prefix,
+		"no-sourceview"		=> \$no_sourceview,
+		"show-details|s"	=> \$show_details,
+		"frames|f"		=> \$frames,
+		"highlight"		=> \$highlight,
+		"legend"		=> \$legend,
+		"quiet|q"		=> \$quiet,
+		"help|h|?"		=> \$help,
+		"version|v"		=> \$version,
+		"html-prolog=s"		=> \$html_prolog_file,
+		"html-epilog=s"		=> \$html_epilog_file,
+		"html-extension=s"	=> \$html_ext,
+		"html-gzip"		=> \$html_gzip,
+		"function-coverage"	=> \$func_coverage,
+		"no-function-coverage"	=> \$no_func_coverage,
+		"branch-coverage"	=> \$br_coverage,
+		"no-branch-coverage"	=> \$no_br_coverage,
+		"sort"			=> \$sort,
+		"no-sort"		=> \$no_sort,
+		"demangle-cpp"		=> \$demangle_cpp,
+		"ignore-errors=s"	=> \@opt_ignore_errors,
+		"config-file=s"		=> \$opt_config_file,
+		"rc=s%"			=> \%opt_rc,
+		"precision=i"		=> \$default_precision,
+		"missed"		=> \$opt_missed,
+		))
+{
+	print(STDERR "Use $tool_name --help to get usage information\n");
+	exit(1);
+} else {
+	# Merge options
+	if ($no_func_coverage) {
+		$func_coverage = 0;
+	}
+	if ($no_br_coverage) {
+		$br_coverage = 0;
+	}
+
+	# Merge sort options
+	if ($no_sort) {
+		$sort = 0;
+	}
+}
+
+@info_filenames = @ARGV;
+
+# Check for help option
+if ($help)
+{
+	print_usage(*STDOUT);
+	exit(0);
+}
+
+# Check for version option
+if ($version)
+{
+	print("$tool_name: $lcov_version\n");
+	exit(0);
+}
+
+# Determine which errors the user wants us to ignore
+parse_ignore_errors(@opt_ignore_errors);
+
+# Split the list of prefixes if needed
+parse_dir_prefix(@opt_dir_prefix);
+
+# Check for info filename
+if (!@info_filenames)
+{
+	die("No filename specified\n".
+	    "Use $tool_name --help to get usage information\n");
+}
+
+# Generate a title if none is specified
+if (!$test_title)
+{
+	if (scalar(@info_filenames) == 1)
+	{
+		# Only one filename specified, use it as title
+		$test_title = basename($info_filenames[0]);
+	}
+	else
+	{
+		# More than one filename specified, use default title
+		$test_title = "unnamed";
+	}
+}
+
+# Make sure css_filename is an absolute path (in case we're changing
+# directories)
+if ($css_filename)
+{
+	if (!($css_filename =~ /^\/(.*)$/))
+	{
+		$css_filename = $cwd."/".$css_filename;
+	}
+}
+
+# Make sure tab_size is within valid range
+if ($tab_size < 1)
+{
+	print(STDERR "ERROR: invalid number of spaces specified: ".
+		     "$tab_size!\n");
+	exit(1);
+}
+
+# Get HTML prolog and epilog
+$html_prolog = get_html_prolog($html_prolog_file);
+$html_epilog = get_html_epilog($html_epilog_file);
+
+# Issue a warning if --no-sourceview is enabled together with --frames
+if ($no_sourceview && defined($frames))
+{
+	warn("WARNING: option --frames disabled because --no-sourceview ".
+	     "was specified!\n");
+	$frames = undef;
+}
+
+# Issue a warning if --no-prefix is enabled together with --prefix
+if ($no_prefix && @dir_prefix)
+{
+	warn("WARNING: option --prefix disabled because --no-prefix was ".
+	     "specified!\n");
+	@dir_prefix = undef;
+}
+
+@fileview_sortlist = ($SORT_FILE);
+@funcview_sortlist = ($SORT_FILE);
+
+if ($sort) {
+	push(@fileview_sortlist, $SORT_LINE);
+	push(@fileview_sortlist, $SORT_FUNC) if ($func_coverage);
+	push(@fileview_sortlist, $SORT_BRANCH) if ($br_coverage);
+	push(@funcview_sortlist, $SORT_LINE);
+}
+
+if ($frames)
+{
+	# Include genpng code needed for overview image generation
+	do("$tool_dir/genpng");
+}
+
+# Ensure that the c++filt tool is available when using --demangle-cpp
+if ($demangle_cpp)
+{
+	if (system_no_output(3, "c++filt", "--version")) {
+		die("ERROR: could not find c++filt tool needed for ".
+		    "--demangle-cpp\n");
+	}
+}
+
+# Make sure precision is within valid range
+if ($default_precision < 1 || $default_precision > 4)
+{
+	die("ERROR: specified precision is out of range (1 to 4)\n");
+}
+
+
+# Make sure output_directory exists, create it if necessary
+if ($output_directory)
+{
+	stat($output_directory);
+
+	if (! -e _)
+	{
+		create_sub_dir($output_directory);
+	}
+}
+
+# Do something
+gen_html();
+
+exit(0);
+
+
+
+#
+# print_usage(handle)
+#
+# Print usage information.
+#
+
+sub print_usage(*)
+{
+	local *HANDLE = $_[0];
+
+	print(HANDLE <<END_OF_USAGE);
+Usage: $tool_name [OPTIONS] INFOFILE(S)
+
+Create HTML output for coverage data found in INFOFILE. Note that INFOFILE
+may also be a list of filenames.
+
+Misc:
+  -h, --help                        Print this help, then exit
+  -v, --version                     Print version number, then exit
+  -q, --quiet                       Do not print progress messages
+      --config-file FILENAME        Specify configuration file location
+      --rc SETTING=VALUE            Override configuration file setting
+      --ignore-errors ERRORS        Continue after ERRORS (source)
+
+Operation:
+  -o, --output-directory OUTDIR     Write HTML output to OUTDIR
+  -s, --show-details                Generate detailed directory view
+  -d, --description-file DESCFILE   Read test case descriptions from DESCFILE
+  -k, --keep-descriptions           Do not remove unused test descriptions
+  -b, --baseline-file BASEFILE      Use BASEFILE as baseline file
+  -p, --prefix PREFIX               Remove PREFIX from all directory names
+      --no-prefix                   Do not remove prefix from directory names
+      --(no-)function-coverage      Enable (disable) function coverage display
+      --(no-)branch-coverage        Enable (disable) branch coverage display
+
+HTML output:
+  -f, --frames                      Use HTML frames for source code view
+  -t, --title TITLE                 Display TITLE in header of all pages
+  -c, --css-file CSSFILE            Use external style sheet file CSSFILE
+      --no-source                   Do not create source code view
+      --num-spaces NUM              Replace tabs with NUM spaces in source view
+      --highlight                   Highlight lines with converted-only data
+      --legend                      Include color legend in HTML output
+      --html-prolog FILE            Use FILE as HTML prolog for generated pages
+      --html-epilog FILE            Use FILE as HTML epilog for generated pages
+      --html-extension EXT          Use EXT as filename extension for pages
+      --html-gzip                   Use gzip to compress HTML
+      --(no-)sort                   Enable (disable) sorted coverage views
+      --demangle-cpp                Demangle C++ function names
+      --precision NUM               Set precision of coverage rate
+      --missed                      Show miss counts as negative numbers
+
+For more information see: $lcov_url
+END_OF_USAGE
+	;
+}
+
+
+#
+# get_rate(found, hit)
+#
+# Return a relative value for the specified found&hit values
+# which is used for sorting the corresponding entries in a
+# file list.
+#
+
+sub get_rate($$)
+{
+	my ($found, $hit) = @_;
+
+	# Files without instrumented entries get the highest sort key
+	return 10000 unless ($found);
+
+	# Coverage permille, scaled so that ties between equal rates are
+	# broken by the total number of entries
+	my $permille = int($hit * 1000 / $found);
+	return $permille * 10 + 2 - 1 / $found;
+}
+
+
+#
+# get_overall_line(found, hit, name_singular, name_plural)
+#
+# Return a string containing overall information for the specified
+# found/hit data.
+#
+
+sub get_overall_line($$$$)
+{
+	my ($found, $hit, $name_sn, $name_pl) = @_;
+
+	# Nothing was instrumented at all
+	return "no data found" unless (defined($found) && $found != 0);
+
+	# Choose singular or plural noun depending on the count
+	my $name = ($found == 1) ? $name_sn : $name_pl;
+
+	return rate($hit, $found, "% ($hit of $found $name)");
+}
+
+
+#
+# print_overall_rate(ln_do, ln_found, ln_hit, fn_do, fn_found, fn_hit, br_do
+#                    br_found, br_hit)
+#
+# Print overall coverage rates for the specified coverage types.
+#
+
+sub print_overall_rate($$$$$$$$$)
+{
+	my ($ln_do, $ln_found, $ln_hit, $fn_do, $fn_found, $fn_hit,
+	    $br_do, $br_found, $br_hit) = @_;
+
+	info("Overall coverage rate:\n");
+
+	# One summary line per enabled coverage type
+	if ($ln_do) {
+		info("  lines......: %s\n",
+		     get_overall_line($ln_found, $ln_hit, "line", "lines"));
+	}
+	if ($fn_do) {
+		info("  functions..: %s\n",
+		     get_overall_line($fn_found, $fn_hit, "function",
+				      "functions"));
+	}
+	if ($br_do) {
+		info("  branches...: %s\n",
+		     get_overall_line($br_found, $br_hit, "branch",
+				      "branches"));
+	}
+}
+
+sub get_fn_list($)
+{
+	my ($info) = @_;
+	my %fns;
+
+	# Collect function names from both the line-number map ("func")
+	# and the call-count map ("sumfnc") of every file entry
+	foreach my $filename (keys(%{$info})) {
+		my $data = $info->{$filename};
+
+		foreach my $map ($data->{"func"}, $data->{"sumfnc"}) {
+			next if (!defined($map));
+			$fns{$_} = 1 foreach (keys(%{$map}));
+		}
+	}
+
+	my @result = keys(%fns);
+
+	return \@result;
+}
+
+#
+# rename_functions(info, conv)
+#
+# Rename all function names in INFO according to CONV: OLD_NAME -> NEW_NAME.
+# In case two functions demangle to the same name, assume that they are
+# different object code implementations for the same source function.
+#
+
+sub rename_functions($$)
+{
+	my ($info, $conv) = @_;
+
+	# INFO: filename -> file data, as produced by read_info_file()
+	# CONV: old function name -> new function name
+	# NOTE(review): assumes CONV has an entry for every function name
+	# occurring in INFO - TODO confirm with callers
+	foreach my $filename (keys(%{$info})) {
+		my $data = $info->{$filename};
+		my $funcdata;
+		my $testfncdata;
+		my $sumfnccount;
+		my %newfuncdata;
+		my %newsumfnccount;
+		my $f_found;
+		my $f_hit;
+
+		# funcdata: function name -> line number
+		$funcdata = $data->{"func"};
+		foreach my $fn (keys(%{$funcdata})) {
+			my $cn = $conv->{$fn};
+
+			# Abort if two functions on different lines map to the
+			# same demangled name.
+			if (defined($newfuncdata{$cn}) &&
+			    $newfuncdata{$cn} != $funcdata->{$fn}) {
+				die("ERROR: Demangled function name $cn ".
+				    "maps to different lines (".
+				    $newfuncdata{$cn}." vs ".
+				    $funcdata->{$fn}.") in $filename\n");
+			}
+			$newfuncdata{$cn} = $funcdata->{$fn};
+		}
+		$data->{"func"} = \%newfuncdata;
+
+		# testfncdata: test name -> testfnccount
+		# testfnccount: function name -> execution count
+		$testfncdata = $data->{"testfnc"};
+		foreach my $tn (keys(%{$testfncdata})) {
+			my $testfnccount = $testfncdata->{$tn};
+			my %newtestfnccount;
+
+			foreach my $fn (keys(%{$testfnccount})) {
+				my $cn = $conv->{$fn};
+
+				# Add counts for different functions that map
+				# to the same name.
+				$newtestfnccount{$cn} +=
+					$testfnccount->{$fn};
+			}
+			$testfncdata->{$tn} = \%newtestfnccount;
+		}
+
+		# sumfnccount: function name -> execution count
+		$sumfnccount = $data->{"sumfnc"};
+		foreach my $fn (keys(%{$sumfnccount})) {
+			my $cn = $conv->{$fn};
+
+			# Add counts for different functions that map
+			# to the same name.
+			$newsumfnccount{$cn} += $sumfnccount->{$fn};
+		}
+		$data->{"sumfnc"} = \%newsumfnccount;
+
+		# Update function found and hit counts since they may have
+		# changed by merging names above
+		$f_found = 0;
+		$f_hit = 0;
+		foreach my $fn (keys(%newsumfnccount)) {
+			$f_found++;
+			$f_hit++ if ($newsumfnccount{$fn} > 0);
+		}
+		$data->{"f_found"} = $f_found;
+		$data->{"f_hit"} = $f_hit;
+	}
+}
+
+#
+# gen_html()
+#
+# Generate a set of HTML pages from contents of .info file INFO_FILENAME.
+# Files will be written to the current directory. If provided, test case
+# descriptions will be read from .tests file TEST_FILENAME and included
+# in output.
+#
+# Die on error.
+#
+
+sub gen_html()
+{
+	local *HTML_HANDLE;
+	my %overview;		# Per-directory results for the top-level table
+	my %base_data;
+	my $lines_found;
+	my $lines_hit;
+	my $fn_found;
+	my $fn_hit;
+	my $br_found;
+	my $br_hit;
+	my $overall_found = 0;
+	my $overall_hit = 0;
+	my $total_fn_found = 0;
+	my $total_fn_hit = 0;
+	my $total_br_found = 0;
+	my $total_br_hit = 0;
+	my $dir_name;
+	my $link_name;
+	my @dir_list;
+	my %new_info;
+
+	# Read in all specified .info files
+	foreach (@info_filenames)
+	{
+		%new_info = %{read_info_file($_)};
+
+		# Combine %new_info with %info_data
+		%info_data = %{combine_info_files(\%info_data, \%new_info)};
+	}
+
+	info("Found %d entries.\n", scalar(keys(%info_data)));
+
+	# Read and apply baseline data if specified
+	if ($base_filename)
+	{
+		# Read baseline file
+		info("Reading baseline file $base_filename\n");
+		%base_data = %{read_info_file($base_filename)};
+		info("Found %d entries.\n", scalar(keys(%base_data)));
+
+		# Apply baseline
+		info("Subtracting baseline data.\n");
+		%info_data = %{apply_baseline(\%info_data, \%base_data)};
+	}
+
+	@dir_list = get_dir_list(keys(%info_data));
+
+	# Determine which filename prefix to strip from displayed paths
+	if ($no_prefix)
+	{
+		# User requested that we leave filenames alone
+		info("User asked not to remove filename prefix\n");
+	}
+	elsif (! @dir_prefix)
+	{
+		# Get prefix common to most directories in list
+		my $prefix = get_prefix(1, keys(%info_data));
+
+		if ($prefix)
+		{
+			info("Found common filename prefix \"$prefix\"\n");
+			$dir_prefix[0] = $prefix;
+
+		}
+		else
+		{
+			info("No common filename prefix found!\n");
+			$no_prefix=1;
+		}
+	}
+	else
+	{
+		my $msg = "Using user-specified filename prefix ";
+		for my $i (0 .. $#dir_prefix)
+		{
+				$dir_prefix[$i] =~ s/\/+$//;
+				$msg .= ", " unless 0 == $i;
+				$msg .= "\"" . $dir_prefix[$i] . "\"";
+		}
+		info($msg . "\n");
+	}
+
+
+	# Read in test description file if specified
+	if ($desc_filename)
+	{
+		info("Reading test description file $desc_filename\n");
+		%test_description = %{read_testfile($desc_filename)};
+
+		# Remove test descriptions which are not referenced
+		# from %info_data if user didn't tell us otherwise
+		if (!$keep_descriptions)
+		{
+			remove_unused_descriptions();
+		}
+	}
+
+	# Change to output directory if specified
+	if ($output_directory)
+	{
+		chdir($output_directory)
+			or die("ERROR: cannot change to directory ".
+			"$output_directory!\n");
+	}
+
+	info("Writing .css and .png files.\n");
+	write_css_file();
+	write_png_files();
+
+	if ($html_gzip)
+	{
+		info("Writing .htaccess file.\n");
+		write_htaccess_file();
+	}
+
+	info("Generating output.\n");
+
+	# Process each subdirectory and collect overview information
+	foreach $dir_name (@dir_list)
+	{
+		($lines_found, $lines_hit, $fn_found, $fn_hit,
+		 $br_found, $br_hit)
+			= process_dir($dir_name);
+
+		# Handle files in root directory gracefully
+		$dir_name = "root" if ($dir_name eq "");
+
+		# Remove prefix if applicable
+		if (!$no_prefix && @dir_prefix)
+		{
+			# Match directory names beginning with one of @dir_prefix
+			$dir_name = apply_prefix($dir_name,@dir_prefix);
+		}
+
+		# Generate name for directory overview HTML page
+		if ($dir_name =~ /^\/(.*)$/)
+		{
+			$link_name = substr($dir_name, 1)."/index.$html_ext";
+		}
+		else
+		{
+			$link_name = $dir_name."/index.$html_ext";
+		}
+
+		# Record per-directory results and accumulate overall totals
+		$overview{$dir_name} = [$lines_found, $lines_hit, $fn_found,
+					$fn_hit, $br_found, $br_hit, $link_name,
+					get_rate($lines_found, $lines_hit),
+					get_rate($fn_found, $fn_hit),
+					get_rate($br_found, $br_hit)];
+		$overall_found	+= $lines_found;
+		$overall_hit	+= $lines_hit;
+		$total_fn_found	+= $fn_found;
+		$total_fn_hit	+= $fn_hit;
+		$total_br_found	+= $br_found;
+		$total_br_hit	+= $br_hit;
+	}
+
+	# Generate overview page
+	info("Writing directory view page.\n");
+
+	# Create sorted pages
+	foreach (@fileview_sortlist) {
+		write_dir_page($fileview_sortname[$_], ".", "", $test_title,
+			       undef, $overall_found, $overall_hit,
+			       $total_fn_found, $total_fn_hit, $total_br_found,
+			       $total_br_hit, \%overview, {}, {}, {}, 0, $_);
+	}
+
+	# Check if there are any test case descriptions to write out
+	if (%test_description)
+	{
+		info("Writing test case description file.\n");
+		write_description_file( \%test_description,
+					$overall_found, $overall_hit,
+					$total_fn_found, $total_fn_hit,
+					$total_br_found, $total_br_hit);
+	}
+
+	print_overall_rate(1, $overall_found, $overall_hit,
+			   $func_coverage, $total_fn_found, $total_fn_hit,
+			   $br_coverage, $total_br_found, $total_br_hit);
+
+	# Return to the directory we started in
+	chdir($cwd);
+}
+
+#
+# html_create(handle, filename)
+#
+
+sub html_create($$)
+{
+	my ($handle, $filename) = @_;
+
+	if ($html_gzip)
+	{
+		# Pipe page contents through gzip for compressed output
+		open($handle, "|-", "gzip -c >'$filename'")
+			or die("ERROR: cannot open $filename for writing ".
+			       "(gzip)!\n");
+		return;
+	}
+
+	open($handle, ">", $filename)
+		or die("ERROR: cannot open $filename for writing!\n");
+}
+
+sub write_dir_page($$$$$$$$$$$$$$$$$)
+{
+	my ($name, $rel_dir, $base_dir, $title, $trunc_dir, $overall_found,
+	    $overall_hit, $total_fn_found, $total_fn_hit, $total_br_found,
+	    $total_br_hit, $overview, $testhash, $testfnchash, $testbrhash,
+	    $view_type, $sort_type) = @_;
+
+	# An undefined truncated directory name means top-level page
+	$trunc_dir = "" unless (defined($trunc_dir));
+	$title .= " - " unless ($trunc_dir eq "");
+
+	# Emit the overview page: prolog, summary header, file table, epilog
+	html_create(*HTML_HANDLE, "$rel_dir/index$name.$html_ext");
+	write_html_prolog(*HTML_HANDLE, $base_dir, "LCOV - $title$trunc_dir");
+	write_header(*HTML_HANDLE, $view_type, $trunc_dir, $rel_dir,
+		     $overall_found, $overall_hit, $total_fn_found,
+		     $total_fn_hit, $total_br_found, $total_br_hit, $sort_type);
+	write_file_table(*HTML_HANDLE, $base_dir, $overview, $testhash,
+			 $testfnchash, $testbrhash, $view_type, $sort_type);
+	write_html_epilog(*HTML_HANDLE, $base_dir);
+	close(*HTML_HANDLE);
+}
+
+
+#
+# process_dir(dir_name)
+#
+
+sub process_dir($)
+{
+	my $abs_dir = $_[0];
+	my $trunc_dir;
+	my $rel_dir = $abs_dir;
+	my $base_dir;
+	my $filename;
+	my %overview;		# Per-file results for this directory's table
+	my $lines_found;
+	my $lines_hit;
+	my $fn_found;
+	my $fn_hit;
+	my $br_found;
+	my $br_hit;
+	my $overall_found=0;
+	my $overall_hit=0;
+	my $total_fn_found=0;
+	my $total_fn_hit=0;
+	my $total_br_found = 0;
+	my $total_br_hit = 0;
+	my $base_name;
+	my $extension;
+	my $testdata;
+	my %testhash;
+	my $testfncdata;
+	my %testfnchash;
+	my $testbrdata;
+	my %testbrhash;
+	my @sort_list;
+	local *HTML_HANDLE;
+
+	# Remove prefix if applicable
+	if (!$no_prefix)
+	{
+		# Match directory name beginning with one of @dir_prefix
+		$rel_dir = apply_prefix($rel_dir,@dir_prefix);
+	}
+
+	$trunc_dir = $rel_dir;
+
+	# Remove leading /
+	if ($rel_dir =~ /^\/(.*)$/)
+	{
+		$rel_dir = substr($rel_dir, 1);
+	}
+
+	# Handle files in root directory gracefully
+	$rel_dir = "root" if ($rel_dir eq "");
+	$trunc_dir = "root" if ($trunc_dir eq "");
+
+	$base_dir = get_relative_base_path($rel_dir);
+
+	create_sub_dir($rel_dir);
+
+	# Match filenames which specify files in this directory, not including
+	# sub-directories
+	foreach $filename (grep(/^\Q$abs_dir\E\/[^\/]*$/,keys(%info_data)))
+	{
+		my $page_link;
+		my $func_link;
+
+		($lines_found, $lines_hit, $fn_found, $fn_hit, $br_found,
+		 $br_hit, $testdata, $testfncdata, $testbrdata) =
+			process_file($trunc_dir, $rel_dir, $filename);
+
+		$base_name = basename($filename);
+
+		if ($no_sourceview) {
+			$page_link = "";
+		} elsif ($frames) {
+			# Link to frameset page
+			$page_link = "$base_name.gcov.frameset.$html_ext";
+		} else {
+			# Link directory to source code view page
+			$page_link = "$base_name.gcov.$html_ext";
+		}
+		# Record per-file results for the directory's file table
+		$overview{$base_name} = [$lines_found, $lines_hit, $fn_found,
+					 $fn_hit, $br_found, $br_hit,
+					 $page_link,
+					 get_rate($lines_found, $lines_hit),
+					 get_rate($fn_found, $fn_hit),
+					 get_rate($br_found, $br_hit)];
+
+		$testhash{$base_name} = $testdata;
+		$testfnchash{$base_name} = $testfncdata;
+		$testbrhash{$base_name} = $testbrdata;
+
+		# Accumulate directory totals
+		$overall_found	+= $lines_found;
+		$overall_hit	+= $lines_hit;
+
+		$total_fn_found += $fn_found;
+		$total_fn_hit   += $fn_hit;
+
+		$total_br_found += $br_found;
+		$total_br_hit   += $br_hit;
+	}
+
+	# Create sorted pages
+	foreach (@fileview_sortlist) {
+		# Generate directory overview page (without details)
+		write_dir_page($fileview_sortname[$_], $rel_dir, $base_dir,
+			       $test_title, $trunc_dir, $overall_found,
+			       $overall_hit, $total_fn_found, $total_fn_hit,
+			       $total_br_found, $total_br_hit, \%overview, {},
+			       {}, {}, 1, $_);
+		if (!$show_details) {
+			next;
+		}
+		# Generate directory overview page including details
+		write_dir_page("-detail".$fileview_sortname[$_], $rel_dir,
+			       $base_dir, $test_title, $trunc_dir,
+			       $overall_found, $overall_hit, $total_fn_found,
+			       $total_fn_hit, $total_br_found, $total_br_hit,
+			       \%overview, \%testhash, \%testfnchash,
+			       \%testbrhash, 1, $_);
+	}
+
+	# Calculate resulting line counts
+	return ($overall_found, $overall_hit, $total_fn_found, $total_fn_hit,
+		$total_br_found, $total_br_hit);
+}
+
+
+#
+# get_converted_lines(testdata)
+#
+# Return hash of line numbers of those lines which were only covered in
+# converted data sets.
+#
+
+sub get_converted_lines($)
+{
+	my ($testdata) = @_;
+	my %converted;
+	my %nonconverted;
+	my %result;
+
+	# Collect line numbers with positive counts, split by whether the
+	# test case name marks a converted data set (",diff" suffix)
+	foreach my $testcase (keys(%{$testdata}))
+	{
+		my $hash = ($testcase =~ /,diff$/) ? \%converted
+						   : \%nonconverted;
+		my $testcount = $testdata->{$testcase};
+
+		foreach my $line (keys(%{$testcount}))
+		{
+			$hash->{$line} = 1 if ($testcount->{$line} > 0);
+		}
+	}
+
+	# Keep only lines covered exclusively by converted data sets
+	foreach my $line (keys(%converted))
+	{
+		$result{$line} = 1 unless (defined($nonconverted{$line}));
+	}
+
+	return \%result;
+}
+
+
+#
+# write_function_page(base_dir, rel_dir, trunc_dir, base_name, title,
+#                     lines_found, lines_hit, fn_found, fn_hit, br_found,
+#                     br_hit, sumcount, funcdata, sumfnccount, testfncdata,
+#                     sumbrcount, testbrdata, sort_type)
+#
+# Write the HTML page listing all functions of a single source file.
+# SORT_TYPE 0 writes the unsorted view, any other value the
+# sorted-by-count view (".func-sort-c" filename suffix).
+#
+
+sub write_function_page($$$$$$$$$$$$$$$$$$)
+{
+	my ($base_dir, $rel_dir, $trunc_dir, $base_name, $title,
+	    $lines_found, $lines_hit, $fn_found, $fn_hit, $br_found, $br_hit,
+	    $sumcount, $funcdata, $sumfnccount, $testfncdata, $sumbrcount,
+	    $testbrdata, $sort_type) = @_;
+	my $pagetitle;
+	my $filename;
+
+	# Generate function table for this file
+	if ($sort_type == 0) {
+		$filename = "$rel_dir/$base_name.func.$html_ext";
+	} else {
+		$filename = "$rel_dir/$base_name.func-sort-c.$html_ext";
+	}
+	html_create(*HTML_HANDLE, $filename);
+	$pagetitle = "LCOV - $title - $trunc_dir/$base_name - functions";
+	write_html_prolog(*HTML_HANDLE, $base_dir, $pagetitle);
+	write_header(*HTML_HANDLE, 4, "$trunc_dir/$base_name",
+		     "$rel_dir/$base_name", $lines_found, $lines_hit,
+		     $fn_found, $fn_hit, $br_found, $br_hit, $sort_type);
+	write_function_table(*HTML_HANDLE, "$base_name.gcov.$html_ext",
+			     $sumcount, $funcdata,
+			     $sumfnccount, $testfncdata, $sumbrcount,
+			     $testbrdata, $base_name,
+			     $base_dir, $sort_type);
+	write_html_epilog(*HTML_HANDLE, $base_dir, 1);
+	close(*HTML_HANDLE);
+}
+
+
+#
+# process_file(trunc_dir, rel_dir, filename)
+#
+
+sub process_file($$$)
+{
+	info("Processing file ".apply_prefix($_[2], @dir_prefix)."\n");
+
+	my $trunc_dir = $_[0];
+	my $rel_dir = $_[1];
+	my $filename = $_[2];
+	my $base_name = basename($filename);
+	my $base_dir = get_relative_base_path($rel_dir);
+	my $testdata;
+	my $testcount;
+	my $sumcount;
+	my $funcdata;
+	my $checkdata;
+	my $testfncdata;
+	my $sumfnccount;
+	my $testbrdata;
+	my $sumbrcount;
+	my $lines_found;
+	my $lines_hit;
+	my $fn_found;
+	my $fn_hit;
+	my $br_found;
+	my $br_hit;
+	my $converted;
+	my @source;
+	my $pagetitle;
+	local *HTML_HANDLE;
+
+	# Unpack all coverage data recorded for this file
+	($testdata, $sumcount, $funcdata, $checkdata, $testfncdata,
+	 $sumfnccount, $testbrdata, $sumbrcount, $lines_found, $lines_hit,
+	 $fn_found, $fn_hit, $br_found, $br_hit)
+		= get_info_entry($info_data{$filename});
+
+	# Return after this point in case user asked us not to generate
+	# source code view
+	if ($no_sourceview)
+	{
+		return ($lines_found, $lines_hit, $fn_found, $fn_hit,
+			$br_found, $br_hit, $testdata, $testfncdata,
+			$testbrdata);
+	}
+
+	# Lines covered only by converted (",diff") data sets get special
+	# treatment in the source view
+	$converted = get_converted_lines($testdata);
+	# Generate source code view for this file
+	html_create(*HTML_HANDLE, "$rel_dir/$base_name.gcov.$html_ext");
+	$pagetitle = "LCOV - $test_title - $trunc_dir/$base_name";
+	write_html_prolog(*HTML_HANDLE, $base_dir, $pagetitle);
+	write_header(*HTML_HANDLE, 2, "$trunc_dir/$base_name",
+		     "$rel_dir/$base_name", $lines_found, $lines_hit,
+		     $fn_found, $fn_hit, $br_found, $br_hit, 0);
+	@source = write_source(*HTML_HANDLE, $filename, $sumcount, $checkdata,
+			       $converted, $funcdata, $sumbrcount);
+
+	write_html_epilog(*HTML_HANDLE, $base_dir, 1);
+	close(*HTML_HANDLE);
+
+	if ($func_coverage) {
+		# Create function tables
+		foreach (@funcview_sortlist) {
+			write_function_page($base_dir, $rel_dir, $trunc_dir,
+					    $base_name, $test_title,
+					    $lines_found, $lines_hit,
+					    $fn_found, $fn_hit, $br_found,
+					    $br_hit, $sumcount,
+					    $funcdata, $sumfnccount,
+					    $testfncdata, $sumbrcount,
+					    $testbrdata, $_);
+		}
+	}
+
+	# Additional files are needed in case of frame output
+	if (!$frames)
+	{
+		return ($lines_found, $lines_hit, $fn_found, $fn_hit,
+			$br_found, $br_hit, $testdata, $testfncdata,
+			$testbrdata);
+	}
+
+	# Create overview png file
+	gen_png("$rel_dir/$base_name.gcov.png", $overview_width, $tab_size,
+		@source);
+
+	# Create frameset page
+	html_create(*HTML_HANDLE,
+		    "$rel_dir/$base_name.gcov.frameset.$html_ext");
+	write_frameset(*HTML_HANDLE, $base_dir, $base_name, $pagetitle);
+	close(*HTML_HANDLE);
+
+	# Write overview frame
+	html_create(*HTML_HANDLE,
+		    "$rel_dir/$base_name.gcov.overview.$html_ext");
+	write_overview(*HTML_HANDLE, $base_dir, $base_name, $pagetitle,
+		       scalar(@source));
+	close(*HTML_HANDLE);
+
+	return ($lines_found, $lines_hit, $fn_found, $fn_hit, $br_found,
+		$br_hit, $testdata, $testfncdata, $testbrdata);
+}
+
+
+sub compress_brcount($)
+{
+	my ($brcount) = @_;
+
+	# Convert to DB format and back to merge duplicate branch entries
+	return db_to_brcount(brcount_to_db($brcount), $brcount);
+}
+
+
+#
+# read_info_file(info_filename)
+#
+# Read in the contents of the .info file specified by INFO_FILENAME. Data will
+# be returned as a reference to a hash containing the following mappings:
+#
+# %result: for each filename found in file -> \%data
+#
+# %data: "test"  -> \%testdata
+#        "sum"   -> \%sumcount
+#        "func"  -> \%funcdata
+#        "found" -> $lines_found (number of instrumented lines found in file)
+#	 "hit"   -> $lines_hit (number of executed lines in file)
+#        "f_found" -> $fn_found (number of instrumented functions found in file)
+#	 "f_hit"   -> $fn_hit (number of executed functions in file)
+#        "b_found" -> $br_found (number of instrumented branches found in file)
+#	 "b_hit"   -> $br_hit (number of executed branches in file)
+#        "check" -> \%checkdata
+#        "testfnc" -> \%testfncdata
+#        "sumfnc"  -> \%sumfnccount
+#        "testbr"  -> \%testbrdata
+#        "sumbr"   -> \%sumbrcount
+#
+# %testdata   : name of test affecting this file -> \%testcount
+# %testfncdata: name of test affecting this file -> \%testfnccount
+# %testbrdata:  name of test affecting this file -> \%testbrcount
+#
+# %testcount   : line number   -> execution count for a single test
+# %testfnccount: function name -> execution count for a single test
+# %testbrcount : line number   -> branch coverage data for a single test
+# %sumcount    : line number   -> execution count for all tests
+# %sumfnccount : function name -> execution count for all tests
+# %sumbrcount  : line number   -> branch coverage data for all tests
+# %funcdata    : function name -> line number
+# %checkdata   : line number   -> checksum of source code line
+# $brdata      : vector of items: block, branch, taken
+# 
+# Note that .info file sections referring to the same file and test name
+# will automatically be combined by adding all execution counts.
+#
+# Note that if INFO_FILENAME ends with ".gz", it is assumed that the file
+# is compressed using GZIP. If available, GUNZIP will be used to decompress
+# this file.
+#
+# Die on error.
+#
+
+sub read_info_file($)
+{
+	my $tracefile = $_[0];		# Name of tracefile
+	my %result;			# Resulting hash: file -> data
+	my $data;			# Data handle for current entry
+	my $testdata;			#       "             "
+	my $testcount;			#       "             "
+	my $sumcount;			#       "             "
+	my $funcdata;			#       "             "
+	my $checkdata;			#       "             "
+	my $testfncdata;
+	my $testfnccount;
+	my $sumfnccount;
+	my $testbrdata;
+	my $testbrcount;
+	my $sumbrcount;
+	my $line;			# Current line read from .info file
+	my $testname;			# Current test name
+	my $filename;			# Current filename
+	my $hitcount;			# Count for lines hit
+	my $count;			# Execution count of current line
+	my $negative;			# If set, warn about negative counts
+	my $changed_testname;		# If set, warn about changed testname
+	my $line_checksum;		# Checksum of current line
+	my $notified_about_relative_paths;
+	local *INFO_HANDLE;		# Filehandle for .info file
+
+	info("Reading data file $tracefile\n");
+
+	# Check if file exists and is readable
+	stat($_[0]);
+	if (!(-r _))
+	{
+		die("ERROR: cannot read file $_[0]!\n");
+	}
+
+	# Check if this is really a plain file
+	if (!(-f _))
+	{
+		die("ERROR: not a plain file: $_[0]!\n");
+	}
+
+	# Check for .gz extension
+	if ($_[0] =~ /\.gz$/)
+	{
+		# Check for availability of GZIP tool
+		system_no_output(1, "gunzip" ,"-h")
+			and die("ERROR: gunzip command not available!\n");
+
+		# Check integrity of compressed file
+		system_no_output(1, "gunzip", "-t", $_[0])
+			and die("ERROR: integrity check failed for ".
+				"compressed file $_[0]!\n");
+
+		# Open compressed file
+		open(INFO_HANDLE, "-|", "gunzip -c '$_[0]'")
+			or die("ERROR: cannot start gunzip to decompress ".
+			       "file $_[0]!\n");
+	}
+	else
+	{
+		# Open decompressed file
+		open(INFO_HANDLE, "<", $_[0])
+			or die("ERROR: cannot read file $_[0]!\n");
+	}
+
+	$testname = "";
+	while (<INFO_HANDLE>)
+	{
+		chomp($_);
+		$line = $_;
+
+		# Switch statement: foreach over a single scalar is used as
+		# a switch on the record type; 'last' leaves the "switch"
+		foreach ($line)
+		{
+			/^TN:([^,]*)(,diff)?/ && do
+			{
+				# Test name information found
+				$testname = defined($1) ? $1 : "";
+				# Non-word characters are replaced; remember
+				# to warn the user about it later
+				if ($testname =~ s/\W/_/g)
+				{
+					$changed_testname = 1;
+				}
+				$testname .= $2 if (defined($2));
+				last;
+			};
+
+			/^[SK]F:(.*)/ && do
+			{
+				# Filename information found
+				# Retrieve data for new entry
+				$filename = File::Spec->rel2abs($1, $cwd);
+
+				if (!File::Spec->file_name_is_absolute($1) &&
+				    !$notified_about_relative_paths)
+				{
+					info("Resolved relative source file ".
+					     "path \"$1\" with CWD to ".
+					     "\"$filename\".\n");
+					$notified_about_relative_paths = 1;
+				}
+
+				$data = $result{$filename};
+				($testdata, $sumcount, $funcdata, $checkdata,
+				 $testfncdata, $sumfnccount, $testbrdata,
+				 $sumbrcount) =
+					get_info_entry($data);
+
+				if (defined($testname))
+				{
+					$testcount = $testdata->{$testname};
+					$testfnccount = $testfncdata->{$testname};
+					$testbrcount = $testbrdata->{$testname};
+				}
+				else
+				{
+					$testcount = {};
+					$testfnccount = {};
+					$testbrcount = {};
+				}
+				last;
+			};
+
+			/^DA:(\d+),(-?\d+)(,[^,\s]+)?/ && do
+			{
+				# Fix negative counts
+				$count = $2 < 0 ? 0 : $2;
+				if ($2 < 0)
+				{
+					$negative = 1;
+				}
+				# Execution count found, add to structure
+				# Add summary counts
+				$sumcount->{$1} += $count;
+
+				# Add test-specific counts
+				if (defined($testname))
+				{
+					$testcount->{$1} += $count;
+				}
+
+				# Store line checksum if available
+				if (defined($3))
+				{
+					$line_checksum = substr($3, 1);
+
+					# Does it match a previous definition
+					if (defined($checkdata->{$1}) &&
+					    ($checkdata->{$1} ne
+					     $line_checksum))
+					{
+						die("ERROR: checksum mismatch ".
+						    "at $filename:$1\n");
+					}
+
+					$checkdata->{$1} = $line_checksum;
+				}
+				last;
+			};
+
+			/^FN:(\d+),([^,]+)/ && do
+			{
+				last if (!$func_coverage);
+
+				# Function data found, add to structure
+				$funcdata->{$2} = $1;
+
+				# Also initialize function call data
+				if (!defined($sumfnccount->{$2})) {
+					$sumfnccount->{$2} = 0;
+				}
+				if (defined($testname))
+				{
+					if (!defined($testfnccount->{$2})) {
+						$testfnccount->{$2} = 0;
+					}
+				}
+				last;
+			};
+
+			/^FNDA:(\d+),([^,]+)/ && do
+			{
+				last if (!$func_coverage);
+				# Function call count found, add to structure
+				# Add summary counts
+				$sumfnccount->{$2} += $1;
+
+				# Add test-specific counts
+				if (defined($testname))
+				{
+					$testfnccount->{$2} += $1;
+				}
+				last;
+			};
+
+			/^BRDA:(\d+),(\d+),(\d+),(\d+|-)/ && do {
+				# Branch coverage data found
+				my ($line, $block, $branch, $taken) =
+				   ($1, $2, $3, $4);
+
+				last if (!$br_coverage);
+				# Branch data is accumulated as a string of
+				# "block,branch,taken:" items and compressed
+				# via compress_brcount() below
+				$sumbrcount->{$line} .=
+					"$block,$branch,$taken:";
+
+				# Add test-specific counts
+				if (defined($testname)) {
+					$testbrcount->{$line} .=
+						"$block,$branch,$taken:";
+				}
+				last;
+			};
+
+			/^end_of_record/ && do
+			{
+				# Found end of section marker
+				if ($filename)
+				{
+					# Store current section data
+					if (defined($testname))
+					{
+						$testdata->{$testname} =
+							$testcount;
+						$testfncdata->{$testname} =
+							$testfnccount;
+						$testbrdata->{$testname} =
+							$testbrcount;
+					}	
+
+					set_info_entry($data, $testdata,
+						       $sumcount, $funcdata,
+						       $checkdata, $testfncdata,
+						       $sumfnccount,
+						       $testbrdata,
+						       $sumbrcount);
+					$result{$filename} = $data;
+					last;
+				}
+			};
+
+			# default
+			last;
+		}
+	}
+	close(INFO_HANDLE);
+
+	# Calculate lines_found and lines_hit for each file
+	foreach $filename (keys(%result))
+	{
+		$data = $result{$filename};
+
+		($testdata, $sumcount, undef, undef, $testfncdata,
+		 $sumfnccount, $testbrdata, $sumbrcount) =
+			get_info_entry($data);
+
+		# Filter out empty files
+		if (scalar(keys(%{$sumcount})) == 0)
+		{
+			delete($result{$filename});
+			next;
+		}
+		# Filter out empty test cases
+		foreach $testname (keys(%{$testdata}))
+		{
+			if (!defined($testdata->{$testname}) ||
+			    scalar(keys(%{$testdata->{$testname}})) == 0)
+			{
+				delete($testdata->{$testname});
+				delete($testfncdata->{$testname});
+			}
+		}
+
+		$data->{"found"} = scalar(keys(%{$sumcount}));
+		$hitcount = 0;
+
+		foreach (keys(%{$sumcount}))
+		{
+			if ($sumcount->{$_} > 0) { $hitcount++; }
+		}
+
+		$data->{"hit"} = $hitcount;
+
+		# Get found/hit values for function call data
+		$data->{"f_found"} = scalar(keys(%{$sumfnccount}));
+		$hitcount = 0;
+
+		foreach (keys(%{$sumfnccount})) {
+			if ($sumfnccount->{$_} > 0) {
+				$hitcount++;
+			}
+		}
+		$data->{"f_hit"} = $hitcount;
+
+		# Combine branch data for the same branches
+		(undef, $data->{"b_found"}, $data->{"b_hit"}) =
+			compress_brcount($sumbrcount);
+		foreach $testname (keys(%{$testbrdata})) {
+			compress_brcount($testbrdata->{$testname});
+		}
+	}
+
+	if (scalar(keys(%result)) == 0)
+	{
+		die("ERROR: no valid records found in tracefile $tracefile\n");
+	}
+	if ($negative)
+	{
+		warn("WARNING: negative counts found in tracefile ".
+		     "$tracefile\n");
+	}
+	if ($changed_testname)
+	{
+		warn("WARNING: invalid characters removed from testname in ".
+		     "tracefile $tracefile\n");
+	}
+
+	return(\%result);
+}
+
+
+#
+# get_info_entry(hash_ref)
+#
+# Retrieve data from an entry of the structure generated by read_info_file().
+# Return a list of references to hashes:
+# (test data hash ref, sum count hash ref, funcdata hash ref, checkdata hash
+#  ref, testfncdata hash ref, sumfnccount hash ref, testbrdata hash ref,
+#  sumbrcount hash ref, lines found, lines hit, functions found,
+#  functions hit, branches found, branches hit)
+#
+
+sub get_info_entry($)
+{
+	# Return all fields of an .info file entry as a flat list; missing
+	# fields yield undef. The order must match set_info_entry().
+	# Note: $_[0] is dereferenced directly so that an undefined entry
+	# is autovivified in the caller, as with the individual accesses
+	# this replaces.
+	return @{$_[0]}{qw(test sum func check testfnc sumfnc testbr sumbr
+			   found hit f_found f_hit b_found b_hit)};
+}
+
+
+#
+# set_info_entry(hash_ref, testdata_ref, sumcount_ref, funcdata_ref,
+#                checkdata_ref, testfncdata_ref, sumfcncount_ref,
+#                testbrdata_ref, sumbrcount_ref[,lines_found,
+#                lines_hit, f_found, f_hit, $b_found, $b_hit])
+#
+# Update the hash referenced by HASH_REF with the provided data references.
+#
+
+sub set_info_entry($$$$$$$$$;$$$$$$)
+{
+	my $data_ref = shift;
+
+	# The eight data references are always stored
+	@{$data_ref}{qw(test sum func check testfnc sumfnc testbr sumbr)} =
+		splice(@_, 0, 8);
+
+	# Summary counts are optional and only stored when provided
+	my @optional = qw(found hit f_found f_hit b_found b_hit);
+	for my $i (0 .. $#optional) {
+		$data_ref->{$optional[$i]} = $_[$i] if (defined($_[$i]));
+	}
+}
+
+
+#
+# add_counts(data1_ref, data2_ref)
+#
+# DATA1_REF and DATA2_REF are references to hashes containing a mapping
+#
+#   line number -> execution count
+#
+# Return a list (RESULT_REF, LINES_FOUND, LINES_HIT) where RESULT_REF
+# is a reference to a hash containing the combined mapping in which
+# execution counts are added.
+#
+
+sub add_counts($$)
+{
+	my ($data1_ref, $data2_ref) = @_;
+	my %result;		# Combined line -> count mapping
+	my $found = 0;		# Total number of instrumented lines
+	my $hit = 0;		# Lines with a combined count > 0
+
+	# Sum up counts for every line present in hash 1
+	foreach my $line (keys(%{$data1_ref}))
+	{
+		my $count = $data1_ref->{$line};
+		my $other = $data2_ref->{$line};
+
+		$count += $other if (defined($other));
+		$result{$line} = $count;
+		$found++;
+		$hit++ if ($count > 0);
+	}
+
+	# Copy over lines which only occur in hash 2
+	foreach my $line (keys(%{$data2_ref}))
+	{
+		next if (defined($data1_ref->{$line}));
+
+		my $count = $data2_ref->{$line};
+
+		$result{$line} = $count;
+		$found++;
+		$hit++ if ($count > 0);
+	}
+
+	return (\%result, $found, $hit);
+}
+
+
+#
+# merge_checksums(ref1, ref2, filename)
+#
+# REF1 and REF2 are references to hashes containing a mapping
+#
+#   line number -> checksum
+#
+# Merge checksum lists defined in REF1 and REF2 and return reference to
+# resulting hash. Die if a checksum for a line is defined in both hashes
+# but does not match.
+#
+
+sub merge_checksums($$$)
+{
+	my ($sums1, $sums2, $filename) = @_;
+	my %merged;
+
+	# Take checksums from the first hash, verifying that any line which
+	# is also present in the second hash carries an identical checksum
+	foreach my $line (keys(%{$sums1}))
+	{
+		if (defined($sums2->{$line}) &&
+		    ($sums1->{$line} ne $sums2->{$line}))
+		{
+			die("ERROR: checksum mismatch at $filename:$line\n");
+		}
+		$merged{$line} = $sums1->{$line};
+	}
+
+	# Copy checksums from the second hash (identical or new)
+	foreach my $line (keys(%{$sums2}))
+	{
+		$merged{$line} = $sums2->{$line};
+	}
+
+	return \%merged;
+}
+
+
+#
+# merge_func_data(funcdata1, funcdata2, filename)
+#
+# Merge two hashes mapping function name -> line number. On conflicting
+# line numbers, keep the value from FUNCDATA1 and print a warning that
+# refers to FILENAME. Return reference to the merged hash.
+#
+
+sub merge_func_data($$$)
+{
+	my ($funcdata1, $funcdata2, $filename) = @_;
+	my %merged;
+
+	%merged = %{$funcdata1} if (defined($funcdata1));
+
+	foreach my $func (keys(%{$funcdata2})) {
+		my $known_line = $merged{$func};
+		my $new_line = $funcdata2->{$func};
+
+		# Same function registered at a different line: keep the
+		# existing entry and warn
+		if (defined($known_line) && ($known_line != $new_line)) {
+			warn("WARNING: function data mismatch at ".
+			     "$filename:$new_line\n");
+			next;
+		}
+		$merged{$func} = $new_line;
+	}
+
+	return \%merged;
+}
+
+
+#
+# add_fnccount(fnccount1, fnccount2)
+#
+# Add function call count data. Return list (fnccount_added, f_found, f_hit)
+# where f_found is the number of functions and f_hit the number of functions
+# with a non-zero combined call count.
+#
+
+sub add_fnccount($$)
+{
+	my ($fnccount1, $fnccount2) = @_;
+	my %sum;
+
+	%sum = %{$fnccount1} if (defined($fnccount1));
+	foreach my $function (keys(%{$fnccount2})) {
+		$sum{$function} += $fnccount2->{$function};
+	}
+
+	# A function counts as hit when it was called at least once
+	my $fn_found = scalar(keys(%sum));
+	my $fn_hit = scalar(grep({ $sum{$_} > 0 } keys(%sum)));
+
+	return (\%sum, $fn_found, $fn_hit);
+}
+
+#
+# add_testfncdata(testfncdata1, testfncdata2)
+#
+# Add function call count data for several tests. Return reference to
+# added_testfncdata.
+#
+
+sub add_testfncdata($$)
+{
+	my ($testfncdata1, $testfncdata2) = @_;
+	my %merged;
+
+	foreach my $test (keys(%{$testfncdata1})) {
+		if (defined($testfncdata2->{$test})) {
+			# Present in both data sets: add call counts
+			my ($sum) = add_fnccount($testfncdata1->{$test},
+						 $testfncdata2->{$test});
+			$merged{$test} = $sum;
+		} else {
+			# Only in data set 1: copy reference
+			$merged{$test} = $testfncdata1->{$test};
+		}
+	}
+
+	# Entries found only in data set 2: copy reference
+	foreach my $test (keys(%{$testfncdata2})) {
+		$merged{$test} = $testfncdata2->{$test}
+			unless (defined($merged{$test}));
+	}
+
+	return \%merged;
+}
+
+
+#
+# brcount_to_db(brcount)
+#
+# Convert brcount data to the following format:
+#
+# db:          line number    -> block hash
+# block hash:  block number   -> branch hash
+# branch hash: branch number  -> taken value
+#
+
+sub brcount_to_db($)
+{
+	my ($brcount) = @_;
+	my $db;
+
+	# Add branches to database; each entry has the form
+	# "block,branch,taken" separated by ":"
+	foreach my $line (keys(%{$brcount})) {
+		foreach my $entry (split(/:/, $brcount->{$line})) {
+			my ($block, $branch, $taken) = split(/,/, $entry);
+			my $sum = $db->{$line}->{$block}->{$branch};
+
+			# "-" marks a branch that was never reached;
+			# numeric taken values are accumulated
+			if (!defined($sum) || $sum eq "-") {
+				$sum = $taken;
+			} elsif ($taken ne "-") {
+				$sum += $taken;
+			}
+
+			$db->{$line}->{$block}->{$branch} = $sum;
+		}
+	}
+
+	return $db;
+}
+
+
+#
+# db_to_brcount(db[, brcount])
+#
+# Convert branch coverage data back to brcount format. If brcount is specified,
+# the converted data is directly inserted in brcount.
+#
+# Return the list (brcount, br_found, br_hit) where br_found is the total
+# number of branches and br_hit the number of branches with a positive
+# taken count.
+#
+
+sub db_to_brcount($;$)
+{
+	my ($db, $brcount) = @_;
+	my $line;
+	my $br_found = 0;
+	my $br_hit = 0;
+
+	# Convert database back to brcount format
+	# (numeric sort keeps entries ordered by line/block/branch number)
+	foreach $line (sort({$a <=> $b} keys(%{$db}))) {
+		my $ldata = $db->{$line};
+		my $brdata;
+		my $block;
+
+		foreach $block (sort({$a <=> $b} keys(%{$ldata}))) {
+			my $bdata = $ldata->{$block};
+			my $branch;
+
+			foreach $branch (sort({$a <=> $b} keys(%{$bdata}))) {
+				my $taken = $bdata->{$branch};
+
+				# "-" marks a branch that was never reached
+				$br_found++;
+				$br_hit++ if ($taken ne "-" && $taken > 0);
+				$brdata .= "$block,$branch,$taken:";
+			}
+		}
+		# Autovivifies a fresh hash ref when no brcount was passed in
+		$brcount->{$line} = $brdata;
+	}
+
+	return ($brcount, $br_found, $br_hit);
+}
+
+
+#
+# brcount_db_combine(db1, db2, op)
+#
+# db1 := db1 op db2, where
+#   db1, db2: brcount data as returned by brcount_to_db
+#   op:       one of $BR_ADD and $BR_SUB
+#
+sub brcount_db_combine($$$)
+{
+	my ($db1, $db2, $op) = @_;
+
+	foreach my $line (keys(%{$db2})) {
+		my $ldata = $db2->{$line};
+
+		foreach my $block (keys(%{$ldata})) {
+			my $bdata = $ldata->{$block};
+
+			foreach my $branch (keys(%{$bdata})) {
+				my $taken = $bdata->{$branch};
+				my $new = $db1->{$line}->{$block}->{$branch};
+
+				# "-" means the branch was never reached;
+				# treat it as neutral when combining
+				if (!defined($new) || $new eq "-") {
+					$new = $taken;
+				} elsif ($taken ne "-") {
+					if ($op == $BR_ADD) {
+						$new += $taken;
+					} elsif ($op == $BR_SUB) {
+						# Clamp at zero so subtraction
+						# never yields negative counts
+						$new -= $taken;
+						$new = 0 if ($new < 0);
+					}
+				}
+
+				$db1->{$line}->{$block}->{$branch} = $new;
+			}
+		}
+	}
+}
+
+
+#
+# brcount_db_get_found_and_hit(db)
+#
+# Return (br_found, br_hit) for db.
+#
+
+sub brcount_db_get_found_and_hit($)
+{
+	my ($db) = @_;
+	my $found = 0;
+	my $hit = 0;
+
+	# Walk line -> block -> branch and tally taken values;
+	# "-" marks a branch that was never reached
+	foreach my $line (keys(%{$db})) {
+		foreach my $block (keys(%{$db->{$line}})) {
+			foreach my $taken (values(%{$db->{$line}->{$block}})) {
+				$found++;
+				$hit++ if ($taken ne "-" && $taken > 0);
+			}
+		}
+	}
+
+	return ($found, $hit);
+}
+
+
+# combine_brcount(brcount1, brcount2, type, inplace)
+#
+# If type is $BR_ADD, add branch coverage data and return list brcount_added.
+# If type is $BR_SUB, subtract the taken values of brcount2 from brcount1 and
+# return brcount_sub. If inplace is set, the result is inserted into brcount1.
+#
+
+sub combine_brcount($$$;$)
+{
+	my ($brcount1, $brcount2, $type, $inplace) = @_;
+
+	# Combine in db representation, then convert the result back
+	my $db1 = brcount_to_db($brcount1);
+	my $db2 = brcount_to_db($brcount2);
+
+	brcount_db_combine($db1, $db2, $type);
+
+	return db_to_brcount($db1, $inplace ? $brcount1 : undef);
+}
+
+
+#
+# add_testbrdata(testbrdata1, testbrdata2)
+#
+# Add branch coverage data for several tests. Return reference to
+# added_testbrdata.
+#
+
+sub add_testbrdata($$)
+{
+	my ($testbrdata1, $testbrdata2) = @_;
+	my %merged;
+
+	foreach my $test (keys(%{$testbrdata1})) {
+		if (defined($testbrdata2->{$test})) {
+			# Present in both data sets: add branch counts
+			my ($sum) = combine_brcount($testbrdata1->{$test},
+						    $testbrdata2->{$test},
+						    $BR_ADD);
+			$merged{$test} = $sum;
+		} else {
+			# Only in data set 1: copy reference
+			$merged{$test} = $testbrdata1->{$test};
+		}
+	}
+
+	# Entries found only in data set 2: copy reference
+	foreach my $test (keys(%{$testbrdata2})) {
+		$merged{$test} = $testbrdata2->{$test}
+			unless (defined($merged{$test}));
+	}
+
+	return \%merged;
+}
+
+
+#
+# combine_info_entries(entry_ref1, entry_ref2, filename)
+#
+# Combine .info data entry hashes referenced by ENTRY_REF1 and ENTRY_REF2.
+# Return reference to resulting hash.
+#
+# FILENAME is only used in diagnostic messages.
+#
+
+sub combine_info_entries($$$)
+{
+	my $entry1 = $_[0];	# Reference to hash containing first entry
+	my $testdata1;
+	my $sumcount1;
+	my $funcdata1;
+	my $checkdata1;
+	my $testfncdata1;
+	my $sumfnccount1;
+	my $testbrdata1;
+	my $sumbrcount1;
+
+	my $entry2 = $_[1];	# Reference to hash containing second entry
+	my $testdata2;
+	my $sumcount2;
+	my $funcdata2;
+	my $checkdata2;
+	my $testfncdata2;
+	my $sumfnccount2;
+	my $testbrdata2;
+	my $sumbrcount2;
+
+	my %result;		# Hash containing combined entry
+	my %result_testdata;
+	my $result_sumcount = {};
+	my $result_funcdata;
+	my $result_testfncdata;
+	my $result_sumfnccount;
+	my $result_testbrdata;
+	my $result_sumbrcount;
+	my $lines_found;
+	my $lines_hit;
+	my $fn_found;
+	my $fn_hit;
+	my $br_found;
+	my $br_hit;
+
+	my $testname;
+	my $filename = $_[2];
+
+	# Retrieve data
+	($testdata1, $sumcount1, $funcdata1, $checkdata1, $testfncdata1,
+	 $sumfnccount1, $testbrdata1, $sumbrcount1) = get_info_entry($entry1);
+	($testdata2, $sumcount2, $funcdata2, $checkdata2, $testfncdata2,
+	 $sumfnccount2, $testbrdata2, $sumbrcount2) = get_info_entry($entry2);
+
+	# Merge checksums (dies on checksum mismatch)
+	$checkdata1 = merge_checksums($checkdata1, $checkdata2, $filename);
+
+	# Combine funcdata
+	$result_funcdata = merge_func_data($funcdata1, $funcdata2, $filename);
+
+	# Combine function call count data
+	$result_testfncdata = add_testfncdata($testfncdata1, $testfncdata2);
+	($result_sumfnccount, $fn_found, $fn_hit) =
+		add_fnccount($sumfnccount1, $sumfnccount2);
+	
+	# Combine branch coverage data
+	$result_testbrdata = add_testbrdata($testbrdata1, $testbrdata2);
+	($result_sumbrcount, $br_found, $br_hit) =
+		combine_brcount($sumbrcount1, $sumbrcount2, $BR_ADD);
+
+	# Combine testdata
+	foreach $testname (keys(%{$testdata1}))
+	{
+		if (defined($testdata2->{$testname}))
+		{
+			# testname is present in both entries, requires
+			# combination
+			# (list assignment keeps only the resulting hash ref)
+			($result_testdata{$testname}) =
+				add_counts($testdata1->{$testname},
+					   $testdata2->{$testname});
+		}
+		else
+		{
+			# testname only present in entry1, add to result
+			$result_testdata{$testname} = $testdata1->{$testname};
+		}
+
+		# update sum count hash
+		($result_sumcount, $lines_found, $lines_hit) =
+			add_counts($result_sumcount,
+				   $result_testdata{$testname});
+	}
+
+	foreach $testname (keys(%{$testdata2}))
+	{
+		# Skip testnames already covered by previous iteration
+		if (defined($testdata1->{$testname})) { next; }
+
+		# testname only present in entry2, add to result hash
+		$result_testdata{$testname} = $testdata2->{$testname};
+
+		# update sum count hash
+		($result_sumcount, $lines_found, $lines_hit) =
+			add_counts($result_sumcount,
+				   $result_testdata{$testname});
+	}
+	
+	# Calculate resulting sumcount
+
+	# Store result
+	set_info_entry(\%result, \%result_testdata, $result_sumcount,
+		       $result_funcdata, $checkdata1, $result_testfncdata,
+		       $result_sumfnccount, $result_testbrdata,
+		       $result_sumbrcount, $lines_found, $lines_hit,
+		       $fn_found, $fn_hit, $br_found, $br_hit);
+
+	return(\%result);
+}
+
+
+#
+# combine_info_files(info_ref1, info_ref2)
+#
+# Combine .info data in hashes referenced by INFO_REF1 and INFO_REF2. Return
+# reference to resulting hash.
+#
+
+sub combine_info_files($$)
+{
+	my %combined = %{$_[0]};
+	my %other = %{$_[1]};
+
+	foreach my $filename (keys(%other))
+	{
+		if ($combined{$filename})
+		{
+			# Filename occurs in both sets: merge both entries
+			$combined{$filename} =
+				combine_info_entries($combined{$filename},
+						     $other{$filename},
+						     $filename);
+		}
+		else
+		{
+			# Filename is unique to the second set: copy entry
+			$combined{$filename} = $other{$filename};
+		}
+	}
+
+	return(\%combined);
+}
+
+
+#
+# get_prefix(min_dir, filename_list)
+#
+# Search FILENAME_LIST for a directory prefix which is common to as many
+# list entries as possible, so that removing this prefix will minimize the
+# sum of the lengths of all resulting shortened filenames while observing
+# that no filename has less than MIN_DIR parent directories.
+#
+# Return the best prefix without its trailing "/", or undef if no prefix
+# candidate remains.
+#
+
+sub get_prefix($@)
+{
+	my ($min_dir, @filename_list) = @_;
+	my %prefix;			# mapping: prefix -> sum of lengths
+	my $current;			# Temporary iteration variable
+
+	# Find list of prefixes
+	# (keys of %prefix carry a trailing "/")
+	foreach (@filename_list)
+	{
+		# Need explicit assignment to get a copy of $_ so that
+		# shortening the contained prefix does not affect the list
+		$current = $_;
+		while ($current = shorten_prefix($current))
+		{
+			$current .= "/";
+
+			# Skip rest if the remaining prefix has already been
+			# added to hash
+			if (exists($prefix{$current})) { last; }
+
+			# Initialize with 0
+			$prefix{$current}="0";
+		}
+
+	}
+
+	# Remove all prefixes that would cause filenames to have less than
+	# the minimum number of parent directories
+	foreach my $filename (@filename_list) {
+		my $dir = dirname($filename);
+
+		for (my $i = 0; $i < $min_dir; $i++) {
+			delete($prefix{$dir."/"});
+			$dir = shorten_prefix($dir);
+		}
+	}
+
+	# Check if any prefix remains
+	return undef if (!%prefix);
+
+	# Calculate sum of lengths for all prefixes
+	# (a filename that does not start with a prefix contributes its
+	# full length to that prefix's score; smaller scores are better)
+	foreach $current (keys(%prefix))
+	{
+		foreach (@filename_list)
+		{
+			# Add original length
+			$prefix{$current} += length($_);
+
+			# Check whether prefix matches
+			if (substr($_, 0, length($current)) eq $current)
+			{
+				# Subtract prefix length for this filename
+				$prefix{$current} -= length($current);
+			}
+		}
+	}
+
+	# Find and return prefix with minimal sum
+	$current = (keys(%prefix))[0];
+
+	foreach (keys(%prefix))
+	{
+		if ($prefix{$_} < $prefix{$current})
+		{
+			$current = $_;
+		}
+	}
+
+	# Strip the trailing "/" before returning
+	$current =~ s/\/$//;
+
+	return($current);
+}
+
+
+#
+# shorten_prefix(prefix)
+#
+# Return PREFIX shortened by last directory component.
+#
+
+sub shorten_prefix($)
+{
+	my ($prefix) = @_;
+	my @components = split("/", $prefix);
+
+	pop(@components);
+	return join("/", @components);
+}
+
+
+
+#
+# get_dir_list(filename_list)
+#
+# Return sorted list of directories for each entry in given FILENAME_LIST.
+#
+
+sub get_dir_list(@)
+{
+	my %dirs;
+
+	# Hash keys provide automatic de-duplication
+	foreach my $filename (@_)
+	{
+		$dirs{shorten_prefix($filename)} = "";
+	}
+
+	return(sort(keys(%dirs)));
+}
+
+
+#
+# get_relative_base_path(subdirectory)
+#
+# Return a relative path string which references the base path when applied
+# in SUBDIRECTORY.
+#
+# Example: get_relative_base_path("fs/mm") -> "../../"
+#
+
+sub get_relative_base_path($)
+{
+	my ($dir) = @_;
+	my $result = "";
+	my $levels;
+
+	# An empty directory path references the base path directly
+	return("") if (!$dir);
+
+	# One "../" per path component: number of "/" occurrences plus one
+	$levels = () = ($dir =~ /\//g);
+	for (my $i = 0; $i <= $levels; $i++)
+	{
+		$result .= "../";
+	}
+
+	return $result;
+}
+
+
+#
+# read_testfile(test_filename)
+#
+# Read in file TEST_FILENAME which contains test descriptions in the format:
+#
+#   TN:<whitespace><test name>
+#   TD:<whitespace><test description>
+#
+# for each test case. Return a reference to a hash containing a mapping
+#
+#   test name -> test description.
+#
+# Multiple TD: lines for the same test name are concatenated.
+#
+# Die on error.
+#
+
+sub read_testfile($)
+{
+	my %result;
+	my $test_name;
+	my $changed_testname;
+	local *TEST_HANDLE;
+
+	open(TEST_HANDLE, "<", $_[0])
+		or die("ERROR: cannot open $_[0]!\n");
+
+	while (<TEST_HANDLE>)
+	{
+		chomp($_);
+
+		# Match lines beginning with TN:<whitespace(s)>
+		if (/^TN:\s+(.*?)\s*$/)
+		{
+			# Store name for later use
+			$test_name = $1;
+			# Sanitize: non-word characters become "_";
+			# remember to warn about it after reading
+			if ($test_name =~ s/\W/_/g)
+			{
+				$changed_testname = 1;
+			}
+		}
+
+		# Match lines beginning with TD:<whitespace(s)>
+		if (/^TD:\s+(.*?)\s*$/)
+		{
+			if (!defined($test_name)) {
+				# $. holds the current input line number
+				die("ERROR: Found test description without prior test name in $_[0]:$.\n");
+			}
+			# Check for empty line
+			if ($1)
+			{
+				# Add description to hash
+				$result{$test_name} .= " $1";
+			}
+			else
+			{
+				# Add empty line
+				$result{$test_name} .= "\n\n";
+			}
+		}
+	}
+
+	close(TEST_HANDLE);
+
+	if ($changed_testname)
+	{
+		warn("WARNING: invalid characters removed from testname in ".
+		     "descriptions file $_[0]\n");
+	}
+
+	return \%result;
+}
+
+
+#
+# escape_html(STRING)
+#
+# Return a copy of STRING in which all occurrences of HTML special characters
+# are escaped, tabs are expanded according to $tab_size and newlines are
+# converted to <br> tags.
+#
+
+sub escape_html($)
+{
+	my ($text) = @_;
+
+	# Undefined or empty input yields an empty result
+	return "" if (!$text);
+
+	# HTML special characters
+	$text =~ s/&/&amp;/g;
+	$text =~ s/</&lt;/g;
+	$text =~ s/>/&gt;/g;
+	$text =~ s/\"/&quot;/g;
+
+	# Expand each tab to spaces up to the next tab stop
+	while ($text =~ /^([^\t]*)(\t)/)
+	{
+		my $spaces = " "x($tab_size - (length($1) % $tab_size));
+		$text =~ s/^([^\t]*)(\t)/$1$spaces/;
+	}
+
+	# Line breaks
+	$text =~ s/\n/<br>/g;
+
+	return $text;
+}
+
+
+#
+# get_date_string()
+#
+# Return the current date in the form: yyyy-mm-dd hh:mm:ss. If the
+# SOURCE_DATE_EPOCH environment variable is set, use that timestamp (as
+# UTC) instead of the local time to allow reproducible output.
+#
+
+sub get_date_string()
+{
+	my @time;
+
+	if (defined $ENV{'SOURCE_DATE_EPOCH'})
+	{
+		@time = gmtime($ENV{'SOURCE_DATE_EPOCH'});
+	}
+	else
+	{
+		@time = localtime();
+	}
+
+	my ($sec, $min, $hour, $day, $month, $year) = @time[0 .. 5];
+
+	return sprintf("%d-%02d-%02d %02d:%02d:%02d", $year+1900, $month+1,
+		       $day, $hour, $min, $sec);
+}
+
+
+#
+# create_sub_dir(dir_name)
+#
+# Create subdirectory DIR_NAME if it does not already exist, including all its
+# parent directories.
+#
+# Die on error.
+#
+
+sub create_sub_dir($)
+{
+	my ($dir) = @_;
+
+	# system() returns zero on success
+	if (system("mkdir", "-p", $dir)) {
+		die("ERROR: cannot create directory $dir!\n");
+	}
+}
+
+
+#
+# write_description_file(descriptions, overall_found, overall_hit,
+#                        total_fn_found, total_fn_hit, total_br_found,
+#                        total_br_hit)
+#
+# Write HTML file containing all test case descriptions. DESCRIPTIONS is a
+# reference to a hash containing a mapping
+#
+#   test case name -> test case description
+#
+# The remaining arguments are the overall line/function/branch counts
+# shown in the page header.
+#
+# Die on error.
+#
+
+sub write_description_file($$$$$$$)
+{
+	my %description = %{$_[0]};	# Test name -> description
+	my $found = $_[1];		# Overall lines found
+	my $hit = $_[2];		# Overall lines hit
+	my $fn_found = $_[3];		# Overall functions found
+	my $fn_hit = $_[4];		# Overall functions hit
+	my $br_found = $_[5];		# Overall branches found
+	my $br_hit = $_[6];		# Overall branches hit
+	my $test_name;
+	local *HTML_HANDLE;
+
+	html_create(*HTML_HANDLE,"descriptions.$html_ext");
+	write_html_prolog(*HTML_HANDLE, "", "LCOV - test case descriptions");
+	write_header(*HTML_HANDLE, 3, "", "", $found, $hit, $fn_found,
+		     $fn_hit, $br_found, $br_hit, 0);
+
+	write_test_table_prolog(*HTML_HANDLE,
+			 "Test case descriptions - alphabetical list");
+
+	foreach $test_name (sort(keys(%description)))
+	{
+		my $desc = $description{$test_name};
+
+		# Escape unless descriptions are already marked up as HTML
+		$desc = escape_html($desc) if (!$rc_desc_html);
+		write_test_table_entry(*HTML_HANDLE, $test_name, $desc);
+	}
+
+	write_test_table_epilog(*HTML_HANDLE);
+	write_html_epilog(*HTML_HANDLE, "");
+
+	close(*HTML_HANDLE);
+}
+
+
+
+#
+# write_png_files()
+#
+# Create all necessary .png files for the HTML-output in the current
+# directory. .png-files are used as bar graphs.
+#
+# Die on error.
+#
+
+sub write_png_files()
+{
+	my %data;		# Maps output filename -> list of byte values
+	local *PNG_HANDLE;
+
+	# Each image is a pre-built PNG stored byte by byte
+	$data{"ruby.png"} =
+		[0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00,
+		 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01,
+		 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25,
+		 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49, 0x4d,
+		 0x45, 0x07, 0xd2, 0x07, 0x11, 0x0f, 0x18, 0x10, 0x5d, 0x57,
+		 0x34, 0x6e, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59, 0x73,
+		 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01, 0xd2,
+		 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d,
+		 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00,
+		 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0xff, 0x35, 0x2f,
+		 0x00, 0x00, 0x00, 0xd0, 0x33, 0x9a, 0x9d, 0x00, 0x00, 0x00,
+		 0x0a, 0x49, 0x44, 0x41, 0x54, 0x78, 0xda, 0x63, 0x60, 0x00,
+		 0x00, 0x00, 0x02, 0x00, 0x01, 0xe5, 0x27, 0xde, 0xfc, 0x00,
+		 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60,
+		 0x82];
+	$data{"amber.png"} =
+		[0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00,
+		 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01,
+		 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25,
+		 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49, 0x4d,
+		 0x45, 0x07, 0xd2, 0x07, 0x11, 0x0f, 0x28, 0x04, 0x98, 0xcb,
+		 0xd6, 0xe0, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59, 0x73,
+		 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01, 0xd2,
+		 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d,
+		 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00,
+		 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0xff, 0xe0, 0x50,
+		 0x00, 0x00, 0x00, 0xa2, 0x7a, 0xda, 0x7e, 0x00, 0x00, 0x00,
+		 0x0a, 0x49, 0x44, 0x41, 0x54, 0x78, 0xda, 0x63, 0x60, 0x00,
+		 0x00, 0x00, 0x02, 0x00, 0x01, 0xe5, 0x27, 0xde, 0xfc, 0x00,
+		 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60,
+		 0x82];
+	$data{"emerald.png"} =
+		[0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00,
+		 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01,
+		 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25,
+		 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49, 0x4d,
+		 0x45, 0x07, 0xd2, 0x07, 0x11, 0x0f, 0x22, 0x2b, 0xc9, 0xf5,
+		 0x03, 0x33, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59, 0x73,
+		 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01, 0xd2,
+		 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d,
+		 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00,
+		 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0x1b, 0xea, 0x59,
+		 0x0a, 0x0a, 0x0a, 0x0f, 0xba, 0x50, 0x83, 0x00, 0x00, 0x00,
+		 0x0a, 0x49, 0x44, 0x41, 0x54, 0x78, 0xda, 0x63, 0x60, 0x00,
+		 0x00, 0x00, 0x02, 0x00, 0x01, 0xe5, 0x27, 0xde, 0xfc, 0x00,
+		 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60,
+		 0x82];
+	$data{"snow.png"} =
+		[0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00,
+		 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01,
+		 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25,
+		 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49, 0x4d,
+		 0x45, 0x07, 0xd2, 0x07, 0x11, 0x0f, 0x1e, 0x1d, 0x75, 0xbc,
+		 0xef, 0x55, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59, 0x73,
+		 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01, 0xd2,
+		 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d,
+		 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00,
+		 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0xff, 0xff, 0xff,
+		 0x00, 0x00, 0x00, 0x55, 0xc2, 0xd3, 0x7e, 0x00, 0x00, 0x00,
+		 0x0a, 0x49, 0x44, 0x41, 0x54, 0x78, 0xda, 0x63, 0x60, 0x00,
+		 0x00, 0x00, 0x02, 0x00, 0x01, 0xe5, 0x27, 0xde, 0xfc, 0x00,
+		 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60,
+		 0x82];
+	$data{"glass.png"} =
+		[0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00,
+		 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01,
+		 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25,
+		 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d,
+		 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00,
+		 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0xff, 0xff, 0xff,
+		 0x00, 0x00, 0x00, 0x55, 0xc2, 0xd3, 0x7e, 0x00, 0x00, 0x00,
+		 0x01, 0x74, 0x52, 0x4e, 0x53, 0x00, 0x40, 0xe6, 0xd8, 0x66,
+		 0x00, 0x00, 0x00, 0x01, 0x62, 0x4b, 0x47, 0x44, 0x00, 0x88,
+		 0x05, 0x1d, 0x48, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59,
+		 0x73, 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01,
+		 0xd2, 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49,
+		 0x4d, 0x45, 0x07, 0xd2, 0x07, 0x13, 0x0f, 0x08, 0x19, 0xc4,
+		 0x40, 0x56, 0x10, 0x00, 0x00, 0x00, 0x0a, 0x49, 0x44, 0x41,
+		 0x54, 0x78, 0x9c, 0x63, 0x60, 0x00, 0x00, 0x00, 0x02, 0x00,
+		 0x01, 0x48, 0xaf, 0xa4, 0x71, 0x00, 0x00, 0x00, 0x00, 0x49,
+		 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, 0x82];
+	# updown.png is only needed when sort arrows are generated
+	$data{"updown.png"} =
+		[0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00,
+		 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x0a,
+		 0x00, 0x00, 0x00, 0x0e, 0x08, 0x06, 0x00, 0x00, 0x00, 0x16,
+		 0xa3, 0x8d, 0xab, 0x00, 0x00, 0x00, 0x3c, 0x49, 0x44, 0x41,
+		 0x54, 0x28, 0xcf, 0x63, 0x60, 0x40, 0x03, 0xff, 0xa1, 0x00,
+		 0x5d, 0x9c, 0x11, 0x5d, 0x11, 0x8a, 0x24, 0x23, 0x23, 0x23,
+		 0x86, 0x42, 0x6c, 0xa6, 0x20, 0x2b, 0x66, 0xc4, 0xa7, 0x08,
+		 0x59, 0x31, 0x23, 0x21, 0x45, 0x30, 0xc0, 0xc4, 0x30, 0x60,
+		 0x80, 0xfa, 0x6e, 0x24, 0x3e, 0x78, 0x48, 0x0a, 0x70, 0x62,
+		 0xa2, 0x90, 0x81, 0xd8, 0x44, 0x01, 0x00, 0xe9, 0x5c, 0x2f,
+		 0xf5, 0xe2, 0x9d, 0x0f, 0xf9, 0x00, 0x00, 0x00, 0x00, 0x49,
+		 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, 0x82] if ($sort);
+	# Write each image in binary mode
+	foreach (keys(%data))
+	{
+		open(PNG_HANDLE, ">", $_)
+			or die("ERROR: cannot create $_!\n");
+		binmode(PNG_HANDLE);
+		print(PNG_HANDLE map(chr,@{$data{$_}}));
+		close(PNG_HANDLE);
+	}
+}
+
+
+#
+# write_htaccess_file()
+#
+# Write a .htaccess file to the current directory which marks .html files
+# as gzip-encoded.
+#
+
+sub write_htaccess_file()
+{
+	local *HTACCESS_HANDLE;
+
+	open(*HTACCESS_HANDLE, ">", ".htaccess")
+		or die("ERROR: cannot open .htaccess for writing!\n");
+
+	print(HTACCESS_HANDLE "AddEncoding x-gzip .html\n");
+	close(*HTACCESS_HANDLE);
+}
+
+
+#
+# write_css_file()
+#
+# Write the cascading style sheet file gcov.css to the current directory.
+# This file defines basic layout attributes of all generated HTML pages.
+#
+
+sub write_css_file()
+{
+	local *CSS_HANDLE;
+
+	# Check for a specified external style sheet file
+	if ($css_filename)
+	{
+		# Simply copy that file
+		system("cp", $css_filename, "gcov.css")
+			and die("ERROR: cannot copy file $css_filename!\n");
+		return;
+	}
+
+	open(CSS_HANDLE, ">", "gcov.css")
+		or die ("ERROR: cannot open gcov.css for writing!\n");
+
+
+	# *************************************************************
+
+	my $css_data = ($_=<<"END_OF_CSS")
+	/* All views: initial background and text color */
+	body
+	{
+	  color: #000000;
+	  background-color: #FFFFFF;
+	}
+	
+	/* All views: standard link format*/
+	a:link
+	{
+	  color: #284FA8;
+	  text-decoration: underline;
+	}
+	
+	/* All views: standard link - visited format */
+	a:visited
+	{
+	  color: #00CB40;
+	  text-decoration: underline;
+	}
+	
+	/* All views: standard link - activated format */
+	a:active
+	{
+	  color: #FF0040;
+	  text-decoration: underline;
+	}
+	
+	/* All views: main title format */
+	td.title
+	{
+	  text-align: center;
+	  padding-bottom: 10px;
+	  font-family: sans-serif;
+	  font-size: 20pt;
+	  font-style: italic;
+	  font-weight: bold;
+	}
+	
+	/* All views: header item format */
+	td.headerItem
+	{
+	  text-align: right;
+	  padding-right: 6px;
+	  font-family: sans-serif;
+	  font-weight: bold;
+	  vertical-align: top;
+	  white-space: nowrap;
+	}
+	
+	/* All views: header item value format */
+	td.headerValue
+	{
+	  text-align: left;
+	  color: #284FA8;
+	  font-family: sans-serif;
+	  font-weight: bold;
+	  white-space: nowrap;
+	}
+
+	/* All views: header item coverage table heading */
+	td.headerCovTableHead
+	{
+	  text-align: center;
+	  padding-right: 6px;
+	  padding-left: 6px;
+	  padding-bottom: 0px;
+	  font-family: sans-serif;
+	  font-size: 80%;
+	  white-space: nowrap;
+	}
+	
+	/* All views: header item coverage table entry */
+	td.headerCovTableEntry
+	{
+	  text-align: right;
+	  color: #284FA8;
+	  font-family: sans-serif;
+	  font-weight: bold;
+	  white-space: nowrap;
+	  padding-left: 12px;
+	  padding-right: 4px;
+	  background-color: #DAE7FE;
+	}
+	
+	/* All views: header item coverage table entry for high coverage rate */
+	td.headerCovTableEntryHi
+	{
+	  text-align: right;
+	  color: #000000;
+	  font-family: sans-serif;
+	  font-weight: bold;
+	  white-space: nowrap;
+	  padding-left: 12px;
+	  padding-right: 4px;
+	  background-color: #A7FC9D;
+	}
+	
+	/* All views: header item coverage table entry for medium coverage rate */
+	td.headerCovTableEntryMed
+	{
+	  text-align: right;
+	  color: #000000;
+	  font-family: sans-serif;
+	  font-weight: bold;
+	  white-space: nowrap;
+	  padding-left: 12px;
+	  padding-right: 4px;
+	  background-color: #FFEA20;
+	}
+	
+	/* All views: header item coverage table entry for ow coverage rate */
+	td.headerCovTableEntryLo
+	{
+	  text-align: right;
+	  color: #000000;
+	  font-family: sans-serif;
+	  font-weight: bold;
+	  white-space: nowrap;
+	  padding-left: 12px;
+	  padding-right: 4px;
+	  background-color: #FF0000;
+	}
+	
+	/* All views: header legend value for legend entry */
+	td.headerValueLeg
+	{
+	  text-align: left;
+	  color: #000000;
+	  font-family: sans-serif;
+	  font-size: 80%;
+	  white-space: nowrap;
+	  padding-top: 4px;
+	}
+	
+	/* All views: color of horizontal ruler */
+	td.ruler
+	{
+	  background-color: #6688D4;
+	}
+	
+	/* All views: version string format */
+	td.versionInfo
+	{
+	  text-align: center;
+	  padding-top: 2px;
+	  font-family: sans-serif;
+	  font-style: italic;
+	}
+	
+	/* Directory view/File view (all)/Test case descriptions:
+	   table headline format */
+	td.tableHead
+	{
+	  text-align: center;
+	  color: #FFFFFF;
+	  background-color: #6688D4;
+	  font-family: sans-serif;
+	  font-size: 120%;
+	  font-weight: bold;
+	  white-space: nowrap;
+	  padding-left: 4px;
+	  padding-right: 4px;
+	}
+
+	span.tableHeadSort
+	{
+	  padding-right: 4px;
+	}
+	
+	/* Directory view/File view (all): filename entry format */
+	td.coverFile
+	{
+	  text-align: left;
+	  padding-left: 10px;
+	  padding-right: 20px; 
+	  color: #284FA8;
+	  background-color: #DAE7FE;
+	  font-family: monospace;
+	}
+	
+	/* Directory view/File view (all): bar-graph entry format*/
+	td.coverBar
+	{
+	  padding-left: 10px;
+	  padding-right: 10px;
+	  background-color: #DAE7FE;
+	}
+	
+	/* Directory view/File view (all): bar-graph outline color */
+	td.coverBarOutline
+	{
+	  background-color: #000000;
+	}
+	
+	/* Directory view/File view (all): percentage entry for files with
+	   high coverage rate */
+	td.coverPerHi
+	{
+	  text-align: right;
+	  padding-left: 10px;
+	  padding-right: 10px;
+	  background-color: #A7FC9D;
+	  font-weight: bold;
+	  font-family: sans-serif;
+	}
+	
+	/* Directory view/File view (all): line count entry for files with
+	   high coverage rate */
+	td.coverNumHi
+	{
+	  text-align: right;
+	  padding-left: 10px;
+	  padding-right: 10px;
+	  background-color: #A7FC9D;
+	  white-space: nowrap;
+	  font-family: sans-serif;
+	}
+	
+	/* Directory view/File view (all): percentage entry for files with
+	   medium coverage rate */
+	td.coverPerMed
+	{
+	  text-align: right;
+	  padding-left: 10px;
+	  padding-right: 10px;
+	  background-color: #FFEA20;
+	  font-weight: bold;
+	  font-family: sans-serif;
+	}
+	
+	/* Directory view/File view (all): line count entry for files with
+	   medium coverage rate */
+	td.coverNumMed
+	{
+	  text-align: right;
+	  padding-left: 10px;
+	  padding-right: 10px;
+	  background-color: #FFEA20;
+	  white-space: nowrap;
+	  font-family: sans-serif;
+	}
+	
+	/* Directory view/File view (all): percentage entry for files with
+	   low coverage rate */
+	td.coverPerLo
+	{
+	  text-align: right;
+	  padding-left: 10px;
+	  padding-right: 10px;
+	  background-color: #FF0000;
+	  font-weight: bold;
+	  font-family: sans-serif;
+	}
+	
+	/* Directory view/File view (all): line count entry for files with
+	   low coverage rate */
+	td.coverNumLo
+	{
+	  text-align: right;
+	  padding-left: 10px;
+	  padding-right: 10px;
+	  background-color: #FF0000;
+	  white-space: nowrap;
+	  font-family: sans-serif;
+	}
+	
+	/* File view (all): "show/hide details" link format */
+	a.detail:link
+	{
+	  color: #B8D0FF;
+	  font-size:80%;
+	}
+	
+	/* File view (all): "show/hide details" link - visited format */
+	a.detail:visited
+	{
+	  color: #B8D0FF;
+	  font-size:80%;
+	}
+	
+	/* File view (all): "show/hide details" link - activated format */
+	a.detail:active
+	{
+	  color: #FFFFFF;
+	  font-size:80%;
+	}
+	
+	/* File view (detail): test name entry */
+	td.testName
+	{
+	  text-align: right;
+	  padding-right: 10px;
+	  background-color: #DAE7FE;
+	  font-family: sans-serif;
+	}
+	
+	/* File view (detail): test percentage entry */
+	td.testPer
+	{
+	  text-align: right;
+	  padding-left: 10px;
+	  padding-right: 10px; 
+	  background-color: #DAE7FE;
+	  font-family: sans-serif;
+	}
+	
+	/* File view (detail): test lines count entry */
+	td.testNum
+	{
+	  text-align: right;
+	  padding-left: 10px;
+	  padding-right: 10px; 
+	  background-color: #DAE7FE;
+	  font-family: sans-serif;
+	}
+	
+	/* Test case descriptions: test name format*/
+	dt
+	{
+	  font-family: sans-serif;
+	  font-weight: bold;
+	}
+	
+	/* Test case descriptions: description table body */
+	td.testDescription
+	{
+	  padding-top: 10px;
+	  padding-left: 30px;
+	  padding-bottom: 10px;
+	  padding-right: 30px;
+	  background-color: #DAE7FE;
+	}
+	
+	/* Source code view: function entry */
+	td.coverFn
+	{
+	  text-align: left;
+	  padding-left: 10px;
+	  padding-right: 20px; 
+	  color: #284FA8;
+	  background-color: #DAE7FE;
+	  font-family: monospace;
+	}
+
+	/* Source code view: function entry zero count*/
+	td.coverFnLo
+	{
+	  text-align: right;
+	  padding-left: 10px;
+	  padding-right: 10px;
+	  background-color: #FF0000;
+	  font-weight: bold;
+	  font-family: sans-serif;
+	}
+
+	/* Source code view: function entry nonzero count*/
+	td.coverFnHi
+	{
+	  text-align: right;
+	  padding-left: 10px;
+	  padding-right: 10px;
+	  background-color: #DAE7FE;
+	  font-weight: bold;
+	  font-family: sans-serif;
+	}
+
+	/* Source code view: source code format */
+	pre.source
+	{
+	  font-family: monospace;
+	  white-space: pre;
+	  margin-top: 2px;
+	}
+	
+	/* Source code view: line number format */
+	span.lineNum
+	{
+	  background-color: #EFE383;
+	}
+	
+	/* Source code view: format for lines which were executed */
+	td.lineCov,
+	span.lineCov
+	{
+	  background-color: #CAD7FE;
+	}
+	
+	/* Source code view: format for Cov legend */
+	span.coverLegendCov
+	{
+	  padding-left: 10px;
+	  padding-right: 10px;
+	  padding-bottom: 2px;
+	  background-color: #CAD7FE;
+	}
+	
+	/* Source code view: format for lines which were not executed */
+	td.lineNoCov,
+	span.lineNoCov
+	{
+	  background-color: #FF6230;
+	}
+	
+	/* Source code view: format for NoCov legend */
+	span.coverLegendNoCov
+	{
+	  padding-left: 10px;
+	  padding-right: 10px;
+	  padding-bottom: 2px;
+	  background-color: #FF6230;
+	}
+	
+	/* Source code view (function table): standard link - visited format */
+	td.lineNoCov > a:visited,
+	td.lineCov > a:visited
+	{  
+	  color: black;
+	  text-decoration: underline;
+	}  
+	
+	/* Source code view: format for lines which were executed only in a
+	   previous version */
+	span.lineDiffCov
+	{
+	  background-color: #B5F7AF;
+	}
+	
+	/* Source code view: format for branches which were executed
+	 * and taken */
+	span.branchCov
+	{
+	  background-color: #CAD7FE;
+	}
+
+	/* Source code view: format for branches which were executed
+	 * but not taken */
+	span.branchNoCov
+	{
+	  background-color: #FF6230;
+	}
+
+	/* Source code view: format for branches which were not executed */
+	span.branchNoExec
+	{
+	  background-color: #FF6230;
+	}
+
+	/* Source code view: format for the source code heading line */
+	pre.sourceHeading
+	{
+	  white-space: pre;
+	  font-family: monospace;
+	  font-weight: bold;
+	  margin: 0px;
+	}
+
+	/* All views: header legend value for low rate */
+	td.headerValueLegL
+	{
+	  font-family: sans-serif;
+	  text-align: center;
+	  white-space: nowrap;
+	  padding-left: 4px;
+	  padding-right: 2px;
+	  background-color: #FF0000;
+	  font-size: 80%;
+	}
+
+	/* All views: header legend value for med rate */
+	td.headerValueLegM
+	{
+	  font-family: sans-serif;
+	  text-align: center;
+	  white-space: nowrap;
+	  padding-left: 2px;
+	  padding-right: 2px;
+	  background-color: #FFEA20;
+	  font-size: 80%;
+	}
+
+	/* All views: header legend value for hi rate */
+	td.headerValueLegH
+	{
+	  font-family: sans-serif;
+	  text-align: center;
+	  white-space: nowrap;
+	  padding-left: 2px;
+	  padding-right: 4px;
+	  background-color: #A7FC9D;
+	  font-size: 80%;
+	}
+
+	/* All views except source code view: legend format for low coverage */
+	span.coverLegendCovLo
+	{
+	  padding-left: 10px;
+	  padding-right: 10px;
+	  padding-top: 2px;
+	  background-color: #FF0000;
+	}
+
+	/* All views except source code view: legend format for med coverage */
+	span.coverLegendCovMed
+	{
+	  padding-left: 10px;
+	  padding-right: 10px;
+	  padding-top: 2px;
+	  background-color: #FFEA20;
+	}
+
+	/* All views except source code view: legend format for hi coverage */
+	span.coverLegendCovHi
+	{
+	  padding-left: 10px;
+	  padding-right: 10px;
+	  padding-top: 2px;
+	  background-color: #A7FC9D;
+	}
+END_OF_CSS
+	;
+
+	# *************************************************************
+
+
+	# Remove leading tab from all lines
+	$css_data =~ s/^\t//gm;
+
+	print(CSS_HANDLE $css_data);
+
+	close(CSS_HANDLE);
+}
+
+
+#
+# get_bar_graph_code(base_dir, cover_found, cover_hit)
+#
+# Return a string containing HTML code which implements a bar graph display
+# for a coverage rate of cover_hit * 100 / cover_found.
+#
+
+sub get_bar_graph_code($$$)
+{
+	my ($base_dir, $found, $hit) = @_;
+	my $rate;
+	my $alt;
+	my $width;
+	my $remainder;
+	my $png_name;
+	my $graph_code;
+
+	# Check number of instrumented lines
+	if ($_[1] == 0) { return ""; }
+
+	$alt		= rate($hit, $found, "%");
+	$width		= rate($hit, $found, undef, 0);
+	$remainder	= 100 - $width;
+
+	# Decide which .png file to use
+	$png_name = $rate_png[classify_rate($found, $hit, $med_limit,
+					    $hi_limit)];
+
+	if ($width == 0)
+	{
+		# Zero coverage
+		$graph_code = (<<END_OF_HTML)
+	        <table border=0 cellspacing=0 cellpadding=1><tr><td class="coverBarOutline"><img src="$_[0]snow.png" width=100 height=10 alt="$alt"></td></tr></table>
+END_OF_HTML
+		;
+	}
+	elsif ($width == 100)
+	{
+		# Full coverage
+		$graph_code = (<<END_OF_HTML)
+		<table border=0 cellspacing=0 cellpadding=1><tr><td class="coverBarOutline"><img src="$_[0]$png_name" width=100 height=10 alt="$alt"></td></tr></table>
+END_OF_HTML
+		;
+	}
+	else
+	{
+		# Positive coverage
+		$graph_code = (<<END_OF_HTML)
+		<table border=0 cellspacing=0 cellpadding=1><tr><td class="coverBarOutline"><img src="$_[0]$png_name" width=$width height=10 alt="$alt"><img src="$_[0]snow.png" width=$remainder height=10 alt="$alt"></td></tr></table>
+END_OF_HTML
+		;
+	}
+
+	# Remove leading tabs from all lines
+	$graph_code =~ s/^\t+//gm;
+	chomp($graph_code);
+
+	return($graph_code);
+}
+
+#
+# sub classify_rate(found, hit, med_limit, high_limit)
+#
+# Return 0 for low rate, 1 for medium rate and 2 for hi rate.
+#
+
+sub classify_rate($$$$)
+{
+	my ($found, $hit, $med, $hi) = @_;
+	my $rate;
+
+	if ($found == 0) {
+		return 2;
+	}
+	$rate = rate($hit, $found);
+	if ($rate < $med) {
+		return 0;
+	} elsif ($rate < $hi) {
+		return 1;
+	}
+	return 2;
+}
+
+
+#
+# write_html(filehandle, html_code)
+#
+# Write out HTML_CODE to FILEHANDLE while removing a leading tabulator mark
+# in each line of HTML_CODE.
+#
+
+sub write_html(*$)
+{
+	local *HTML_HANDLE = $_[0];
+	my $html_code = $_[1];
+
+	# Remove leading tab from all lines
+	$html_code =~ s/^\t//gm;
+
+	print(HTML_HANDLE $html_code)
+		or die("ERROR: cannot write HTML data ($!)\n");
+}
+
+
+#
+# write_html_prolog(filehandle, base_dir, pagetitle)
+#
+# Write an HTML prolog common to all HTML files to FILEHANDLE. PAGETITLE will
+# be used as HTML page title. BASE_DIR contains a relative path which points
+# to the base directory.
+#
+
+sub write_html_prolog(*$$)
+{
+	my $basedir = $_[1];
+	my $pagetitle = $_[2];
+	my $prolog;
+
+	$prolog = $html_prolog;
+	$prolog =~ s/\@pagetitle\@/$pagetitle/g;
+	$prolog =~ s/\@basedir\@/$basedir/g;
+
+	write_html($_[0], $prolog);
+}
+
+
+#
+# write_header_prolog(filehandle, base_dir)
+#
+# Write beginning of page header HTML code.
+#
+
+sub write_header_prolog(*$)
+{
+	# *************************************************************
+
+	write_html($_[0], <<END_OF_HTML)
+	  <table width="100%" border=0 cellspacing=0 cellpadding=0>
+	    <tr><td class="title">$title</td></tr>
+	    <tr><td class="ruler"><img src="$_[1]glass.png" width=3 height=3 alt=""></td></tr>
+
+	    <tr>
+	      <td width="100%">
+	        <table cellpadding=1 border=0 width="100%">
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_header_line(handle, content)
+#
+# Write a header line with the specified table contents.
+#
+
+sub write_header_line(*@)
+{
+	my ($handle, @content) = @_;
+	my $entry;
+
+	write_html($handle, "          <tr>\n");
+	foreach $entry (@content) {
+		my ($width, $class, $text, $colspan) = @{$entry};
+
+		if (defined($width)) {
+			$width = " width=\"$width\"";
+		} else {
+			$width = "";
+		}
+		if (defined($class)) {
+			$class = " class=\"$class\"";
+		} else {
+			$class = "";
+		}
+		if (defined($colspan)) {
+			$colspan = " colspan=\"$colspan\"";
+		} else {
+			$colspan = "";
+		}
+		$text = "" if (!defined($text));
+		write_html($handle,
+			   "            <td$width$class$colspan>$text</td>\n");
+	}
+	write_html($handle, "          </tr>\n");
+}
+
+
+#
+# write_header_epilog(filehandle, base_dir)
+#
+# Write end of page header HTML code.
+#
+
+sub write_header_epilog(*$)
+{
+	# *************************************************************
+
+	write_html($_[0], <<END_OF_HTML)
+	          <tr><td><img src="$_[1]glass.png" width=3 height=3 alt=""></td></tr>
+	        </table>
+	      </td>
+	    </tr>
+
+	    <tr><td class="ruler"><img src="$_[1]glass.png" width=3 height=3 alt=""></td></tr>
+	  </table>
+
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_file_table_prolog(handle, file_heading, ([heading, num_cols], ...))
+#
+# Write heading for file table.
+#
+
+sub write_file_table_prolog(*$@)
+{
+	my ($handle, $file_heading, @columns) = @_;
+	my $num_columns = 0;
+	my $file_width;
+	my $col;
+	my $width;
+
+	$width = 20 if (scalar(@columns) == 1);
+	$width = 10 if (scalar(@columns) == 2);
+	$width = 8 if (scalar(@columns) > 2);
+
+	foreach $col (@columns) {
+		my ($heading, $cols) = @{$col};
+
+		$num_columns += $cols;
+	}
+	$file_width = 100 - $num_columns * $width;
+
+	# Table definition
+	write_html($handle, <<END_OF_HTML);
+	  <center>
+	  <table width="80%" cellpadding=1 cellspacing=1 border=0>
+
+	    <tr>
+	      <td width="$file_width%"><br></td>
+END_OF_HTML
+	# Empty first row
+	foreach $col (@columns) {
+		my ($heading, $cols) = @{$col};
+
+		while ($cols-- > 0) {
+			write_html($handle, <<END_OF_HTML);
+	      <td width="$width%"></td>
+END_OF_HTML
+		}
+	}
+	# Next row
+	write_html($handle, <<END_OF_HTML);
+	    </tr>
+
+	    <tr>
+	      <td class="tableHead">$file_heading</td>
+END_OF_HTML
+	# Heading row
+	foreach $col (@columns) {
+		my ($heading, $cols) = @{$col};
+		my $colspan = "";
+
+		$colspan = " colspan=$cols" if ($cols > 1);
+		write_html($handle, <<END_OF_HTML);
+	      <td class="tableHead"$colspan>$heading</td>
+END_OF_HTML
+	}
+	write_html($handle, <<END_OF_HTML);
+	    </tr>
+END_OF_HTML
+}
+
+
+# write_file_table_entry(handle, base_dir, filename, page_link,
+#			 ([ found, hit, med_limit, hi_limit, graph ], ..)
+#
+# Write an entry of the file table.
+#
+
+sub write_file_table_entry(*$$$@)
+{
+	my ($handle, $base_dir, $filename, $page_link, @entries) = @_;
+	my $file_code;
+	my $entry;
+	my $esc_filename = escape_html($filename);
+
+	# Add link to source if provided
+	if (defined($page_link) && $page_link ne "") {
+		$file_code = "<a href=\"$page_link\">$esc_filename</a>";
+	} else {
+		$file_code = $esc_filename;
+	}
+
+	# First column: filename
+	write_html($handle, <<END_OF_HTML);
+	    <tr>
+	      <td class="coverFile">$file_code</td>
+END_OF_HTML
+	# Columns as defined
+	foreach $entry (@entries) {
+		my ($found, $hit, $med, $hi, $graph) = @{$entry};
+		my $bar_graph;
+		my $class;
+		my $rate;
+
+		# Generate bar graph if requested
+		if ($graph) {
+			$bar_graph = get_bar_graph_code($base_dir, $found,
+							$hit);
+			write_html($handle, <<END_OF_HTML);
+	      <td class="coverBar" align="center">
+	        $bar_graph
+	      </td>
+END_OF_HTML
+		}
+		# Get rate color and text
+		if ($found == 0) {
+			$rate = "-";
+			$class = "Hi";
+		} else {
+			$rate = rate($hit, $found, "&nbsp;%");
+			$class = $rate_name[classify_rate($found, $hit,
+					    $med, $hi)];
+		}
+		if ($opt_missed) {
+			# Show negative number of items without coverage
+			$hit = -($found - $hit);
+		}
+		write_html($handle, <<END_OF_HTML);
+	      <td class="coverPer$class">$rate</td>
+	      <td class="coverNum$class">$hit / $found</td>
+END_OF_HTML
+	}
+	# End of row
+        write_html($handle, <<END_OF_HTML);
+	    </tr>
+END_OF_HTML
+}
+
+
+#
+# write_file_table_detail_entry(filehandle, test_name, ([found, hit], ...))
+#
+# Write entry for detail section in file table.
+#
+
+sub write_file_table_detail_entry(*$@)
+{
+	my ($handle, $test, @entries) = @_;
+	my $entry;
+
+	if ($test eq "") {
+		$test = "<span style=\"font-style:italic\">&lt;unnamed&gt;</span>";
+	} elsif ($test =~ /^(.*),diff$/) {
+		$test = $1." (converted)";
+	}
+	# Testname
+	write_html($handle, <<END_OF_HTML);
+	    <tr>
+	      <td class="testName" colspan=2>$test</td>
+END_OF_HTML
+	# Test data
+	foreach $entry (@entries) {
+		my ($found, $hit) = @{$entry};
+		my $rate = rate($hit, $found, "&nbsp;%");
+
+		write_html($handle, <<END_OF_HTML);
+	      <td class="testPer">$rate</td>
+	      <td class="testNum">$hit&nbsp;/&nbsp;$found</td>
+END_OF_HTML
+	}
+
+        write_html($handle, <<END_OF_HTML);
+	    </tr>
+
+END_OF_HTML
+
+	# *************************************************************
+}
+
+
+#
+# write_file_table_epilog(filehandle)
+#
+# Write end of file table HTML code.
+#
+
+sub write_file_table_epilog(*)
+{
+	# *************************************************************
+
+	write_html($_[0], <<END_OF_HTML)
+	  </table>
+	  </center>
+	  <br>
+
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_test_table_prolog(filehandle, table_heading)
+#
+# Write heading for test case description table.
+#
+
+sub write_test_table_prolog(*$)
+{
+	# *************************************************************
+
+	write_html($_[0], <<END_OF_HTML)
+	  <center>
+	  <table width="80%" cellpadding=2 cellspacing=1 border=0>
+
+	    <tr>
+	      <td><br></td>
+	    </tr>
+
+	    <tr>
+	      <td class="tableHead">$_[1]</td>
+	    </tr>
+
+	    <tr>
+	      <td class="testDescription">
+	        <dl>
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_test_table_entry(filehandle, test_name, test_description)
+#
+# Write entry for the test table.
+#
+
+sub write_test_table_entry(*$$)
+{
+	# *************************************************************
+
+	write_html($_[0], <<END_OF_HTML)
+          <dt>$_[1]<a name="$_[1]">&nbsp;</a></dt>
+          <dd>$_[2]<br><br></dd>
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_test_table_epilog(filehandle)
+#
+# Write end of test description table HTML code.
+#
+
+sub write_test_table_epilog(*)
+{
+	# *************************************************************
+
+	write_html($_[0], <<END_OF_HTML)
+	        </dl>
+	      </td>
+	    </tr>
+	  </table>
+	  </center>
+	  <br>
+
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+sub fmt_centered($$)
+{
+	my ($width, $text) = @_;
+	my $w0 = length($text);
+	my $w1 = $width > $w0 ? int(($width - $w0) / 2) : 0;
+	my $w2 = $width > $w0 ? $width - $w0 - $w1 : 0;
+
+	return (" "x$w1).$text.(" "x$w2);
+}
+
+
+#
+# write_source_prolog(filehandle)
+#
+# Write start of source code table.
+#
+
+sub write_source_prolog(*)
+{
+	my $lineno_heading = "         ";
+	my $branch_heading = "";
+	my $line_heading = fmt_centered($line_field_width, "Line data");
+	my $source_heading = " Source code";
+
+	if ($br_coverage) {
+		$branch_heading = fmt_centered($br_field_width, "Branch data").
+				  " ";
+	}
+	# *************************************************************
+
+	write_html($_[0], <<END_OF_HTML)
+	  <table cellpadding=0 cellspacing=0 border=0>
+	    <tr>
+	      <td><br></td>
+	    </tr>
+	    <tr>
+	      <td>
+<pre class="sourceHeading">${lineno_heading}${branch_heading}${line_heading} ${source_heading}</pre>
+<pre class="source">
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+sub cmp_blocks($$)
+{
+	my ($a, $b) = @_;
+	my ($fa, $fb) = ($a->[0], $b->[0]);
+
+	return $fa->[0] <=> $fb->[0] if ($fa->[0] != $fb->[0]);
+	return $fa->[1] <=> $fb->[1];
+}
+
+#
+# get_branch_blocks(brdata)
+#
+# Group branches that belong to the same basic block.
+#
+# Returns: [block1, block2, ...]
+# block:   [branch1, branch2, ...]
+# branch:  [block_num, branch_num, taken_count, text_length, open, close]
+#
+
+sub get_branch_blocks($)
+{
+	my ($brdata) = @_;
+	my $last_block_num;
+	my $block = [];
+	my @blocks;
+
+	return () if (!defined($brdata));
+
+	# Group branches
+	foreach my $entry (split(/:/, $brdata)) {
+		my ($block_num, $branch, $taken) = split(/,/, $entry);
+		my $br;
+
+		if (defined($last_block_num) && $block_num != $last_block_num) {
+			push(@blocks, $block);
+			$block = [];
+		}
+		$br = [$block_num, $branch, $taken, 3, 0, 0];
+		push(@{$block}, $br);
+		$last_block_num = $block_num;
+	}
+	push(@blocks, $block) if (scalar(@{$block}) > 0);
+
+	# Add braces to first and last branch in group
+	foreach $block (@blocks) {
+		$block->[0]->[$BR_OPEN] = 1;
+		$block->[0]->[$BR_LEN]++;
+		$block->[scalar(@{$block}) - 1]->[$BR_CLOSE] = 1;
+		$block->[scalar(@{$block}) - 1]->[$BR_LEN]++;
+	}
+
+	return sort(cmp_blocks @blocks);
+}
+
+#
+# get_block_len(block)
+#
+# Calculate total text length of all branches in a block of branches.
+#
+
+sub get_block_len($)
+{
+	my ($block) = @_;
+	my $len = 0;
+	my $branch;
+
+	foreach $branch (@{$block}) {
+		$len += $branch->[$BR_LEN];
+	}
+
+	return $len;
+}
+
+
+#
+# get_branch_html(brdata)
+#
+# Return a list of HTML lines which represent the specified branch coverage
+# data in source code view.
+#
+
+sub get_branch_html($)
+{
+	my ($brdata) = @_;
+	my @blocks = get_branch_blocks($brdata);
+	my $block;
+	my $branch;
+	my $line_len = 0;
+	my $line = [];	# [branch2|" ", branch|" ", ...]
+	my @lines;	# [line1, line2, ...]
+	my @result;
+
+	# Distribute blocks to lines
+	foreach $block (@blocks) {
+		my $block_len = get_block_len($block);
+
+		# Does this block fit into the current line?
+		if ($line_len + $block_len <= $br_field_width) {
+			# Add it
+			$line_len += $block_len;
+			push(@{$line}, @{$block});
+			next;
+		} elsif ($block_len <= $br_field_width) {
+			# It would fit if the line was empty - add it to new
+			# line
+			push(@lines, $line);
+			$line_len = $block_len;
+			$line = [ @{$block} ];
+			next;
+		}
+		# Split the block into several lines
+		foreach $branch (@{$block}) {
+			if ($line_len + $branch->[$BR_LEN] >= $br_field_width) {
+				# Start a new line
+				if (($line_len + 1 <= $br_field_width) &&
+				    scalar(@{$line}) > 0 &&
+				    !$line->[scalar(@$line) - 1]->[$BR_CLOSE]) {
+					# Try to align branch symbols to be in
+					# one # row
+					push(@{$line}, " ");
+				}
+				push(@lines, $line);
+				$line_len = 0;
+				$line = [];
+			}
+			push(@{$line}, $branch);
+			$line_len += $branch->[$BR_LEN];
+		}
+	}
+	push(@lines, $line);
+
+	# Convert to HTML
+	foreach $line (@lines) {
+		my $current = "";
+		my $current_len = 0;
+
+		foreach $branch (@$line) {
+			# Skip alignment space
+			if ($branch eq " ") {
+				$current .= " ";
+				$current_len++;
+				next;
+			}
+
+			my ($block_num, $br_num, $taken, $len, $open, $close) =
+			   @{$branch};
+			my $class;
+			my $title;
+			my $text;
+
+			if ($taken eq '-') {
+				$class	= "branchNoExec";
+				$text	= " # ";
+				$title	= "Branch $br_num was not executed";
+			} elsif ($taken == 0) {
+				$class	= "branchNoCov";
+				$text	= " - ";
+				$title	= "Branch $br_num was not taken";
+			} else {
+				$class	= "branchCov";
+				$text	= " + ";
+				$title	= "Branch $br_num was taken $taken ".
+					  "time";
+				$title .= "s" if ($taken > 1);
+			}
+			$current .= "[" if ($open);
+			$current .= "<span class=\"$class\" title=\"$title\">";
+			$current .= $text."</span>";
+			$current .= "]" if ($close);
+			$current_len += $len;
+		}
+
+		# Right-align result text
+		if ($current_len < $br_field_width) {
+			$current = (" "x($br_field_width - $current_len)).
+				   $current;
+		}
+		push(@result, $current);
+	}
+
+	return @result;
+}
+
+
+#
+# format_count(count, width)
+#
+# Return a right-aligned representation of count that fits in width characters.
+#
+
+sub format_count($$)
+{
+	my ($count, $width) = @_;
+	my $result;
+	my $exp;
+
+	$result = sprintf("%*.0f", $width, $count);
+	while (length($result) > $width) {
+		last if ($count < 10);
+		$exp++;
+		$count = int($count/10);
+		$result = sprintf("%*s", $width, ">$count*10^$exp");
+	}
+	return $result;
+}
+
+#
+# write_source_line(filehandle, line_num, source, hit_count, converted,
+#                   brdata)
+#
+# Write formatted source code line. Return a line in a format as needed
+# by gen_png()
+#
+
+sub write_source_line(*$$$$$)
+{
+	my ($handle, $line, $source, $count, $converted, $brdata) = @_;
+	my $source_format;
+	my $count_format;
+	my $result;
+	my $anchor_start = "";
+	my $anchor_end = "";
+	my $count_field_width = $line_field_width - 1;
+	my @br_html;
+	my $html;
+
+	# Get branch HTML data for this line
+	@br_html = get_branch_html($brdata) if ($br_coverage);
+
+	if (!defined($count)) {
+		$result		= "";
+		$source_format	= "";
+		$count_format	= " "x$count_field_width;
+	}
+	elsif ($count == 0) {
+		$result		= $count;
+		$source_format	= '<span class="lineNoCov">';
+		$count_format	= format_count($count, $count_field_width);
+	}
+	elsif ($converted && defined($highlight)) {
+		$result		= "*".$count;
+		$source_format	= '<span class="lineDiffCov">';
+		$count_format	= format_count($count, $count_field_width);
+	}
+	else {
+		$result		= $count;
+		$source_format	= '<span class="lineCov">';
+		$count_format	= format_count($count, $count_field_width);
+	}
+	$result .= ":".$source;
+
+	# Write out a line number navigation anchor every $nav_resolution
+	# lines if necessary
+	$anchor_start	= "<a name=\"$_[1]\">";
+	$anchor_end	= "</a>";
+
+
+	# *************************************************************
+
+	$html = $anchor_start;
+	$html .= "<span class=\"lineNum\">".sprintf("%8d", $line)." </span>";
+	$html .= shift(@br_html).":" if ($br_coverage);
+	$html .= "$source_format$count_format : ";
+	$html .= escape_html($source);
+	$html .= "</span>" if ($source_format);
+	$html .= $anchor_end."\n";
+
+	write_html($handle, $html);
+
+	if ($br_coverage) {
+		# Add lines for overlong branch information
+		foreach (@br_html) {
+			write_html($handle, "<span class=\"lineNum\">".
+				   "         </span>$_\n");
+		}
+	}
+	# *************************************************************
+
+	return($result);
+}
+
+
+#
+# write_source_epilog(filehandle)
+#
+# Write end of source code table.
+#
+
+sub write_source_epilog(*)
+{
+	# *************************************************************
+
+	write_html($_[0], <<END_OF_HTML)
+	</pre>
+	      </td>
+	    </tr>
+	  </table>
+	  <br>
+
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_html_epilog(filehandle, base_dir[, break_frames])
+#
+# Write HTML page footer to FILEHANDLE. BREAK_FRAMES should be set when
+# this page is embedded in a frameset, clicking the URL link will then
+# break this frameset.
+#
+
+sub write_html_epilog(*$;$)
+{
+	my $basedir = $_[1];
+	my $break_code = "";
+	my $epilog;
+
+	if (defined($_[2]))
+	{
+		$break_code = " target=\"_parent\"";
+	}
+
+	# *************************************************************
+
+	write_html($_[0], <<END_OF_HTML)
+	  <table width="100%" border=0 cellspacing=0 cellpadding=0>
+	    <tr><td class="ruler"><img src="$_[1]glass.png" width=3 height=3 alt=""></td></tr>
+	    <tr><td class="versionInfo">Generated by: <a href="$lcov_url"$break_code>$lcov_version</a></td></tr>
+	  </table>
+	  <br>
+END_OF_HTML
+	;
+
+	$epilog = $html_epilog;
+	$epilog =~ s/\@basedir\@/$basedir/g;
+
+	write_html($_[0], $epilog);
+}
+
+
+#
+# write_frameset(filehandle, basedir, basename, pagetitle)
+#
+#
+
+sub write_frameset(*$$$)
+{
+	my $frame_width = $overview_width + 40;
+
+	# *************************************************************
+
+	write_html($_[0], <<END_OF_HTML)
+	<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Frameset//EN">
+
+	<html lang="en">
+
+	<head>
+	  <meta http-equiv="Content-Type" content="text/html; charset=$charset">
+	  <title>$_[3]</title>
+	  <link rel="stylesheet" type="text/css" href="$_[1]gcov.css">
+	</head>
+
+	<frameset cols="$frame_width,*">
+	  <frame src="$_[2].gcov.overview.$html_ext" name="overview">
+	  <frame src="$_[2].gcov.$html_ext" name="source">
+	  <noframes>
+	    <center>Frames not supported by your browser!<br></center>
+	  </noframes>
+	</frameset>
+
+	</html>
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# sub write_overview_line(filehandle, basename, line, link)
+#
+#
+
+sub write_overview_line(*$$$)
+{
+	my $y1 = $_[2] - 1;
+	my $y2 = $y1 + $nav_resolution - 1;
+	my $x2 = $overview_width - 1;
+
+	# *************************************************************
+
+	write_html($_[0], <<END_OF_HTML)
+	    <area shape="rect" coords="0,$y1,$x2,$y2" href="$_[1].gcov.$html_ext#$_[3]" target="source" alt="overview">
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_overview(filehandle, basedir, basename, pagetitle, lines)
+#
+#
+
+sub write_overview(*$$$$)
+{
+	my $index;
+	my $max_line = $_[4] - 1;
+	my $offset;
+
+	# *************************************************************
+
+	write_html($_[0], <<END_OF_HTML)
+	<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+
+	<html lang="en">
+
+	<head>
+	  <title>$_[3]</title>
+	  <meta http-equiv="Content-Type" content="text/html; charset=$charset">
+	  <link rel="stylesheet" type="text/css" href="$_[1]gcov.css">
+	</head>
+
+	<body>
+	  <map name="overview">
+END_OF_HTML
+	;
+
+	# *************************************************************
+
+	# Make $offset the next higher multiple of $nav_resolution
+	$offset = ($nav_offset + $nav_resolution - 1) / $nav_resolution;
+	$offset = sprintf("%d", $offset ) * $nav_resolution;
+
+	# Create image map for overview image
+	for ($index = 1; $index <= $_[4]; $index += $nav_resolution)
+	{
+		# Enforce nav_offset
+		if ($index < $offset + 1)
+		{
+			write_overview_line($_[0], $_[2], $index, 1);
+		}
+		else
+		{
+			write_overview_line($_[0], $_[2], $index, $index - $offset);
+		}
+	}
+
+	# *************************************************************
+
+	write_html($_[0], <<END_OF_HTML)
+	  </map>
+
+	  <center>
+	  <a href="$_[2].gcov.$html_ext#top" target="source">Top</a><br><br>
+	  <img src="$_[2].gcov.png" width=$overview_width height=$max_line alt="Overview" border=0 usemap="#overview">
+	  </center>
+	</body>
+	</html>
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+sub max($$)
+{
+	my ($a, $b) = @_;
+
+	return $a if ($a > $b);
+	return $b;
+}
+
+
+#
+# write_header(filehandle, type, trunc_file_name, rel_file_name, lines_found,
+# lines_hit, funcs_found, funcs_hit, sort_type)
+#
+# Write a complete standard page header. TYPE may be (0, 1, 2, 3, 4)
+# corresponding to (directory view header, file view header, source view
+# header, test case description header, function view header)
+#
+
+sub write_header(*$$$$$$$$$$)
+{
+	local *HTML_HANDLE = $_[0];
+	my $type = $_[1];
+	my $trunc_name = $_[2];
+	my $rel_filename = $_[3];
+	my $lines_found = $_[4];
+	my $lines_hit = $_[5];
+	my $fn_found = $_[6];
+	my $fn_hit = $_[7];
+	my $br_found = $_[8];
+	my $br_hit = $_[9];
+	my $sort_type = $_[10];
+	my $base_dir;
+	my $view;
+	my $test;
+	my $base_name;
+	my $style;
+	my $rate;
+	my @row_left;
+	my @row_right;
+	my $num_rows;
+	my $i;
+	my $esc_trunc_name = escape_html($trunc_name);
+
+	$base_name = basename($rel_filename);
+
+	# Prepare text for "current view" field
+	if ($type == $HDR_DIR)
+	{
+		# Main overview
+		$base_dir = "";
+		$view = $overview_title;
+	}
+	elsif ($type == $HDR_FILE)
+	{
+		# Directory overview
+		$base_dir = get_relative_base_path($rel_filename);
+		$view = "<a href=\"$base_dir"."index.$html_ext\">".
+			"$overview_title</a> - $esc_trunc_name";
+	}
+	elsif ($type == $HDR_SOURCE || $type == $HDR_FUNC)
+	{
+		# File view
+		my $dir_name = dirname($rel_filename);
+		my $esc_base_name = escape_html($base_name);
+		my $esc_dir_name = escape_html($dir_name);
+
+		$base_dir = get_relative_base_path($dir_name);
+		if ($frames)
+		{
+			# Need to break frameset when clicking any of these
+			# links
+			$view = "<a href=\"$base_dir"."index.$html_ext\" ".
+				"target=\"_parent\">$overview_title</a> - ".
+				"<a href=\"index.$html_ext\" target=\"_parent\">".
+				"$esc_dir_name</a> - $esc_base_name";
+		}
+		else
+		{
+			$view = "<a href=\"$base_dir"."index.$html_ext\">".
+				"$overview_title</a> - ".
+				"<a href=\"index.$html_ext\">".
+				"$esc_dir_name</a> - $esc_base_name";
+		}
+
+		# Add function suffix
+		if ($func_coverage) {
+			$view .= "<span style=\"font-size: 80%;\">";
+			if ($type == $HDR_SOURCE) {
+				if ($sort) {
+					$view .= " (source / <a href=\"$base_name.func-sort-c.$html_ext\">functions</a>)";
+				} else {
+					$view .= " (source / <a href=\"$base_name.func.$html_ext\">functions</a>)";
+				}
+			} elsif ($type == $HDR_FUNC) {
+				$view .= " (<a href=\"$base_name.gcov.$html_ext\">source</a> / functions)";
+			}
+			$view .= "</span>";
+		}
+	}
+	elsif ($type == $HDR_TESTDESC)
+	{
+		# Test description header
+		$base_dir = "";
+		$view = "<a href=\"$base_dir"."index.$html_ext\">".
+			"$overview_title</a> - test case descriptions";
+	}
+
+	# Prepare text for "test" field
+	$test = escape_html($test_title);
+
+	# Append link to test description page if available
+	if (%test_description && ($type != $HDR_TESTDESC))
+	{
+		if ($frames && ($type == $HDR_SOURCE || $type == $HDR_FUNC))
+		{
+			# Need to break frameset when clicking this link
+			$test .= " ( <span style=\"font-size:80%;\">".
+				 "<a href=\"$base_dir".
+				 "descriptions.$html_ext\" target=\"_parent\">".
+				 "view descriptions</a></span> )";
+		}
+		else
+		{
+			$test .= " ( <span style=\"font-size:80%;\">".
+				 "<a href=\"$base_dir".
+				 "descriptions.$html_ext\">".
+				 "view descriptions</a></span> )";
+		}
+	}
+
+	# Write header
+	write_header_prolog(*HTML_HANDLE, $base_dir);
+
+	# Left row
+	push(@row_left, [[ "10%", "headerItem", "Current view:" ],
+			 [ "35%", "headerValue", $view ]]);
+	push(@row_left, [[undef, "headerItem", "Test:"],
+			 [undef, "headerValue", $test]]);
+	push(@row_left, [[undef, "headerItem", "Date:"],
+			 [undef, "headerValue", $date]]);
+
+	# Right row
+	if ($legend && ($type == $HDR_SOURCE || $type == $HDR_FUNC)) {
+		my $text = <<END_OF_HTML;
+            Lines:
+            <span class="coverLegendCov">hit</span>
+            <span class="coverLegendNoCov">not hit</span>
+END_OF_HTML
+		if ($br_coverage) {
+			$text .= <<END_OF_HTML;
+            | Branches:
+            <span class="coverLegendCov">+</span> taken
+            <span class="coverLegendNoCov">-</span> not taken
+            <span class="coverLegendNoCov">#</span> not executed
+END_OF_HTML
+		}
+		push(@row_left, [[undef, "headerItem", "Legend:"],
+				 [undef, "headerValueLeg", $text]]);
+	} elsif ($legend && ($type != $HDR_TESTDESC)) {
+		my $text = <<END_OF_HTML;
+	    Rating:
+            <span class="coverLegendCovLo" title="Coverage rates below $med_limit % are classified as low">low: &lt; $med_limit %</span>
+            <span class="coverLegendCovMed" title="Coverage rates between $med_limit % and $hi_limit % are classified as medium">medium: &gt;= $med_limit %</span>
+            <span class="coverLegendCovHi" title="Coverage rates of $hi_limit % and more are classified as high">high: &gt;= $hi_limit %</span>
+END_OF_HTML
+		push(@row_left, [[undef, "headerItem", "Legend:"],
+				 [undef, "headerValueLeg", $text]]);
+	}
+	if ($type == $HDR_TESTDESC) {
+		push(@row_right, [[ "55%" ]]);
+	} else {
+		push(@row_right, [["15%", undef, undef ],
+				  ["10%", "headerCovTableHead", "Hit" ],
+				  ["10%", "headerCovTableHead", "Total" ],
+				  ["15%", "headerCovTableHead", "Coverage"]]);
+	}
+	# Line coverage
+	$style = $rate_name[classify_rate($lines_found, $lines_hit,
+					  $med_limit, $hi_limit)];
+	$rate = rate($lines_hit, $lines_found, " %");
+	push(@row_right, [[undef, "headerItem", "Lines:"],
+			  [undef, "headerCovTableEntry", $lines_hit],
+			  [undef, "headerCovTableEntry", $lines_found],
+			  [undef, "headerCovTableEntry$style", $rate]])
+			if ($type != $HDR_TESTDESC);
+	# Function coverage
+	if ($func_coverage) {
+		$style = $rate_name[classify_rate($fn_found, $fn_hit,
+						  $fn_med_limit, $fn_hi_limit)];
+		$rate = rate($fn_hit, $fn_found, " %");
+		push(@row_right, [[undef, "headerItem", "Functions:"],
+				  [undef, "headerCovTableEntry", $fn_hit],
+				  [undef, "headerCovTableEntry", $fn_found],
+				  [undef, "headerCovTableEntry$style", $rate]])
+			if ($type != $HDR_TESTDESC);
+	}
+	# Branch coverage
+	if ($br_coverage) {
+		$style = $rate_name[classify_rate($br_found, $br_hit,
+						  $br_med_limit, $br_hi_limit)];
+		$rate = rate($br_hit, $br_found, " %");
+		push(@row_right, [[undef, "headerItem", "Branches:"],
+				  [undef, "headerCovTableEntry", $br_hit],
+				  [undef, "headerCovTableEntry", $br_found],
+				  [undef, "headerCovTableEntry$style", $rate]])
+			if ($type != $HDR_TESTDESC);
+	}
+
+	# Print rows
+	$num_rows = max(scalar(@row_left), scalar(@row_right));
+	for ($i = 0; $i < $num_rows; $i++) {
+		my $left = $row_left[$i];
+		my $right = $row_right[$i];
+
+		if (!defined($left)) {
+			$left = [[undef, undef, undef], [undef, undef, undef]];
+		}
+		if (!defined($right)) {
+			$right = [];
+		}
+		write_header_line(*HTML_HANDLE, @{$left},
+				  [ $i == 0 ? "5%" : undef, undef, undef],
+				  @{$right});
+	}
+
+	# Fourth line
+	write_header_epilog(*HTML_HANDLE, $base_dir);
+}
+
+sub get_sorted_by_rate($$)
+{
+	my ($hash, $type) = @_;
+
+	if ($type == $SORT_LINE) {
+		# Sort by line coverage
+		return sort({$hash->{$a}[7] <=> $hash->{$b}[7]} keys(%{$hash}));
+	} elsif ($type == $SORT_FUNC) {
+		# Sort by function coverage;
+		return sort({$hash->{$a}[8] <=> $hash->{$b}[8]}	keys(%{$hash}));
+	} elsif ($type == $SORT_BRANCH) {
+		# Sort by br coverage;
+		return sort({$hash->{$a}[9] <=> $hash->{$b}[9]}	keys(%{$hash}));
+	}
+}
+
+sub get_sorted_by_missed($$)
+{
+	my ($hash, $type) = @_;
+
+	if ($type == $SORT_LINE) {
+		# Sort by number of instrumented lines without coverage
+		return sort(
+			{
+				($hash->{$b}[0] - $hash->{$b}[1]) <=>
+				($hash->{$a}[0] - $hash->{$a}[1])
+			} keys(%{$hash}));
+	} elsif ($type == $SORT_FUNC) {
+		# Sort by number of instrumented functions without coverage
+		return sort(
+			{
+				($hash->{$b}[2] - $hash->{$b}[3]) <=>
+				($hash->{$a}[2] - $hash->{$a}[3])
+			} keys(%{$hash}));
+	} elsif ($type == $SORT_BRANCH) {
+		# Sort by number of instrumented branches without coverage
+		return sort(
+			{
+				($hash->{$b}[4] - $hash->{$b}[5]) <=>
+				($hash->{$a}[4] - $hash->{$a}[5])
+			} keys(%{$hash}));
+	}
+}
+
+#
+# get_sorted_keys(hash_ref, sort_type)
+#
+# hash_ref: filename -> stats
+# stats: [ lines_found, lines_hit, fn_found, fn_hit, br_found, br_hit,
+#          link_name, line_rate, fn_rate, br_rate ]
+#
+
+sub get_sorted_keys($$)
+{
+	my ($hash, $type) = @_;
+
+	if ($type == $SORT_FILE) {
+		# Sort by name
+		return sort(keys(%{$hash}));
+	} elsif ($opt_missed) {
+		return get_sorted_by_missed($hash, $type);
+	} else {
+		return get_sorted_by_rate($hash, $type);
+	}
+}
+
+sub get_sort_code($$$)
+{
+	my ($link, $alt, $base) = @_;
+	my $png;
+	my $link_start;
+	my $link_end;
+
+	if (!defined($link)) {
+		$png = "glass.png";
+		$link_start = "";
+		$link_end = "";
+	} else {
+		$png = "updown.png";
+		$link_start = '<a href="'.$link.'">';
+		$link_end = "</a>";
+	}
+
+	return ' <span class="tableHeadSort">'.$link_start.
+	       '<img src="'.$base.$png.'" width=10 height=14 '.
+	       'alt="'.$alt.'" title="'.$alt.'" border=0>'.$link_end.'</span>';
+}
+
+sub get_file_code($$$$)
+{
+	my ($type, $text, $sort_button, $base) = @_;
+	my $result = $text;
+	my $link;
+
+	if ($sort_button) {
+		if ($type == $HEAD_NO_DETAIL) {
+			$link = "index.$html_ext";
+		} else {
+			$link = "index-detail.$html_ext";
+		}
+	}
+	$result .= get_sort_code($link, "Sort by name", $base);
+
+	return $result;
+}
+
+sub get_line_code($$$$$)
+{
+	my ($type, $sort_type, $text, $sort_button, $base) = @_;
+	my $result = $text;
+	my $sort_link;
+
+	if ($type == $HEAD_NO_DETAIL) {
+		# Just text
+		if ($sort_button) {
+			$sort_link = "index-sort-l.$html_ext";
+		}
+	} elsif ($type == $HEAD_DETAIL_HIDDEN) {
+		# Text + link to detail view
+		$result .= ' ( <a class="detail" href="index-detail'.
+			   $fileview_sortname[$sort_type].'.'.$html_ext.
+			   '">show details</a> )';
+		if ($sort_button) {
+			$sort_link = "index-sort-l.$html_ext";
+		}
+	} else {
+		# Text + link to standard view
+		$result .= ' ( <a class="detail" href="index'.
+			   $fileview_sortname[$sort_type].'.'.$html_ext.
+			   '">hide details</a> )';
+		if ($sort_button) {
+			$sort_link = "index-detail-sort-l.$html_ext";
+		}
+	}
+	# Add sort button
+	$result .= get_sort_code($sort_link, "Sort by line coverage", $base);
+
+	return $result;
+}
+
+sub get_func_code($$$$)
+{
+	my ($type, $text, $sort_button, $base) = @_;
+	my $result = $text;
+	my $link;
+
+	if ($sort_button) {
+		if ($type == $HEAD_NO_DETAIL) {
+			$link = "index-sort-f.$html_ext";
+		} else {
+			$link = "index-detail-sort-f.$html_ext";
+		}
+	}
+	$result .= get_sort_code($link, "Sort by function coverage", $base);
+	return $result;
+}
+
+sub get_br_code($$$$)
+{
+	my ($type, $text, $sort_button, $base) = @_;
+	my $result = $text;
+	my $link;
+
+	if ($sort_button) {
+		if ($type == $HEAD_NO_DETAIL) {
+			$link = "index-sort-b.$html_ext";
+		} else {
+			$link = "index-detail-sort-b.$html_ext";
+		}
+	}
+	$result .= get_sort_code($link, "Sort by branch coverage", $base);
+	return $result;
+}
+
+#
+# write_file_table(filehandle, base_dir, overview, testhash, testfnchash,
+#                  testbrhash, fileview, sort_type)
+#
+# Write a complete file table. OVERVIEW is a reference to a hash containing
+# the following mapping:
+#
+#   filename -> "lines_found,lines_hit,funcs_found,funcs_hit,page_link,
+#		 func_link"
+#
+# TESTHASH is a reference to the following hash:
+#
+#   filename -> \%testdata
+#   %testdata: name of test affecting this file -> \%testcount
+#   %testcount: line number -> execution count for a single test
+#
+# Heading of first column is "Filename" if FILEVIEW is true, "Directory name"
+# otherwise.
+#
+
+sub write_file_table(*$$$$$$$)
+{
+	local *HTML_HANDLE = $_[0];
+	my $base_dir = $_[1];
+	my $overview = $_[2];
+	my $testhash = $_[3];
+	my $testfnchash = $_[4];
+	my $testbrhash = $_[5];
+	my $fileview = $_[6];
+	my $sort_type = $_[7];
+	my $filename;
+	my $bar_graph;
+	my $hit;
+	my $found;
+	my $fn_found;
+	my $fn_hit;
+	my $br_found;
+	my $br_hit;
+	my $page_link;
+	my $testname;
+	my $testdata;
+	my $testfncdata;
+	my $testbrdata;
+	my %affecting_tests;
+	my $line_code = "";
+	my $func_code;
+	my $br_code;
+	my $file_code;
+	my @head_columns;
+
+	# Determine HTML code for column headings
+	if (($base_dir ne "") && $show_details)
+	{
+		my $detailed = keys(%{$testhash});
+
+		$file_code = get_file_code($detailed ? $HEAD_DETAIL_HIDDEN :
+					$HEAD_NO_DETAIL,
+					$fileview ? "Filename" : "Directory",
+					$sort && $sort_type != $SORT_FILE,
+					$base_dir);
+		$line_code = get_line_code($detailed ? $HEAD_DETAIL_SHOWN :
+					$HEAD_DETAIL_HIDDEN,
+					$sort_type,
+					"Line Coverage",
+					$sort && $sort_type != $SORT_LINE,
+					$base_dir);
+		$func_code = get_func_code($detailed ? $HEAD_DETAIL_HIDDEN :
+					$HEAD_NO_DETAIL,
+					"Functions",
+					$sort && $sort_type != $SORT_FUNC,
+					$base_dir);
+		$br_code = get_br_code($detailed ? $HEAD_DETAIL_HIDDEN :
+					$HEAD_NO_DETAIL,
+					"Branches",
+					$sort && $sort_type != $SORT_BRANCH,
+					$base_dir);
+	} else {
+		$file_code = get_file_code($HEAD_NO_DETAIL,
+					$fileview ? "Filename" : "Directory",
+					$sort && $sort_type != $SORT_FILE,
+					$base_dir);
+		$line_code = get_line_code($HEAD_NO_DETAIL, $sort_type, "Line Coverage",
+					$sort && $sort_type != $SORT_LINE,
+					$base_dir);
+		$func_code = get_func_code($HEAD_NO_DETAIL, "Functions",
+					$sort && $sort_type != $SORT_FUNC,
+					$base_dir);
+		$br_code = get_br_code($HEAD_NO_DETAIL, "Branches",
+					$sort && $sort_type != $SORT_BRANCH,
+					$base_dir);
+	}
+	push(@head_columns, [ $line_code, 3 ]);
+	push(@head_columns, [ $func_code, 2]) if ($func_coverage);
+	push(@head_columns, [ $br_code, 2]) if ($br_coverage);
+
+	write_file_table_prolog(*HTML_HANDLE, $file_code, @head_columns);
+
+	foreach $filename (get_sorted_keys($overview, $sort_type))
+	{
+		my @columns;
+		($found, $hit, $fn_found, $fn_hit, $br_found, $br_hit,
+		 $page_link) = @{$overview->{$filename}};
+
+		# Line coverage
+		push(@columns, [$found, $hit, $med_limit, $hi_limit, 1]);
+		# Function coverage
+		if ($func_coverage) {
+			push(@columns, [$fn_found, $fn_hit, $fn_med_limit,
+					$fn_hi_limit, 0]);
+		}
+		# Branch coverage
+		if ($br_coverage) {
+			push(@columns, [$br_found, $br_hit, $br_med_limit,
+					$br_hi_limit, 0]);
+		}
+		write_file_table_entry(*HTML_HANDLE, $base_dir, $filename,
+				       $page_link, @columns);
+
+		$testdata = $testhash->{$filename};
+		$testfncdata = $testfnchash->{$filename};
+		$testbrdata = $testbrhash->{$filename};
+
+		# Check whether we should write test specific coverage
+		# as well
+		if (!($show_details && $testdata)) { next; }
+
+		# Filter out those tests that actually affect this file
+		%affecting_tests = %{ get_affecting_tests($testdata,
+					$testfncdata, $testbrdata) };
+
+		# Does any of the tests affect this file at all?
+		if (!%affecting_tests) { next; }
+
+		foreach $testname (keys(%affecting_tests))
+		{
+			my @results;
+			($found, $hit, $fn_found, $fn_hit, $br_found, $br_hit) =
+				split(",", $affecting_tests{$testname});
+
+			# Insert link to description of available
+			if ($test_description{$testname})
+			{
+				$testname = "<a href=\"$base_dir".
+					    "descriptions.$html_ext#$testname\">".
+					    "$testname</a>";
+			}
+
+			push(@results, [$found, $hit]);
+			push(@results, [$fn_found, $fn_hit]) if ($func_coverage);
+			push(@results, [$br_found, $br_hit]) if ($br_coverage);
+			write_file_table_detail_entry(*HTML_HANDLE, $testname,
+				@results);
+		}
+	}
+
+	write_file_table_epilog(*HTML_HANDLE);
+}
+
+
+#
+# get_found_and_hit(hash)
+#
+# Return the count for entries (found) and entries with an execution count
+# greater than zero (hit) in a hash (linenumber -> execution count) as
+# a list (found, hit)
+#
+
+sub get_found_and_hit($)
+{
+	my %hash = %{$_[0]};
+	my $found = 0;
+	my $hit = 0;
+
+	# Calculate sum
+	$found = 0;
+	$hit = 0;
+			
+	foreach (keys(%hash))
+	{
+		$found++;
+		if ($hash{$_}>0) { $hit++; }
+	}
+
+	return ($found, $hit);
+}
+
+
+#
+# get_func_found_and_hit(sumfnccount)
+#
+# Return (f_found, f_hit) for sumfnccount
+#
+
+sub get_func_found_and_hit($)
+{
+	my ($sumfnccount) = @_;
+	my $function;
+	my $fn_found;
+	my $fn_hit;
+
+	$fn_found = scalar(keys(%{$sumfnccount}));
+	$fn_hit = 0;
+	foreach $function (keys(%{$sumfnccount})) {
+		if ($sumfnccount->{$function} > 0) {
+			$fn_hit++;
+		}
+	}
+	return ($fn_found, $fn_hit);
+}
+
+
+sub get_br_found_and_hit($)
+{
+	my ($brcount) = @_;
+	my $db;
+
+	$db = brcount_to_db($brcount);
+
+	return brcount_db_get_found_and_hit($db);
+}
+
+
+#
+# get_affecting_tests(testdata, testfncdata, testbrdata)
+#
+# HASHREF contains a mapping filename -> (linenumber -> exec count). Return
+# a hash containing mapping filename -> "lines found, lines hit" for each
+# filename which has a nonzero hit count.
+#
+
+sub get_affecting_tests($$$)
+{
+	my ($testdata, $testfncdata, $testbrdata) = @_;
+	my $testname;
+	my $testcount;
+	my $testfnccount;
+	my $testbrcount;
+	my %result;
+	my $found;
+	my $hit;
+	my $fn_found;
+	my $fn_hit;
+	my $br_found;
+	my $br_hit;
+
+	foreach $testname (keys(%{$testdata}))
+	{
+		# Get (line number -> count) hash for this test case
+		$testcount = $testdata->{$testname};
+		$testfnccount = $testfncdata->{$testname};
+		$testbrcount = $testbrdata->{$testname};
+
+		# Calculate sum
+		($found, $hit) = get_found_and_hit($testcount);
+		($fn_found, $fn_hit) = get_func_found_and_hit($testfnccount);
+		($br_found, $br_hit) = get_br_found_and_hit($testbrcount);
+
+		if ($hit>0)
+		{
+			$result{$testname} = "$found,$hit,$fn_found,$fn_hit,".
+					     "$br_found,$br_hit";
+		}
+	}
+
+	return(\%result);
+}
+
+
+sub get_hash_reverse($)
+{
+	my ($hash) = @_;
+	my %result;
+
+	foreach (keys(%{$hash})) {
+		$result{$hash->{$_}} = $_;
+	}
+
+	return \%result;
+}
+
+#
+# write_source(filehandle, source_filename, count_data, checksum_data,
+#              converted_data, func_data, sumbrcount)
+#
+# Write an HTML view of a source code file. Returns a list containing
+# data as needed by gen_png().
+#
+# Die on error.
+#
+
+sub write_source($$$$$$$)
+{
+	local *HTML_HANDLE = $_[0];
+	local *SOURCE_HANDLE;
+	my $source_filename = $_[1];
+	my %count_data;
+	my $line_number;
+	my @result;
+	my $checkdata = $_[3];
+	my $converted = $_[4];
+	my $funcdata  = $_[5];
+	my $sumbrcount = $_[6];
+	my $datafunc = get_hash_reverse($funcdata);
+	my @file;
+
+	if ($_[2])
+	{
+		%count_data = %{$_[2]};
+	}
+
+	if (!open(SOURCE_HANDLE, "<", $source_filename)) {
+		my @lines;
+		my $last_line = 0;
+
+		if (!$ignore[$ERROR_SOURCE]) {
+			die("ERROR: cannot read $source_filename\n");
+		}
+
+		# Continue without source file
+		warn("WARNING: cannot read $source_filename!\n");
+
+		@lines = sort( { $a <=> $b }  keys(%count_data));
+		if (@lines) {
+			$last_line = $lines[scalar(@lines) - 1];
+		}
+		return ( ":" ) if ($last_line < 1);
+
+		# Simulate gcov behavior
+		for ($line_number = 1; $line_number <= $last_line;
+		     $line_number++) {
+			push(@file, "/* EOF */");
+		}
+	} else {
+		@file = <SOURCE_HANDLE>;
+	}
+	
+	write_source_prolog(*HTML_HANDLE);
+	$line_number = 0;
+	foreach (@file) {
+		$line_number++;
+		chomp($_);
+
+		# Also remove CR from line-end
+		s/\015$//;
+
+		# Source code matches coverage data?
+		if (defined($checkdata->{$line_number}) &&
+		    ($checkdata->{$line_number} ne md5_base64($_)))
+		{
+			die("ERROR: checksum mismatch  at $source_filename:".
+			    "$line_number\n");
+		}
+
+		push (@result,
+		      write_source_line(HTML_HANDLE, $line_number,
+					$_, $count_data{$line_number},
+					$converted->{$line_number},
+					$sumbrcount->{$line_number}));
+	}
+
+	close(SOURCE_HANDLE);
+	write_source_epilog(*HTML_HANDLE);
+	return(@result);
+}
+
+
+sub funcview_get_func_code($$$)
+{
+	my ($name, $base, $type) = @_;
+	my $result;
+	my $link;
+
+	if ($sort && $type == 1) {
+		$link = "$name.func.$html_ext";
+	}
+	$result = "Function Name";
+	$result .= get_sort_code($link, "Sort by function name", $base);
+
+	return $result;
+}
+
+sub funcview_get_count_code($$$)
+{
+	my ($name, $base, $type) = @_;
+	my $result;
+	my $link;
+
+	if ($sort && $type == 0) {
+		$link = "$name.func-sort-c.$html_ext";
+	}
+	$result = "Hit count";
+	$result .= get_sort_code($link, "Sort by hit count", $base);
+
+	return $result;
+}
+
+#
+# funcview_get_sorted(funcdata, sumfncdata, sort_type)
+#
+# Depending on the value of sort_type, return a list of functions sorted
+# by name (type 0) or by the associated call count (type 1).
+#
+
+sub funcview_get_sorted($$$)
+{
+	my ($funcdata, $sumfncdata, $type) = @_;
+
+	if ($type == 0) {
+		return sort(keys(%{$funcdata}));
+	}
+	return sort({
+		$sumfncdata->{$b} == $sumfncdata->{$a} ?
+			$a cmp $b : $sumfncdata->{$a} <=> $sumfncdata->{$b}
+		} keys(%{$sumfncdata}));
+}
+
+sub demangle_list($)
+{
+	my ($list) = @_;
+	my $tmpfile;
+	my $handle;
+	my %demangle;
+	my $demangle_arg = "";
+	my %versions;
+
+	# Write function names to file
+	($handle, $tmpfile) = tempfile();
+	die("ERROR: could not create temporary file") if (!defined($tmpfile));
+	print($handle join("\n", @$list));
+	close($handle);
+
+	# Extra flag necessary on OS X so that symbols listed by gcov get demangled
+	# properly.
+	if ($^O eq "darwin") {
+		$demangle_arg = "--no-strip-underscores";
+	}
+
+	# Build translation hash from c++filt output
+	open($handle, "-|", "c++filt $demangle_arg < $tmpfile") or
+		die("ERROR: could not run c++filt: $!\n");
+	foreach my $func (@$list) {
+		my $translated = <$handle>;
+		my $version;
+
+		last if (!defined($translated));
+		chomp($translated);
+
+		$version = ++$versions{$translated};
+		$translated .= ".$version" if ($version > 1);
+		$demangle{$func} = $translated;
+	}
+	close($handle);
+
+	if (scalar(keys(%demangle)) != scalar(@$list)) {
+		die("ERROR: c++filt output not as expected (".
+		    scalar(keys(%demangle))." vs ".scalar(@$list).") lines\n");
+	}
+
+	unlink($tmpfile) or
+		warn("WARNING: could not remove temporary file $tmpfile: $!\n");
+
+	return \%demangle;
+}
+
+#
+# write_function_table(filehandle, source_file, sumcount, funcdata,
+#		       sumfnccount, testfncdata, sumbrcount, testbrdata,
+#		       base_name, base_dir, sort_type)
+#
+# Write an HTML table listing all functions in a source file, including
+# also function call counts and line coverages inside of each function.
+#
+# Die on error.
+#
+
+sub write_function_table(*$$$$$$$$$$)
+{
+	local *HTML_HANDLE = $_[0];
+	my $source = $_[1];
+	my $sumcount = $_[2];
+	my $funcdata = $_[3];
+	my $sumfncdata = $_[4];
+	my $testfncdata = $_[5];
+	my $sumbrcount = $_[6];
+	my $testbrdata = $_[7];
+	my $name = $_[8];
+	my $base = $_[9];
+	my $type = $_[10];
+	my $func;
+	my $func_code;
+	my $count_code;
+	my $demangle;
+
+	# Get HTML code for headings
+	$func_code = funcview_get_func_code($name, $base, $type);
+	$count_code = funcview_get_count_code($name, $base, $type);
+	write_html(*HTML_HANDLE, <<END_OF_HTML)
+	  <center>
+	  <table width="60%" cellpadding=1 cellspacing=1 border=0>
+	    <tr><td><br></td></tr>
+	    <tr>
+	      <td width="80%" class="tableHead">$func_code</td>
+	      <td width="20%" class="tableHead">$count_code</td>
+	    </tr>
+END_OF_HTML
+	;
+
+	# Get demangle translation hash
+	if ($demangle_cpp) {
+		$demangle = demangle_list([ sort(keys(%{$funcdata})) ]);
+	}
+
+	# Get a sorted table
+	foreach $func (funcview_get_sorted($funcdata, $sumfncdata, $type)) {
+		if (!defined($funcdata->{$func}))
+		{
+			next;
+		}
+
+		my $startline = $funcdata->{$func} - $func_offset;
+		my $name = $func;
+		my $count = $sumfncdata->{$name};
+		my $countstyle;
+
+		# Replace function name with demangled version if available
+		$name = $demangle->{$name} if (exists($demangle->{$name}));
+
+		# Escape special characters
+		$name = escape_html($name);
+		if ($startline < 1) {
+			$startline = 1;
+		}
+		if ($count == 0) {
+			$countstyle = "coverFnLo";
+		} else {
+			$countstyle = "coverFnHi";
+		}
+
+		write_html(*HTML_HANDLE, <<END_OF_HTML)
+	    <tr>
+              <td class="coverFn"><a href="$source#$startline">$name</a></td>
+              <td class="$countstyle">$count</td>
+            </tr>
+END_OF_HTML
+                ;
+	}
+	write_html(*HTML_HANDLE, <<END_OF_HTML)
+	  </table>
+	  <br>
+	  </center>
+END_OF_HTML
+	;
+}
+
+
+#
+# info(printf_parameter)
+#
+# Use printf to write PRINTF_PARAMETER to stdout only when the $quiet flag
+# is not set.
+#
+
+sub info(@)
+{
+	if (!$quiet)
+	{
+		# Print info string
+		printf(@_);
+	}
+}
+
+
+#
+# subtract_counts(data_ref, base_ref)
+#
+
+sub subtract_counts($$)
+{
+	my %data = %{$_[0]};
+	my %base = %{$_[1]};
+	my $line;
+	my $data_count;
+	my $base_count;
+	my $hit = 0;
+	my $found = 0;
+
+	foreach $line (keys(%data))
+	{
+		$found++;
+		$data_count = $data{$line};
+		$base_count = $base{$line};
+
+		if (defined($base_count))
+		{
+			$data_count -= $base_count;
+
+			# Make sure we don't get negative numbers
+			if ($data_count<0) { $data_count = 0; }
+		}
+
+		$data{$line} = $data_count;
+		if ($data_count > 0) { $hit++; }
+	}
+
+	return (\%data, $found, $hit);
+}
+
+
+#
+# subtract_fnccounts(data, base)
+#
+# Subtract function call counts found in base from those in data.
+# Return (data, f_found, f_hit).
+#
+
+sub subtract_fnccounts($$)
+{
+	my %data;
+	my %base;
+	my $func;
+	my $data_count;
+	my $base_count;
+	my $fn_hit = 0;
+	my $fn_found = 0;
+
+	%data = %{$_[0]} if (defined($_[0]));
+	%base = %{$_[1]} if (defined($_[1]));
+	foreach $func (keys(%data)) {
+		$fn_found++;
+		$data_count = $data{$func};
+		$base_count = $base{$func};
+
+		if (defined($base_count)) {
+			$data_count -= $base_count;
+
+			# Make sure we don't get negative numbers
+			if ($data_count < 0) {
+				$data_count = 0;
+			}
+		}
+
+		$data{$func} = $data_count;
+		if ($data_count > 0) {
+			$fn_hit++;
+		}
+	}
+
+	return (\%data, $fn_found, $fn_hit);
+}
+
+
+#
+# apply_baseline(data_ref, baseline_ref)
+#
+# Subtract the execution counts found in the baseline hash referenced by
+# BASELINE_REF from actual data in DATA_REF.
+#
+
+sub apply_baseline($$)
+{
+	my %data_hash = %{$_[0]};
+	my %base_hash = %{$_[1]};
+	my $filename;
+	my $testname;
+	my $data;
+	my $data_testdata;
+	my $data_funcdata;
+	my $data_checkdata;
+	my $data_testfncdata;
+	my $data_testbrdata;
+	my $data_count;
+	my $data_testfnccount;
+	my $data_testbrcount;
+	my $base;
+	my $base_checkdata;
+	my $base_sumfnccount;
+	my $base_sumbrcount;
+	my $base_count;
+	my $sumcount;
+	my $sumfnccount;
+	my $sumbrcount;
+	my $found;
+	my $hit;
+	my $fn_found;
+	my $fn_hit;
+	my $br_found;
+	my $br_hit;
+
+	foreach $filename (keys(%data_hash))
+	{
+		# Get data set for data and baseline
+		$data = $data_hash{$filename};
+		$base = $base_hash{$filename};
+
+		# Skip data entries for which no base entry exists
+		if (!defined($base))
+		{
+			next;
+		}
+
+		# Get set entries for data and baseline
+		($data_testdata, undef, $data_funcdata, $data_checkdata,
+		 $data_testfncdata, undef, $data_testbrdata) =
+			get_info_entry($data);
+		(undef, $base_count, undef, $base_checkdata, undef,
+		 $base_sumfnccount, undef, $base_sumbrcount) =
+			get_info_entry($base);
+
+		# Check for compatible checksums
+		merge_checksums($data_checkdata, $base_checkdata, $filename);
+
+		# sumcount has to be calculated anew
+		$sumcount = {};
+		$sumfnccount = {};
+		$sumbrcount = {};
+
+		# For each test case, subtract test specific counts
+		foreach $testname (keys(%{$data_testdata}))
+		{
+			# Get counts of both data and baseline
+			$data_count = $data_testdata->{$testname};
+			$data_testfnccount = $data_testfncdata->{$testname};
+			$data_testbrcount = $data_testbrdata->{$testname};
+
+			($data_count, undef, $hit) =
+				subtract_counts($data_count, $base_count);
+			($data_testfnccount) =
+				subtract_fnccounts($data_testfnccount,
+						   $base_sumfnccount);
+			($data_testbrcount) =
+				combine_brcount($data_testbrcount,
+						 $base_sumbrcount, $BR_SUB);
+
+
+			# Check whether this test case did hit any line at all
+			if ($hit > 0)
+			{
+				# Write back resulting hash
+				$data_testdata->{$testname} = $data_count;
+				$data_testfncdata->{$testname} =
+					$data_testfnccount;
+				$data_testbrdata->{$testname} =
+					$data_testbrcount;
+			}
+			else
+			{
+				# Delete test case which did not impact this
+				# file
+				delete($data_testdata->{$testname});
+				delete($data_testfncdata->{$testname});
+				delete($data_testbrdata->{$testname});
+			}
+
+			# Add counts to sum of counts
+			($sumcount, $found, $hit) =
+				add_counts($sumcount, $data_count);
+			($sumfnccount, $fn_found, $fn_hit) =
+				add_fnccount($sumfnccount, $data_testfnccount);
+			($sumbrcount, $br_found, $br_hit) =
+				combine_brcount($sumbrcount, $data_testbrcount,
+						$BR_ADD);
+		}
+
+		# Write back resulting entry
+		set_info_entry($data, $data_testdata, $sumcount, $data_funcdata,
+			       $data_checkdata, $data_testfncdata, $sumfnccount,
+			       $data_testbrdata, $sumbrcount, $found, $hit,
+			       $fn_found, $fn_hit, $br_found, $br_hit);
+
+		$data_hash{$filename} = $data;
+	}
+
+	return (\%data_hash);
+}
+
+
+#
+# remove_unused_descriptions()
+#
+# Removes all test descriptions from the global hash %test_description which
+# are not present in %info_data.
+#
+
+sub remove_unused_descriptions()
+{
+	my $filename;		# The current filename
+	my %test_list;		# Hash containing found test names
+	my $test_data;		# Reference to hash test_name -> count_data
+	my $before;		# Initial number of descriptions
+	my $after;		# Remaining number of descriptions
+	
+	$before = scalar(keys(%test_description));
+
+	foreach $filename (keys(%info_data))
+	{
+		($test_data) = get_info_entry($info_data{$filename});
+		foreach (keys(%{$test_data}))
+		{
+			$test_list{$_} = "";
+		}
+	}
+
+	# Remove descriptions for tests which are not in our list
+	foreach (keys(%test_description))
+	{
+		if (!defined($test_list{$_}))
+		{
+			delete($test_description{$_});
+		}
+	}
+
+	$after = scalar(keys(%test_description));
+	if ($after < $before)
+	{
+		info("Removed ".($before - $after).
+		     " unused descriptions, $after remaining.\n");
+	}
+}
+
+
+#
+# apply_prefix(filename, PREFIXES)
+#
+# If FILENAME begins with PREFIX from PREFIXES, remove PREFIX from FILENAME
+# and return resulting string, otherwise return FILENAME.
+#
+
+sub apply_prefix($@)
+{
+	my $filename = shift;
+	my @dir_prefix = @_;
+
+	if (@dir_prefix)
+	{
+		foreach my $prefix (@dir_prefix)
+		{
+			if ($prefix ne "" && $filename =~ /^\Q$prefix\E\/(.*)$/)
+			{
+				return substr($filename, length($prefix) + 1);
+			}
+		}
+	}
+
+	return $filename;
+}
+
+
+#
+# system_no_output(mode, parameters)
+#
+# Call an external program using PARAMETERS while suppressing depending on
+# the value of MODE:
+#
+#   MODE & 1: suppress STDOUT
+#   MODE & 2: suppress STDERR
+#
+# Return 0 on success, non-zero otherwise.
+#
+
+sub system_no_output($@)
+{
+	my $mode = shift;
+	my $result;
+	local *OLD_STDERR;
+	local *OLD_STDOUT;
+
+	# Save old stdout and stderr handles
+	($mode & 1) && open(OLD_STDOUT, ">>&", "STDOUT");
+	($mode & 2) && open(OLD_STDERR, ">>&", "STDERR");
+
+	# Redirect to /dev/null
+	($mode & 1) && open(STDOUT, ">", "/dev/null");
+	($mode & 2) && open(STDERR, ">", "/dev/null");
+
+	system(@_);
+	$result = $?;
+
+	# Close redirected handles
+	($mode & 1) && close(STDOUT);
+	($mode & 2) && close(STDERR);
+
+	# Restore old handles
+	($mode & 1) && open(STDOUT, ">>&", "OLD_STDOUT");
+	($mode & 2) && open(STDERR, ">>&", "OLD_STDERR");
+
+	return $result;
+}
+
+
+#
+# read_config(filename)
+#
+# Read configuration file FILENAME and return a reference to a hash containing
+# all valid key=value pairs found.
+#
+
+sub read_config($)
+{
+	my $filename = $_[0];
+	my %result;
+	my $key;
+	my $value;
+	local *HANDLE;
+
+	if (!open(HANDLE, "<", $filename))
+	{
+		warn("WARNING: cannot read configuration file $filename\n");
+		return undef;
+	}
+	while (<HANDLE>)
+	{
+		chomp;
+		# Skip comments
+		s/#.*//;
+		# Remove leading blanks
+		s/^\s+//;
+		# Remove trailing blanks
+		s/\s+$//;
+		next unless length;
+		($key, $value) = split(/\s*=\s*/, $_, 2);
+		if (defined($key) && defined($value))
+		{
+			$result{$key} = $value;
+		}
+		else
+		{
+			warn("WARNING: malformed statement in line $. ".
+			     "of configuration file $filename\n");
+		}
+	}
+	close(HANDLE);
+	return \%result;
+}
+
+
+#
+# apply_config(REF)
+#
+# REF is a reference to a hash containing the following mapping:
+#
+#   key_string => var_ref
+#
+# where KEY_STRING is a keyword and VAR_REF is a reference to an associated
+# variable. If the global configuration hashes CONFIG or OPT_RC contain a value
+# for keyword KEY_STRING, VAR_REF will be assigned the value for that keyword. 
+#
+
+sub apply_config($)
+{
+	my $ref = $_[0];
+
+	foreach (keys(%{$ref}))
+	{
+		if (defined($opt_rc{$_})) {
+			${$ref->{$_}} = $opt_rc{$_};
+		} elsif (defined($config->{$_})) {
+			${$ref->{$_}} = $config->{$_};
+		}
+	}
+}
+
+
+#
+# get_html_prolog(FILENAME)
+#
+# If FILENAME is defined, return contents of file. Otherwise return default
+# HTML prolog. Die on error.
+#
+
+sub get_html_prolog($)
+{
+	my $filename = $_[0];
+	my $result = "";
+
+	if (defined($filename))
+	{
+		local *HANDLE;
+
+		open(HANDLE, "<", $filename)
+			or die("ERROR: cannot open html prolog $filename!\n");
+		while (<HANDLE>)
+		{
+			$result .= $_;
+		}
+		close(HANDLE);
+	}
+	else
+	{
+		$result = <<END_OF_HTML
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+
+<html lang="en">
+
+<head>
+  <meta http-equiv="Content-Type" content="text/html; charset=$charset">
+  <title>\@pagetitle\@</title>
+  <link rel="stylesheet" type="text/css" href="\@basedir\@gcov.css">
+</head>
+
+<body>
+
+END_OF_HTML
+		;
+	}
+
+	return $result;
+}
+
+
+#
+# get_html_epilog(FILENAME)
+#
+# If FILENAME is defined, return contents of file. Otherwise return default
+# HTML epilog. Die on error.
+#
+sub get_html_epilog($)
+{
+	my $filename = $_[0];
+	my $result = "";
+
+	if (defined($filename))
+	{
+		local *HANDLE;
+
+		open(HANDLE, "<", $filename)
+			or die("ERROR: cannot open html epilog $filename!\n");
+		while (<HANDLE>)
+		{
+			$result .= $_;
+		}
+		close(HANDLE);
+	}
+	else
+	{
+		$result = <<END_OF_HTML
+
+</body>
+</html>
+END_OF_HTML
+		;
+	}
+
+	return $result;
+
+}
+
+sub warn_handler($)
+{
+	my ($msg) = @_;
+
+	warn("$tool_name: $msg");
+}
+
+sub die_handler($)
+{
+	my ($msg) = @_;
+
+	die("$tool_name: $msg");
+}
+
+#
+# parse_ignore_errors(@ignore_errors)
+#
+# Parse user input about which errors to ignore.
+#
+
+sub parse_ignore_errors(@)
+{
+	my (@ignore_errors) = @_;
+	my @items;
+	my $item;
+
+	return if (!@ignore_errors);
+
+	foreach $item (@ignore_errors) {
+		$item =~ s/\s//g;
+		if ($item =~ /,/) {
+			# Split and add comma-separated parameters
+			push(@items, split(/,/, $item));
+		} else {
+			# Add single parameter
+			push(@items, $item);
+		}
+	}
+	foreach $item (@items) {
+		my $item_id = $ERROR_ID{lc($item)};
+
+		if (!defined($item_id)) {
+			die("ERROR: unknown argument for --ignore-errors: ".
+			    "$item\n");
+		}
+		$ignore[$item_id] = 1;
+	}
+}
+
+#
+# parse_dir_prefix(@dir_prefix)
+#
+# Parse user input about the prefix list
+#
+
+sub parse_dir_prefix(@)
+{
+	my (@opt_dir_prefix) = @_;
+	my $item;
+
+	return if (!@opt_dir_prefix);
+
+	foreach $item (@opt_dir_prefix) {
+		if ($item =~ /,/) {
+			# Split and add comma-separated parameters
+			push(@dir_prefix, split(/,/, $item));
+		} else {
+			# Add single parameter
+			push(@dir_prefix, $item);
+		}
+	}
+}
+
+#
+# rate(hit, found[, suffix, precision, width])
+#
+# Return the coverage rate [0..100] for HIT and FOUND values. 0 is only
+# returned when HIT is 0. 100 is only returned when HIT equals FOUND.
+# PRECISION specifies the precision of the result. SUFFIX defines a
+# string that is appended to the result if FOUND is non-zero. Spaces
+# are added to the start of the resulting string until it is at least WIDTH
+# characters wide.
+#
+
+sub rate($$;$$$)
+{
+        my ($hit, $found, $suffix, $precision, $width) = @_;
+        my $rate; 
+
+	# Assign defaults if necessary
+	$precision	= $default_precision if (!defined($precision));
+	$suffix		= ""	if (!defined($suffix));
+	$width		= 0	if (!defined($width));
+        
+        return sprintf("%*s", $width, "-") if (!defined($found) || $found == 0);
+        $rate = sprintf("%.*f", $precision, $hit * 100 / $found);
+
+	# Adjust rates if necessary
+        if ($rate == 0 && $hit > 0) {
+		$rate = sprintf("%.*f", $precision, 1 / 10 ** $precision);
+        } elsif ($rate == 100 && $hit != $found) {
+		$rate = sprintf("%.*f", $precision, 100 - 1 / 10 ** $precision);
+	}
+
+	return sprintf("%*s", $width, $rate.$suffix);
+}
diff --git a/ThirdParty/lcov/bin/geninfo b/ThirdParty/lcov/bin/geninfo
new file mode 100755
index 0000000000000000000000000000000000000000..f41eaec1cdfaadddecec3184eaeb4f7ff93c9890
--- /dev/null
+++ b/ThirdParty/lcov/bin/geninfo
@@ -0,0 +1,4014 @@
+#!/usr/bin/env perl
+#
+#   Copyright (c) International Business Machines  Corp., 2002,2012
+#
+#   This program is free software;  you can redistribute it and/or modify
+#   it under the terms of the GNU General Public License as published by
+#   the Free Software Foundation; either version 2 of the License, or (at
+#   your option) any later version.
+#
+#   This program is distributed in the hope that it will be useful, but
+#   WITHOUT ANY WARRANTY;  without even the implied warranty of
+#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+#   General Public License for more details.                 
+#
+#   You should have received a copy of the GNU General Public License
+#   along with this program;  if not, write to the Free Software
+#   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#
+# geninfo
+#
+#   This script generates .info files from data files as created by code
+#   instrumented with gcc's built-in profiling mechanism. Call it with
+#   --help and refer to the geninfo man page to get information on usage
+#   and available options.
+#
+#
+# Authors:
+#   2002-08-23 created by Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
+#                         IBM Lab Boeblingen
+#        based on code by Manoj Iyer <manjo@mail.utexas.edu> and
+#                         Megan Bock <mbock@us.ibm.com>
+#                         IBM Austin
+#   2002-09-05 / Peter Oberparleiter: implemented option that allows file list
+#   2003-04-16 / Peter Oberparleiter: modified read_gcov so that it can also
+#                parse the new gcov format which is to be introduced in gcc 3.3
+#   2003-04-30 / Peter Oberparleiter: made info write to STDERR, not STDOUT
+#   2003-07-03 / Peter Oberparleiter: added line checksum support, added
+#                --no-checksum
+#   2003-09-18 / Nigel Hinds: capture branch coverage data from GCOV
+#   2003-12-11 / Laurent Deniel: added --follow option
+#                workaround gcov (<= 3.2.x) bug with empty .da files
+#   2004-01-03 / Laurent Deniel: Ignore empty .bb files
+#   2004-02-16 / Andreas Krebbel: Added support for .gcno/.gcda files and
+#                gcov versioning
+#   2004-08-09 / Peter Oberparleiter: added configuration file support
+#   2008-07-14 / Tom Zoerner: added --function-coverage command line option
+#   2008-08-13 / Peter Oberparleiter: modified function coverage
+#                implementation (now enabled per default)
+#
+
+use strict;
+use warnings;
+use File::Basename; 
+use File::Spec::Functions qw /abs2rel catdir file_name_is_absolute splitdir
+			      splitpath catpath/;
+use Getopt::Long;
+use Digest::MD5 qw(md5_base64);
+use Cwd qw/abs_path/;
+if( $^O eq "msys" )
+{
+	require File::Spec::Win32;
+}
+
+# Constants
+our $tool_dir		= abs_path(dirname($0));
+our $lcov_version	= "LCOV version 1.14";
+our $lcov_url		= "http://ltp.sourceforge.net/coverage/lcov.php";
+our $gcov_tool		= "gcov";
+our $tool_name		= basename($0);
+
+our $GCOV_VERSION_8_0_0	= 0x80000;
+our $GCOV_VERSION_4_7_0	= 0x40700;
+our $GCOV_VERSION_3_4_0	= 0x30400;
+our $GCOV_VERSION_3_3_0	= 0x30300;
+our $GCNO_FUNCTION_TAG	= 0x01000000;
+our $GCNO_LINES_TAG	= 0x01450000;
+our $GCNO_FILE_MAGIC	= 0x67636e6f;
+our $BBG_FILE_MAGIC	= 0x67626267;
+
+# Error classes which users may specify to ignore during processing
+our $ERROR_GCOV		= 0;
+our $ERROR_SOURCE	= 1;
+our $ERROR_GRAPH	= 2;
+our %ERROR_ID = (
+	"gcov" => $ERROR_GCOV,
+	"source" => $ERROR_SOURCE,
+	"graph" => $ERROR_GRAPH,
+);
+
+our $EXCL_START = "LCOV_EXCL_START";
+our $EXCL_STOP = "LCOV_EXCL_STOP";
+
+# Marker to exclude branch coverage but keep function and line coverage
+our $EXCL_BR_START = "LCOV_EXCL_BR_START";
+our $EXCL_BR_STOP = "LCOV_EXCL_BR_STOP";
+
+# Compatibility mode values
+our $COMPAT_VALUE_OFF	= 0;
+our $COMPAT_VALUE_ON	= 1;
+our $COMPAT_VALUE_AUTO	= 2;
+
+# Compatibility mode value names
+our %COMPAT_NAME_TO_VALUE = (
+	"off"	=> $COMPAT_VALUE_OFF,
+	"on"	=> $COMPAT_VALUE_ON,
+	"auto"	=> $COMPAT_VALUE_AUTO,
+);
+
+# Compatibility modes
+our $COMPAT_MODE_LIBTOOL	= 1 << 0;
+our $COMPAT_MODE_HAMMER		= 1 << 1;
+our $COMPAT_MODE_SPLIT_CRC	= 1 << 2;
+
+# Compatibility mode names
+our %COMPAT_NAME_TO_MODE = (
+	"libtool"	=> $COMPAT_MODE_LIBTOOL,
+	"hammer"	=> $COMPAT_MODE_HAMMER,
+	"split_crc"	=> $COMPAT_MODE_SPLIT_CRC,
+	"android_4_4_0"	=> $COMPAT_MODE_SPLIT_CRC,
+);
+
+# Map modes to names
+our %COMPAT_MODE_TO_NAME = (
+	$COMPAT_MODE_LIBTOOL	=> "libtool",
+	$COMPAT_MODE_HAMMER	=> "hammer",
+	$COMPAT_MODE_SPLIT_CRC	=> "split_crc",
+);
+
+# Compatibility mode default values
+our %COMPAT_MODE_DEFAULTS = (
+	$COMPAT_MODE_LIBTOOL	=> $COMPAT_VALUE_ON,
+	$COMPAT_MODE_HAMMER	=> $COMPAT_VALUE_AUTO,
+	$COMPAT_MODE_SPLIT_CRC	=> $COMPAT_VALUE_AUTO,
+);
+
+# Compatibility mode auto-detection routines
+sub compat_hammer_autodetect();
+our %COMPAT_MODE_AUTO = (
+	$COMPAT_MODE_HAMMER	=> \&compat_hammer_autodetect,
+	$COMPAT_MODE_SPLIT_CRC	=> 1,	# will be done later
+);
+
+our $BR_LINE		= 0;
+our $BR_BLOCK		= 1;
+our $BR_BRANCH		= 2;
+our $BR_TAKEN		= 3;
+our $BR_VEC_ENTRIES	= 4;
+our $BR_VEC_WIDTH	= 32;
+our $BR_VEC_MAX		= vec(pack('b*', 1 x $BR_VEC_WIDTH), 0, $BR_VEC_WIDTH);
+
+our $UNNAMED_BLOCK	= -1;
+
+# Prototypes
+sub print_usage(*);
+sub transform_pattern($);
+sub gen_info($);
+sub process_dafile($$);
+sub match_filename($@);
+sub solve_ambiguous_match($$$);
+sub split_filename($);
+sub solve_relative_path($$);
+sub read_gcov_header($);
+sub read_gcov_file($);
+sub info(@);
+sub map_llvm_version($);
+sub version_to_str($);
+sub get_gcov_version();
+sub system_no_output($@);
+sub read_config($);
+sub apply_config($);
+sub get_exclusion_data($);
+sub apply_exclusion_data($$);
+sub process_graphfile($$);
+sub filter_fn_name($);
+sub warn_handler($);
+sub die_handler($);
+sub graph_error($$);
+sub graph_expect($);
+sub graph_read(*$;$$);
+sub graph_skip(*$;$);
+sub uniq(@);
+sub sort_uniq(@);
+sub sort_uniq_lex(@);
+sub graph_cleanup($);
+sub graph_find_base($);
+sub graph_from_bb($$$$);
+sub graph_add_order($$$);
+sub read_bb_word(*;$);
+sub read_bb_value(*;$);
+sub read_bb_string(*$);
+sub read_bb($);
+sub read_bbg_word(*;$);
+sub read_bbg_value(*;$);
+sub read_bbg_string(*);
+sub read_bbg_lines_record(*$$$$$);
+sub read_bbg($);
+sub read_gcno_word(*;$$);
+sub read_gcno_value(*$;$$);
+sub read_gcno_string(*$);
+sub read_gcno_lines_record(*$$$$$$);
+sub determine_gcno_split_crc($$$$);
+sub read_gcno_function_record(*$$$$$);
+sub read_gcno($);
+sub get_gcov_capabilities();
+sub get_overall_line($$$$);
+sub print_overall_rate($$$$$$$$$);
+sub br_gvec_len($);
+sub br_gvec_get($$);
+sub debug($);
+sub int_handler();
+sub parse_ignore_errors(@);
+sub is_external($);
+sub compat_name($);
+sub parse_compat_modes($);
+sub is_compat($);
+sub is_compat_auto($);
+
+
+# Global variables
+our $gcov_version;
+our $gcov_version_string;
+our $graph_file_extension;
+our $data_file_extension;
+our @data_directory;
+our $test_name = "";
+our $quiet;
+our $help;
+our $output_filename;
+our $base_directory;
+our $version;
+our $follow;
+our $checksum;
+our $no_checksum;
+our $opt_compat_libtool;
+our $opt_no_compat_libtool;
+our $rc_adjust_src_path;# Regexp specifying parts to remove from source path
+our $adjust_src_pattern;
+our $adjust_src_replace;
+our $adjust_testname;
+our $config;		# Configuration file contents
+our @ignore_errors;	# List of errors to ignore (parameter)
+our @ignore;		# List of errors to ignore (array)
+our $initial;
+our @include_patterns; # List of source file patterns to include
+our @exclude_patterns; # List of source file patterns to exclude
+our %excluded_files; # Files excluded due to include/exclude options
+our $no_recursion = 0;
+our $maxdepth;
+our $no_markers = 0;
+our $opt_derive_func_data = 0;
+our $opt_external = 1;
+our $opt_no_external;
+our $debug = 0;
+our $gcov_caps;
+our @gcov_options;
+our @internal_dirs;
+our $opt_config_file;
+our $opt_gcov_all_blocks = 1;
+our $opt_compat;
+our %opt_rc;
+our %compat_value;
+our $gcno_split_crc;
+our $func_coverage = 1;
+our $br_coverage = 0;
+our $rc_auto_base = 1;
+our $excl_line = "LCOV_EXCL_LINE";
+our $excl_br_line = "LCOV_EXCL_BR_LINE";
+
+our $cwd = `pwd`;
+chomp($cwd);
+
+
+#
+# Code entry point
+#
+
+# Register handler routine to be called when interrupted
+$SIG{"INT"} = \&int_handler;
+$SIG{__WARN__} = \&warn_handler;
+$SIG{__DIE__} = \&die_handler;
+
+# Set LC_ALL so that gcov output will be in a unified format
+$ENV{"LC_ALL"} = "C";
+
+# Check command line for a configuration file name
+Getopt::Long::Configure("pass_through", "no_auto_abbrev");
+GetOptions("config-file=s" => \$opt_config_file,
+	   "rc=s%" => \%opt_rc);
+Getopt::Long::Configure("default");
+
+{
+	# Remove spaces around rc options
+	my %new_opt_rc;
+
+	while (my ($key, $value) = each(%opt_rc)) {
+		$key =~ s/^\s+|\s+$//g;
+		$value =~ s/^\s+|\s+$//g;
+
+		$new_opt_rc{$key} = $value;
+	}
+	%opt_rc = %new_opt_rc;
+}
+
+# Read configuration file if available
+if (defined($opt_config_file)) {
+	$config = read_config($opt_config_file);
+} elsif (defined($ENV{"HOME"}) && (-r $ENV{"HOME"}."/.lcovrc"))
+{
+	$config = read_config($ENV{"HOME"}."/.lcovrc");
+}
+elsif (-r "/etc/lcovrc")
+{
+	$config = read_config("/etc/lcovrc");
+} elsif (-r "/usr/local/etc/lcovrc")
+{
+	$config = read_config("/usr/local/etc/lcovrc");
+}
+
+if ($config || %opt_rc)
+{
+	# Copy configuration file and --rc values to variables
+	apply_config({
+		"geninfo_gcov_tool"		=> \$gcov_tool,
+		"geninfo_adjust_testname"	=> \$adjust_testname,
+		"geninfo_checksum"		=> \$checksum,
+		"geninfo_no_checksum"		=> \$no_checksum, # deprecated
+		"geninfo_compat_libtool"	=> \$opt_compat_libtool,
+		"geninfo_external"		=> \$opt_external,
+		"geninfo_gcov_all_blocks"	=> \$opt_gcov_all_blocks,
+		"geninfo_compat"		=> \$opt_compat,
+		"geninfo_adjust_src_path"	=> \$rc_adjust_src_path,
+		"geninfo_auto_base"		=> \$rc_auto_base,
+		"lcov_function_coverage"	=> \$func_coverage,
+		"lcov_branch_coverage"		=> \$br_coverage,
+		"lcov_excl_line"		=> \$excl_line,
+		"lcov_excl_br_line"		=> \$excl_br_line,
+	});
+
+	# Merge options
+	if (defined($no_checksum))
+	{
+		$checksum = ($no_checksum ? 0 : 1);
+		$no_checksum = undef;
+	}
+
+	# Check regexp
+	if (defined($rc_adjust_src_path)) {
+		my ($pattern, $replace) = split(/\s*=>\s*/,
+						$rc_adjust_src_path);
+		local $SIG{__DIE__};
+		eval '$adjust_src_pattern = qr>'.$pattern.'>;';
+		if (!defined($adjust_src_pattern)) {
+			my $msg = $@;
+
+			chomp($msg);
+			$msg =~ s/at \(eval.*$//;
+			warn("WARNING: invalid pattern in ".
+			     "geninfo_adjust_src_path: $msg\n");
+		} elsif (!defined($replace)) {
+			# If no replacement is specified, simply remove pattern
+			$adjust_src_replace = "";
+		} else {
+			$adjust_src_replace = $replace;
+		}
+	}
+	for my $regexp (($excl_line, $excl_br_line)) {
+		eval 'qr/'.$regexp.'/';
+		my $error = $@;
+		chomp($error);
+		$error =~ s/at \(eval.*$//;
+		die("ERROR: invalid exclude pattern: $error") if $error;
+	}
+}
+
+# Parse command line options
+if (!GetOptions("test-name|t=s" => \$test_name,
+		"output-filename|o=s" => \$output_filename,
+		"checksum" => \$checksum,
+		"no-checksum" => \$no_checksum,
+		"base-directory|b=s" => \$base_directory,
+		"version|v" =>\$version,
+		"quiet|q" => \$quiet,
+		"help|h|?" => \$help,
+		"follow|f" => \$follow,
+		"compat-libtool" => \$opt_compat_libtool,
+		"no-compat-libtool" => \$opt_no_compat_libtool,
+		"gcov-tool=s" => \$gcov_tool,
+		"ignore-errors=s" => \@ignore_errors,
+		"initial|i" => \$initial,
+		"include=s" => \@include_patterns,
+		"exclude=s" => \@exclude_patterns,
+		"no-recursion" => \$no_recursion,
+		"no-markers" => \$no_markers,
+		"derive-func-data" => \$opt_derive_func_data,
+		"debug" => \$debug,
+		"external|e" => \$opt_external,
+		"no-external" => \$opt_no_external,
+		"compat=s" => \$opt_compat,
+		"config-file=s" => \$opt_config_file,
+		"rc=s%" => \%opt_rc,
+		))
+{
+	print(STDERR "Use $tool_name --help to get usage information\n");
+	exit(1);
+}
+else
+{
+	# Merge options
+	if (defined($no_checksum))
+	{
+		$checksum = ($no_checksum ? 0 : 1);
+		$no_checksum = undef;
+	}
+
+	if (defined($opt_no_compat_libtool))
+	{
+		$opt_compat_libtool = ($opt_no_compat_libtool ? 0 : 1);
+		$opt_no_compat_libtool = undef;
+	}
+
+	if (defined($opt_no_external)) {
+		$opt_external = 0;
+		$opt_no_external = undef;
+	}
+
+	if(@include_patterns) {
+		# Need Perl regex expressions instead of shell pattern
+		@include_patterns = map({ transform_pattern($_); } @include_patterns);
+	}
+
+	if(@exclude_patterns) {
+		# Need Perl regex expressions instead of shell pattern
+		@exclude_patterns = map({ transform_pattern($_); } @exclude_patterns);
+	}
+}
+
+@data_directory = @ARGV;
+
+debug("$lcov_version\n");
+
+# Check for help option
+if ($help)
+{
+	print_usage(*STDOUT);
+	exit(0);
+}
+
+# Check for version option
+if ($version)
+{
+	print("$tool_name: $lcov_version\n");
+	exit(0);
+}
+
+# Check gcov tool
+if (system_no_output(3, $gcov_tool, "--help") == -1)
+{
+	die("ERROR: need tool $gcov_tool!\n");
+}
+
+($gcov_version, $gcov_version_string) = get_gcov_version();
+
+# Determine gcov options
+$gcov_caps = get_gcov_capabilities();
+push(@gcov_options, "-b") if ($gcov_caps->{'branch-probabilities'} &&
+			      ($br_coverage || $func_coverage));
+push(@gcov_options, "-c") if ($gcov_caps->{'branch-counts'} &&
+			      $br_coverage);
+push(@gcov_options, "-a") if ($gcov_caps->{'all-blocks'} &&
+			      $opt_gcov_all_blocks && $br_coverage);
+if ($gcov_caps->{'hash-filenames'})
+{
+	push(@gcov_options, "-x");
+} else {
+	push(@gcov_options, "-p") if ($gcov_caps->{'preserve-paths'});
+}
+
+# Determine compatibility modes
+parse_compat_modes($opt_compat);
+
+# Determine which errors the user wants us to ignore
+parse_ignore_errors(@ignore_errors);
+
+# Make sure test names only contain valid characters
+if ($test_name =~ s/\W/_/g)
+{
+	warn("WARNING: invalid characters removed from testname!\n");
+}
+
+# Adjust test name to include uname output if requested
+if ($adjust_testname)
+{
+	$test_name .= "__".`uname -a`;
+	$test_name =~ s/\W/_/g;
+}
+
+# Make sure base_directory contains an absolute path specification
+if ($base_directory)
+{
+	$base_directory = solve_relative_path($cwd, $base_directory);
+}
+
+# Check for follow option
+if ($follow)
+{
+	$follow = "-follow"
+}
+else
+{
+	$follow = "";
+}
+
+# Determine checksum mode
+if (defined($checksum))
+{
+	# Normalize to boolean
+	$checksum = ($checksum ? 1 : 0);
+}
+else
+{
+	# Default is off
+	$checksum = 0;
+}
+
+# Determine max depth for recursion
+if ($no_recursion)
+{
+	$maxdepth = "-maxdepth 1";
+}
+else
+{
+	$maxdepth = "";
+}
+
+# Check for directory name
+if (!@data_directory)
+{
+	die("No directory specified\n".
+	    "Use $tool_name --help to get usage information\n");
+}
+else
+{
+	foreach (@data_directory)
+	{
+		stat($_);
+		if (!-r _)
+		{
+			die("ERROR: cannot read $_!\n");
+		}
+	}
+}
+
+if ($gcov_version < $GCOV_VERSION_3_4_0)
+{
+	if (is_compat($COMPAT_MODE_HAMMER))
+	{
+		$data_file_extension = ".da";
+		$graph_file_extension = ".bbg";
+	}
+	else
+	{
+		$data_file_extension = ".da";
+		$graph_file_extension = ".bb";
+	}
+}
+else
+{
+	$data_file_extension = ".gcda";
+	$graph_file_extension = ".gcno";
+}	
+
+# Check output filename
+if (defined($output_filename) && ($output_filename ne "-"))
+{
+	# Initially create output filename, data is appended
+	# for each data file processed
+	local *DUMMY_HANDLE;
+	open(DUMMY_HANDLE, ">", $output_filename)
+		or die("ERROR: cannot create $output_filename!\n");
+	close(DUMMY_HANDLE);
+
+	# Make $output_filename an absolute path because we're going
+	# to change directories while processing files
+	if (!($output_filename =~ /^\/(.*)$/))
+	{
+		$output_filename = $cwd."/".$output_filename;
+	}
+}
+
+# Build list of directories to identify external files
+foreach my $entry(@data_directory, $base_directory) {
+	next if (!defined($entry));
+	push(@internal_dirs, solve_relative_path($cwd, $entry));
+}
+
+# Do something
+foreach my $entry (@data_directory) {
+	gen_info($entry);
+}
+
+if ($initial && $br_coverage) {
+	warn("Note: --initial does not generate branch coverage ".
+	     "data\n");
+}
+info("Finished .info-file creation\n");
+
+exit(0);
+
+
+
+#
+# print_usage(handle)
+#
+# Print usage information.
+#
+
+sub print_usage(*)
+{
+	local *HANDLE = $_[0];
+
+	print(HANDLE <<END_OF_USAGE);
+Usage: $tool_name [OPTIONS] DIRECTORY
+
+Traverse DIRECTORY and create a .info file for each data file found. Note
+that you may specify more than one directory, all of which are then processed
+sequentially.
+
+  -h, --help                        Print this help, then exit
+  -v, --version                     Print version number, then exit
+  -q, --quiet                       Do not print progress messages
+  -i, --initial                     Capture initial zero coverage data
+  -t, --test-name NAME              Use test case name NAME for resulting data
+  -o, --output-filename OUTFILE     Write data only to OUTFILE
+  -f, --follow                      Follow links when searching .da/.gcda files
+  -b, --base-directory DIR          Use DIR as base directory for relative paths
+      --(no-)checksum               Enable (disable) line checksumming
+      --(no-)compat-libtool         Enable (disable) libtool compatibility mode
+      --gcov-tool TOOL              Specify gcov tool location
+      --ignore-errors ERROR         Continue after ERROR (gcov, source, graph)
+      --no-recursion                Exclude subdirectories from processing
+      --no-markers                  Ignore exclusion markers in source code
+      --derive-func-data            Generate function data from line data
+      --(no-)external               Include (ignore) data for external files
+      --config-file FILENAME        Specify configuration file location
+      --rc SETTING=VALUE            Override configuration file setting
+      --compat MODE=on|off|auto     Set compat MODE (libtool, hammer, split_crc)
+      --include PATTERN             Include files matching PATTERN
+      --exclude PATTERN             Exclude files matching PATTERN
+
+For more information see: $lcov_url
+END_OF_USAGE
+	;
+}
+
+
+#
+# transform_pattern(pattern)
+#
+# Transform shell wildcard expression to equivalent Perl regular expression.
+# Return transformed pattern.
+#
+
+sub transform_pattern($)
+{
+	my $pattern = $_[0];
+
+	# Escape special chars
+
+	$pattern =~ s/\\/\\\\/g;
+	$pattern =~ s/\//\\\//g;
+	$pattern =~ s/\^/\\\^/g;
+	$pattern =~ s/\$/\\\$/g;
+	$pattern =~ s/\(/\\\(/g;
+	$pattern =~ s/\)/\\\)/g;
+	$pattern =~ s/\[/\\\[/g;
+	$pattern =~ s/\]/\\\]/g;
+	$pattern =~ s/\{/\\\{/g;
+	$pattern =~ s/\}/\\\}/g;
+	$pattern =~ s/\./\\\./g;
+	$pattern =~ s/\,/\\\,/g;
+	$pattern =~ s/\|/\\\|/g;
+	$pattern =~ s/\+/\\\+/g;
+	$pattern =~ s/\!/\\\!/g;
+
+	# Transform ? => (.) and * => (.*)
+
+	$pattern =~ s/\*/\(\.\*\)/g;
+	$pattern =~ s/\?/\(\.\)/g;
+
+	return $pattern;
+}
+
+
+#
+# get_common_prefix(min_dir, filenames)
+#
+# Return the longest path prefix shared by all filenames. MIN_DIR specifies
+# the minimum number of directories that a filename may have after removing
+# the prefix.
+#
+
+sub get_common_prefix($@)
+{
+	my ($min_dir, @files) = @_;
+	my $file;
+	my @prefix;
+	my $i;
+
+	foreach $file (@files) {
+		my ($v, $d, $f) = splitpath($file);
+		my @comp = splitdir($d);
+
+		if (!@prefix) {
+			@prefix = @comp;
+			next;
+		}
+		for ($i = 0; $i < scalar(@comp) && $i < scalar(@prefix); $i++) {
+			if ($comp[$i] ne $prefix[$i] ||
+			    ((scalar(@comp) - ($i + 1)) <= $min_dir)) {
+				delete(@prefix[$i..scalar(@prefix)]);
+				last;
+			}
+		}
+	}
+
+	return catdir(@prefix);
+}
+
+#
+# gen_info(directory)
+#
+# Traverse DIRECTORY and create a .info file for each data file found.
+# The .info file contains TEST_NAME in the following format:
+#
+#   TN:<test name>
+#
+# For each source file name referenced in the data file, there is a section
+# containing source code and coverage data:
+#
+#   SF:<absolute path to the source file>
+#   FN:<line number of function start>,<function name> for each function
+#   DA:<line number>,<execution count> for each instrumented line
+#   LH:<number of lines with an execution count> greater than 0
+#   LF:<number of instrumented lines>
+#
+# Sections are separated by:
+#
+#   end_of_record
+#
+# In addition to the main source code file there are sections for each
+# #included file containing executable code. Note that the absolute path
+# of a source file is generated by interpreting the contents of the respective
+# graph file. Relative filenames are prefixed with the directory in which the
+# graph file is found. Note also that symbolic links to the graph file will be
+# resolved so that the actual file path is used instead of the path to a link.
+# This approach is necessary for the mechanism to work with the /proc/gcov
+# files.
+#
+# Die on error.
+#
+
+sub gen_info($)
+{
+	my $directory = $_[0];
+	my @file_list;
+	my $file;
+	my $prefix;
+	my $type;
+	my $ext;
+
+	if ($initial) {
+		$type = "graph";
+		$ext = $graph_file_extension;
+	} else {
+		$type = "data";
+		$ext = $data_file_extension;
+	}
+
+	if (-d $directory)
+	{
+		info("Scanning $directory for $ext files ...\n");
+
+		@file_list = `find "$directory" $maxdepth $follow -name \\*$ext -type f -o -name \\*$ext -type l 2>/dev/null`;
+		chomp(@file_list);
+		if (!@file_list) {
+			warn("WARNING: no $ext files found in $directory - ".
+			     "skipping!\n");
+			return;
+		}
+		$prefix = get_common_prefix(1, @file_list);
+		info("Found %d %s files in %s\n", $#file_list+1, $type,
+		     $directory);
+	}
+	else
+	{
+		@file_list = ($directory);
+		$prefix = "";
+	}
+
+	# Process all files in list
+	foreach $file (@file_list) {
+		# Process file
+		if ($initial) {
+			process_graphfile($file, $prefix);
+		} else {
+			process_dafile($file, $prefix);
+		}
+	}
+
+	# Report whether files were excluded.
+	if (%excluded_files) {
+		info("Excluded data for %d files due to include/exclude options\n",
+			 scalar keys %excluded_files);
+	}
+}
+
+
+#
+# derive_data(contentdata, funcdata, bbdata)
+#
+# Calculate function coverage data by combining line coverage data and the
+# list of lines belonging to a function.
+#
+# contentdata: [ instr1, count1, source1, instr2, count2, source2, ... ]
+# instr<n>: Instrumentation flag for line n
+# count<n>: Execution count for line n
+# source<n>: Source code for line n
+#
+# funcdata: [ count1, func1, count2, func2, ... ]
+# count<n>: Execution count for function number n
+# func<n>: Function name for function number n
+#
+# bbdata: function_name -> [ line1, line2, ... ]
+# line<n>: Line number belonging to the corresponding function
+#
+
+sub derive_data($$$)
+{
+	my ($contentdata, $funcdata, $bbdata) = @_;
+	my @gcov_content = @{$contentdata};
+	my @gcov_functions = @{$funcdata};
+	my %fn_count;
+	my %ln_fn;
+	my $line;
+	my $maxline;
+	my %fn_name;
+	my $fn;
+	my $count;
+
+	if (!defined($bbdata)) {
+		return @gcov_functions;
+	}
+
+	# First add existing function data
+	while (@gcov_functions) {
+		$count = shift(@gcov_functions);
+		$fn = shift(@gcov_functions);
+
+		$fn_count{$fn} = $count;
+	}
+
+	# Convert line coverage data to function data
+	foreach $fn (keys(%{$bbdata})) {
+		my $line_data = $bbdata->{$fn};
+		my $line;
+		my $fninstr = 0;
+
+		if ($fn eq "") {
+			next;
+		}
+		# Find the lowest line count for this function
+		$count = 0;
+		foreach $line (@$line_data) {
+			my $linstr = $gcov_content[ ( $line - 1 ) * 3 + 0 ];
+			my $lcount = $gcov_content[ ( $line - 1 ) * 3 + 1 ];
+
+			next if (!$linstr);
+			$fninstr = 1;
+			if (($lcount > 0) &&
+			    (($count == 0) || ($lcount < $count))) {
+				$count = $lcount;
+			}
+		}
+		next if (!$fninstr);
+		$fn_count{$fn} = $count;
+	}
+
+
+	# Check if we got data for all functions
+	foreach $fn (keys(%fn_name)) {
+		if ($fn eq "") {
+			next;
+		}
+		if (defined($fn_count{$fn})) {
+			next;
+		}
+		warn("WARNING: no derived data found for function $fn\n");
+	}
+
+	# Convert hash to list in @gcov_functions format
+	foreach $fn (sort(keys(%fn_count))) {
+		push(@gcov_functions, $fn_count{$fn}, $fn);
+	}
+
+	return @gcov_functions;
+}
+
+#
+# get_filenames(directory, pattern)
+#
+# Return a list of filenames found in directory which match the specified
+# pattern.
+#
+# Die on error.
+#
+
+sub get_filenames($$)
+{
+	my ($dirname, $pattern) = @_;
+	my @result;
+	my $directory;
+	local *DIR;
+
+	opendir(DIR, $dirname) or
+		die("ERROR: cannot read directory $dirname\n");
+	while ($directory = readdir(DIR)) {
+		push(@result, $directory) if ($directory =~ /$pattern/);
+	}
+	closedir(DIR);
+
+	return @result;
+}
+
+#
+# process_dafile(da_filename, dir)
+#
+# Create a .info file for a single data file.
+#
+# Die on error.
+#
+
+sub process_dafile($$)
+{
+	my ($file, $dir) = @_;
+	my $da_filename;	# Name of data file to process
+	my $da_dir;		# Directory of data file
+	my $source_dir;		# Directory of source file
+	my $da_basename;	# data filename without ".da/.gcda" extension
+	my $bb_filename;	# Name of respective graph file
+	my $bb_basename;	# Basename of the original graph file
+	my $graph;		# Contents of graph file
+	my $instr;		# Contents of graph file part 2
+	my $gcov_error;		# Error code of gcov tool
+	my $object_dir;		# Directory containing all object files
+	my $source_filename;	# Name of a source code file
+	my $gcov_file;		# Name of a .gcov file
+	my @gcov_content;	# Content of a .gcov file
+	my $gcov_branches;	# Branch content of a .gcov file
+	my @gcov_functions;	# Function calls of a .gcov file
+	my @gcov_list;		# List of generated .gcov files
+	my $line_number;	# Line number count
+	my $lines_hit;		# Number of instrumented lines hit
+	my $lines_found;	# Number of instrumented lines found
+	my $funcs_hit;		# Number of instrumented functions hit
+	my $funcs_found;	# Number of instrumented functions found
+	my $br_hit;
+	my $br_found;
+	my $source;		# gcov source header information
+	my $object;		# gcov object header information
+	my @matches;		# List of absolute paths matching filename
+	my $base_dir;		# Base directory for current file
+	my @tmp_links;		# Temporary links to be cleaned up
+	my @result;
+	my $index;
+	my $da_renamed;		# If data file is to be renamed
+	local *INFO_HANDLE;
+
+	info("Processing %s\n", abs2rel($file, $dir));
+	# Get path to data file in absolute and normalized form (begins with /,
+	# contains no more ../ or ./)
+	$da_filename = solve_relative_path($cwd, $file);
+
+	# Get directory and basename of data file
+	($da_dir, $da_basename) = split_filename($da_filename);
+
+	$source_dir = $da_dir;
+	if (is_compat($COMPAT_MODE_LIBTOOL)) {
+		# Avoid files from .libs dirs 	 
+		$source_dir =~ s/\.libs$//;
+	}
+
+	if (-z $da_filename)
+	{
+		$da_renamed = 1;
+	}
+	else
+	{
+		$da_renamed = 0;
+	}
+
+	# Construct base_dir for current file
+	if ($base_directory)
+	{
+		$base_dir = $base_directory;
+	}
+	else
+	{
+		$base_dir = $source_dir;
+	}
+
+	# Check for writable $base_dir (gcov will try to write files there)
+	stat($base_dir);
+	if (!-w _)
+	{
+		die("ERROR: cannot write to directory $base_dir!\n");
+	}
+
+	# Construct name of graph file
+	$bb_basename = $da_basename.$graph_file_extension;
+	$bb_filename = "$da_dir/$bb_basename";
+
+	# Find out the real location of graph file in case we're just looking at
+	# a link
+	while (readlink($bb_filename))
+	{
+		my $last_dir = dirname($bb_filename);
+
+		$bb_filename = readlink($bb_filename);
+		$bb_filename = solve_relative_path($last_dir, $bb_filename);
+	}
+
+	# Ignore empty graph file (e.g. source file with no statement)
+	if (-z $bb_filename)
+	{
+		warn("WARNING: empty $bb_filename (skipped)\n");
+		return;
+	}
+
+	# Read contents of graph file into hash. We need it later to find out
+	# the absolute path to each .gcov file created as well as for
+	# information about functions and their source code positions.
+	if ($gcov_version < $GCOV_VERSION_3_4_0)
+	{
+		if (is_compat($COMPAT_MODE_HAMMER))
+		{
+			($instr, $graph) = read_bbg($bb_filename);
+		}
+		else
+		{
+			($instr, $graph) = read_bb($bb_filename);
+		}
+	} 
+	else
+	{
+		($instr, $graph) = read_gcno($bb_filename);
+	} 
+
+	# Try to find base directory automatically if requested by user
+	if ($rc_auto_base) {
+		$base_dir = find_base_from_graph($base_dir, $instr, $graph);
+	}
+
+	($instr, $graph) = adjust_graph_filenames($base_dir, $instr, $graph);
+
+	# Set $object_dir to real location of object files. This may differ
+	# from $da_dir if the graph file is just a link to the "real" object
+	# file location.
+	$object_dir = dirname($bb_filename);
+
+	# Is the data file in a different directory? (this happens e.g. with
+	# the gcov-kernel patch)
+	if ($object_dir ne $da_dir)
+	{
+		# Need to create link to data file in $object_dir
+		system("ln", "-s", $da_filename, 
+		       "$object_dir/$da_basename$data_file_extension")
+			and die ("ERROR: cannot create link $object_dir/".
+				 "$da_basename$data_file_extension!\n");
+		push(@tmp_links,
+		     "$object_dir/$da_basename$data_file_extension");
+		# Need to create link to graph file if basename of link
+		# and file are different (CONFIG_MODVERSION compat)
+		if ((basename($bb_filename) ne $bb_basename) &&
+		    (! -e "$object_dir/$bb_basename")) {
+			symlink($bb_filename, "$object_dir/$bb_basename") or
+				warn("WARNING: cannot create link ".
+				     "$object_dir/$bb_basename\n");
+			push(@tmp_links, "$object_dir/$bb_basename");
+		}
+	}
+
+	# Change to directory containing data files and apply GCOV
+	debug("chdir($base_dir)\n");
+        chdir($base_dir);
+
+	if ($da_renamed)
+	{
+		# Need to rename empty data file to workaround
+	        # gcov <= 3.2.x bug (Abort)
+		system_no_output(3, "mv", "$da_filename", "$da_filename.ori")
+			and die ("ERROR: cannot rename $da_filename\n");
+	}
+
+	# Execute gcov command and suppress standard output
+	$gcov_error = system_no_output(1, $gcov_tool, $da_filename,
+				       "-o", $object_dir, @gcov_options);
+
+	if ($da_renamed)
+	{
+		system_no_output(3, "mv", "$da_filename.ori", "$da_filename")
+			and die ("ERROR: cannot rename $da_filename.ori");
+	}
+
+	# Clean up temporary links
+	foreach (@tmp_links) {
+		unlink($_);
+	}
+
+	if ($gcov_error)
+	{
+		if ($ignore[$ERROR_GCOV])
+		{
+			warn("WARNING: GCOV failed for $da_filename!\n");
+			return;
+		}
+		die("ERROR: GCOV failed for $da_filename!\n");
+	}
+
+	# Collect data from resulting .gcov files and create .info file
+	@gcov_list = get_filenames('.', '\.gcov$');
+
+	# Check for files
+	if (!@gcov_list)
+	{
+		warn("WARNING: gcov did not create any files for ".
+		     "$da_filename!\n");
+	}
+
+	# Check whether we're writing to a single file
+	if ($output_filename)
+	{
+		if ($output_filename eq "-")
+		{
+			*INFO_HANDLE = *STDOUT;
+		}
+		else
+		{
+			# Append to output file
+			open(INFO_HANDLE, ">>", $output_filename)
+				or die("ERROR: cannot write to ".
+				       "$output_filename!\n");
+		}
+	}
+	else
+	{
+		# Open .info file for output
+		open(INFO_HANDLE, ">", "$da_filename.info")
+			or die("ERROR: cannot create $da_filename.info!\n");
+	}
+
+	# Write test name
+	printf(INFO_HANDLE "TN:%s\n", $test_name);
+
+	# Traverse the list of generated .gcov files and combine them into a
+	# single .info file
+	foreach $gcov_file (sort(@gcov_list))
+	{
+		my $i;
+		my $num;
+
+		# Skip gcov file for gcc built-in code
+		next if ($gcov_file eq "<built-in>.gcov");
+
+		($source, $object) = read_gcov_header($gcov_file);
+
+		if (!defined($source)) {
+			# Derive source file name from gcov file name if
+			# header format could not be parsed
+			$source = $gcov_file;
+			$source =~ s/\.gcov$//;
+		}
+
+		$source = solve_relative_path($base_dir, $source);
+
+		if (defined($adjust_src_pattern)) {
+			# Apply transformation as specified by user
+			$source =~ s/$adjust_src_pattern/$adjust_src_replace/g;
+		}
+
+		# gcov will happily create output even if there's no source code
+		# available - this interferes with checksum creation so we need
+		# to pull the emergency brake here.
+		if (! -r $source && $checksum)
+		{
+			if ($ignore[$ERROR_SOURCE])
+			{
+				warn("WARNING: could not read source file ".
+				     "$source\n");
+				next;
+			}
+			die("ERROR: could not read source file $source\n");
+		}
+
+		@matches = match_filename($source, keys(%{$instr}));
+
+		# Skip files that are not mentioned in the graph file
+		if (!@matches)
+		{
+			warn("WARNING: cannot find an entry for ".$gcov_file.
+			     " in $graph_file_extension file, skipping ".
+			     "file!\n");
+			unlink($gcov_file);
+			next;
+		}
+
+		# Read in contents of gcov file
+		@result = read_gcov_file($gcov_file);
+		if (!defined($result[0])) {
+			warn("WARNING: skipping unreadable file ".
+			     $gcov_file."\n");
+			unlink($gcov_file);
+			next;
+		}
+		@gcov_content = @{$result[0]};
+		$gcov_branches = $result[1];
+		@gcov_functions = @{$result[2]};
+
+		# Skip empty files
+		if (!@gcov_content)
+		{
+			warn("WARNING: skipping empty file ".$gcov_file."\n");
+			unlink($gcov_file);
+			next;
+		}
+
+		if (scalar(@matches) == 1)
+		{
+			# Just one match
+			$source_filename = $matches[0];
+		}
+		else
+		{
+			# Try to solve the ambiguity
+			$source_filename = solve_ambiguous_match($gcov_file,
+						\@matches, \@gcov_content);
+		}
+
+		if (@include_patterns)
+		{
+			my $keep = 0;
+
+			foreach my $pattern (@include_patterns)
+			{
+				$keep ||= ($source_filename =~ (/^$pattern$/));
+			}
+
+			if (!$keep)
+			{
+				$excluded_files{$source_filename} = ();
+				unlink($gcov_file);
+				next;
+			}
+		}
+
+		if (@exclude_patterns)
+		{
+			my $exclude = 0;
+
+			foreach my $pattern (@exclude_patterns)
+			{
+				$exclude ||= ($source_filename =~ (/^$pattern$/));
+			}
+
+			if ($exclude)
+			{
+				$excluded_files{$source_filename} = ();
+				unlink($gcov_file);
+				next;
+			}
+		}
+
+		# Skip external files if requested
+		if (!$opt_external) {
+			if (is_external($source_filename)) {
+				info("  ignoring data for external file ".
+				     "$source_filename\n");
+				unlink($gcov_file);
+				next;
+			}
+		}
+
+		# Write absolute path of source file
+		printf(INFO_HANDLE "SF:%s\n", $source_filename);
+
+		# If requested, derive function coverage data from
+		# line coverage data of the first line of a function
+		if ($opt_derive_func_data) {
+			@gcov_functions =
+				derive_data(\@gcov_content, \@gcov_functions,
+					    $graph->{$source_filename});
+		}
+
+		# Write function-related information
+		if (defined($graph->{$source_filename}))
+		{
+			my $fn_data = $graph->{$source_filename};
+			my $fn;
+
+			foreach $fn (sort
+				{$fn_data->{$a}->[0] <=> $fn_data->{$b}->[0]}
+				keys(%{$fn_data})) {
+				my $ln_data = $fn_data->{$fn};
+				my $line = $ln_data->[0];
+
+				# Skip empty function
+				if ($fn eq "") {
+					next;
+				}
+				# Remove excluded functions
+				if (!$no_markers) {
+					my $gfn;
+					my $found = 0;
+
+					foreach $gfn (@gcov_functions) {
+						if ($gfn eq $fn) {
+							$found = 1;
+							last;
+						}
+					}
+					if (!$found) {
+						next;
+					}
+				}
+
+				# Normalize function name
+				$fn = filter_fn_name($fn);
+
+				print(INFO_HANDLE "FN:$line,$fn\n");
+			}
+		}
+
+		#--
+		#-- FNDA: <call-count>, <function-name>
+		#-- FNF: overall count of functions
+		#-- FNH: overall count of functions with non-zero call count
+		#--
+		$funcs_found = 0;
+		$funcs_hit = 0;
+		while (@gcov_functions)
+		{
+			my $count = shift(@gcov_functions);
+			my $fn = shift(@gcov_functions);
+
+			$fn = filter_fn_name($fn);
+			printf(INFO_HANDLE "FNDA:$count,$fn\n");
+			$funcs_found++;
+			$funcs_hit++ if ($count > 0);
+		}
+		if ($funcs_found > 0) {
+			printf(INFO_HANDLE "FNF:%s\n", $funcs_found);
+			printf(INFO_HANDLE "FNH:%s\n", $funcs_hit);
+		}
+
+		# Write coverage information for each instrumented branch:
+		#
+		#   BRDA:<line number>,<block number>,<branch number>,<taken>
+		#
+		# where 'taken' is the number of times the branch was taken
+		# or '-' if the block to which the branch belongs was never
+		# executed
+		$br_found = 0;
+		$br_hit = 0;
+		$num = br_gvec_len($gcov_branches);
+		for ($i = 0; $i < $num; $i++) {
+			my ($line, $block, $branch, $taken) =
+				br_gvec_get($gcov_branches, $i);
+
+			$block = $BR_VEC_MAX if ($block < 0);
+			print(INFO_HANDLE "BRDA:$line,$block,$branch,$taken\n");
+			$br_found++;
+			$br_hit++ if ($taken ne '-' && $taken > 0);
+		}
+		if ($br_found > 0) {
+			printf(INFO_HANDLE "BRF:%s\n", $br_found);
+			printf(INFO_HANDLE "BRH:%s\n", $br_hit);
+		}
+
+		# Reset line counters
+		$line_number = 0;
+		$lines_found = 0;
+		$lines_hit = 0;
+
+		# Write coverage information for each instrumented line
+		# Note: @gcov_content contains a list of (flag, count, source)
+		# tuple for each source code line
+		while (@gcov_content)
+		{
+			$line_number++;
+
+			# Check for instrumented line
+			if ($gcov_content[0])
+			{
+				$lines_found++;
+				printf(INFO_HANDLE "DA:".$line_number.",".
+				       $gcov_content[1].($checksum ?
+				       ",". md5_base64($gcov_content[2]) : "").
+				       "\n");
+
+				# Increase $lines_hit in case of an execution
+				# count>0
+				if ($gcov_content[1] > 0) { $lines_hit++; }
+			}
+
+			# Remove already processed data from array
+			splice(@gcov_content,0,3);
+		}
+
+		# Write line statistics and section separator
+		printf(INFO_HANDLE "LF:%s\n", $lines_found);
+		printf(INFO_HANDLE "LH:%s\n", $lines_hit);
+		print(INFO_HANDLE "end_of_record\n");
+
+		# Remove .gcov file after processing
+		unlink($gcov_file);
+	}
+
+	if (!($output_filename && ($output_filename eq "-")))
+	{
+		close(INFO_HANDLE);
+	}
+
+	# Change back to initial directory
+	chdir($cwd);
+}
+
+
+#
+# solve_relative_path(path, dir)
+#
+# Solve relative path components of DIR which, if not absolute, resides in PATH.
+#
+
sub solve_relative_path($$)
{
	# Normalize DIR, prepending PATH when DIR is not absolute, and
	# collapsing "//", "./", trailing "/." and "dir/.." components.
	# Returns the normalized path string.
	my $path = $_[0];
	my $dir = $_[1];
	my $volume;
	my $directories;
	my $filename;
	my @dirs;			# holds path elements
	my $result;

	# Convert from Windows path to msys path
	if( $^O eq "msys" )
	{
		# search for a windows drive letter at the beginning
		($volume, $directories, $filename) = File::Spec::Win32->splitpath( $dir );
		if( $volume ne '' )
		{
			my $uppercase_volume;
			# transform c/d\../e/f\g to Windows style c\d\..\e\f\g
			$dir = File::Spec::Win32->canonpath( $dir );
			# use Win32 module to retrieve path components
			# $uppercase_volume is not used any further
			( $uppercase_volume, $directories, $filename ) = File::Spec::Win32->splitpath( $dir );
			@dirs = File::Spec::Win32->splitdir( $directories );
			
			# prepend volume, since in msys C: is always mounted to /c
			$volume =~ s|^([a-zA-Z]+):|/\L$1\E|;
			unshift( @dirs, $volume );
			
			# transform to Unix style '/' path
			$directories = File::Spec->catdir( @dirs );
			$dir = File::Spec->catpath( '', $directories, $filename );
		} else {
			# eliminate '\' path separators
			$dir = File::Spec->canonpath( $dir );
		}
	}

	$result = $dir;
	# Prepend path if not absolute
	if ($dir =~ /^[^\/]/)
	{
		$result = "$path/$result";
	}

	# Remove //
	$result =~ s/\/\//\//g;

	# Remove .
	# NOTE: the substitutions below are order-dependent - "./" components
	# must be collapsed before ".." pairs are resolved.
	while ($result =~ s/\/\.\//\//g)
	{
	}
	$result =~ s/\/\.$/\//g;

	# Remove trailing /
	$result =~ s/\/$//g;

	# Solve ..
	# Collapse one "component/.." pair per iteration until none remain.
	while ($result =~ s/\/[^\/]+\/\.\.\//\//)
	{
	}

	# Remove preceding ..
	$result =~ s/^\/\.\.\//\//g;

	return $result;
}
+
+
+#
+# match_filename(gcov_filename, list)
+#
+# Return a list of those entries of LIST which match the relative filename
+# GCOV_FILENAME.
+#
+
sub match_filename($@)
{
	# Return all entries of LIST whose trailing path components match
	# relative filename FILENAME. The file name itself must be identical;
	# each directory component of FILENAME must match the corresponding
	# (rightmost) directory component of the candidate entry.
	my ($filename, @list) = @_;
	my ($vol, $dir, $file) = splitpath($filename);
	my @comp = splitdir($dir);
	my $comps = scalar(@comp);
	my $entry;
	my @result;

entry:
	foreach $entry (@list) {
		my ($evol, $edir, $efile) = splitpath($entry);
		my @ecomp;
		my $ecomps;
		my $i;

		# Filename component must match
		if ($efile ne $file) {
			next;
		}
		# Check directory components last to first for match
		@ecomp = splitdir($edir);
		$ecomps = scalar(@ecomp);
		if ($ecomps < $comps) {
			next;
		}
		for ($i = 0; $i < $comps; $i++) {
			if ($comp[$comps - $i - 1] ne
			    $ecomp[$ecomps - $i - 1]) {
				next entry;
			}
		}
		# Fix: statement must end in ';' - the original trailing ','
		# (comma operator) was a syntax error before the closing brace.
		push(@result, $entry);
	}

	return @result;
}
+
+#
+# solve_ambiguous_match(rel_filename, matches_ref, gcov_content_ref)
+#
+# Try to solve ambiguous matches of mapping (gcov file) -> (source code) file
+# by comparing source code provided in the GCOV file with that of the files
+# in MATCHES. REL_FILENAME identifies the relative filename of the gcov
+# file.
+# 
+# Return the one real match or die if there is none.
+#
+
sub solve_ambiguous_match($$$)
{
	# Resolve an ambiguous gcov-file -> source-file mapping by comparing
	# each candidate's on-disk text with the source text embedded in the
	# gcov data (every third slot of CONTENT, starting at offset 2).
	# Returns the single matching filename or dies if none matches.
	my ($rel_name, $matches, $content) = @_;
	local *SOURCE;

	foreach my $candidate (@$matches)
	{
		open(SOURCE, "<", $candidate)
			or die("ERROR: cannot read $candidate!\n");

		my $mismatch = 0;
		my $idx = 2;
		while (<SOURCE>)
		{
			chomp;

			# Also strip DOS-style CR line endings
			s/\015$//;

			if ($_ ne @$content[$idx])
			{
				$mismatch = 1;
				last;
			}
			$idx += 3;
		}

		close(SOURCE);

		if (!$mismatch)
		{
			info("Solved source file ambiguity for $rel_name\n");
			return $candidate;
		}
	}

	die("ERROR: could not match gcov data for $rel_name!\n");
}
+
+
+#
+# split_filename(filename)
+#
+# Return (path, filename, extension) for a given FILENAME.
+#
+
sub split_filename($)
{
	# Split FILENAME into (directory, base name, extension). The
	# extension is everything after the last '.'; the base name may
	# itself contain dots.
	my @dir_parts = split('/', $_[0]);
	my @name_parts = split('\.', pop(@dir_parts));
	my $ext = pop(@name_parts);
	my $path = join("/", @dir_parts);
	my $base = join(".", @name_parts);

	return ($path, $base, $ext);
}
+
+
+#
+# read_gcov_header(gcov_filename)
+#
+# Parse file GCOV_FILENAME and return a list containing the following
+# information:
+#
+#   (source, object)
+#
+# where:
+#
+# source: complete relative path of the source code file (gcc >= 3.3 only)
+# object: name of associated graph file
+#
+# Die on error.
+#
+
sub read_gcov_header($)
{
	# Extract the "Source:" and "Object:" header entries from the top of
	# a .gcov file. Returns (source, object); either may be undef. The
	# header section ends at the first line that is neither entry.
	my ($gcov_filename) = @_;
	my ($source, $object);
	local *INPUT;

	unless (open(INPUT, "<", $gcov_filename))
	{
		# Honor --ignore-errors gcov: warn instead of aborting
		if ($ignore_errors[$ERROR_GCOV])
		{
			warn("WARNING: cannot read $gcov_filename!\n");
			return (undef,undef);
		}
		die("ERROR: cannot read $gcov_filename!\n");
	}

	while (my $line = <INPUT>)
	{
		chomp($line);

		# Also strip DOS-style CR line endings
		$line =~ s/\015$//;

		if ($line =~ /^\s+-:\s+0:Source:(.*)$/)
		{
			$source = $1;
		}
		elsif ($line =~ /^\s+-:\s+0:Object:(.*)$/)
		{
			$object = $1;
		}
		else
		{
			last;
		}
	}

	close(INPUT);

	return ($source, $object);
}
+
+
+#
+# br_gvec_len(vector)
+#
+# Return the number of entries in the branch coverage vector.
+#
+
sub br_gvec_len($)
{
	# Number of branch entries stored in the packed coverage vector,
	# or 0 for an undefined vector.
	my ($vec) = @_;

	return 0 unless defined($vec);

	# Total bit-field count divided by fields-per-entry
	return (length($vec) * 8 / $BR_VEC_WIDTH) / $BR_VEC_ENTRIES;
}
+
+
+#
+# br_gvec_get(vector, number)
+#
+# Return an entry from the branch coverage vector.
+#
+
sub br_gvec_get($$)
{
	# Decode entry NUM of the packed branch vector into
	# (line, block, branch, taken).
	my ($vec, $num) = @_;
	my $base = $num * $BR_VEC_ENTRIES;

	my $line   = vec($vec, $base + $BR_LINE, $BR_VEC_WIDTH);
	my $block  = vec($vec, $base + $BR_BLOCK, $BR_VEC_WIDTH);
	my $branch = vec($vec, $base + $BR_BRANCH, $BR_VEC_WIDTH);
	my $taken  = vec($vec, $base + $BR_TAKEN, $BR_VEC_WIDTH);

	# $BR_VEC_MAX is the stored stand-in for a negative block number
	$block = -1 if ($block == $BR_VEC_MAX);

	# 0 encodes "block never executed" ('-'); otherwise count+1 was stored
	$taken = ($taken == 0) ? "-" : $taken - 1;

	return ($line, $block, $branch, $taken);
}
+
+
+#
+# br_gvec_push(vector, line, block, branch, taken)
+#
+# Add an entry to the branch coverage vector.
+#
+
sub br_gvec_push($$$$$)
{
	# Append one (line, block, branch, taken) entry to the packed branch
	# vector and return the updated vector. VEC may be undef.
	my ($vec, $line, $block, $branch, $taken) = @_;

	$vec = "" unless defined($vec);
	my $base = br_gvec_len($vec) * $BR_VEC_ENTRIES;

	# Negative block numbers are stored as $BR_VEC_MAX
	$block = $BR_VEC_MAX if $block < 0;

	# Store count+1; 0 is reserved for "never executed" ('-')
	$taken = ($taken eq "-") ? 0 : $taken + 1;

	vec($vec, $base + $BR_LINE, $BR_VEC_WIDTH) = $line;
	vec($vec, $base + $BR_BLOCK, $BR_VEC_WIDTH) = $block;
	vec($vec, $base + $BR_BRANCH, $BR_VEC_WIDTH) = $branch;
	vec($vec, $base + $BR_TAKEN, $BR_VEC_WIDTH) = $taken;

	return $vec;
}
+
+
+#
+# read_gcov_file(gcov_filename)
+#
+# Parse file GCOV_FILENAME (.gcov file format) and return the list:
+# (reference to gcov_content, reference to gcov_branch, reference to gcov_func)
+#
+# gcov_content is a list of 3 elements
+# (flag, count, source) for each source code line:
+#
+# $result[($line_number-1)*3+0] = instrumentation flag for line $line_number
+# $result[($line_number-1)*3+1] = execution count for line $line_number
+# $result[($line_number-1)*3+2] = source code text for line $line_number
+#
+# gcov_branch is a vector of 4 4-byte long elements for each branch:
+# line number, block number, branch number, count + 1 or 0
+#
+# gcov_func is a list of 2 elements
+# (number of calls, function name) for each function
+#
+# Die on error.
+#
+
sub read_gcov_file($)
{
	# Parse a .gcov file into (\@result, $branches, \@functions); see the
	# header comment above for the exact layout. Two parsers are used,
	# selected by $gcov_version: the pre-3.3 text format and the modern
	# "count:line:source" format. Globals read: $gcov_version,
	# $br_coverage, $func_coverage, $no_markers, $EXCL_START/$EXCL_STOP,
	# $EXCL_BR_START/$EXCL_BR_STOP, $excl_line, $excl_br_line,
	# @ignore_errors, $ERROR_GCOV, $UNNAMED_BLOCK.
	my $filename = $_[0];
	my @result = ();
	my $branches = "";
	my @functions = ();
	my $number;
	my $exclude_flag = 0;		# inside LCOV_EXCL_START..STOP section
	my $exclude_line = 0;		# current line is excluded
	my $exclude_br_flag = 0;	# inside LCOV_EXCL_BR_START..STOP section
	my $exclude_branch = 0;		# branch data on current line is excluded
	my $last_block = $UNNAMED_BLOCK;
	my $last_line = 0;
	local *INPUT;

	if (!open(INPUT, "<", $filename)) {
		if ($ignore_errors[$ERROR_GCOV])
		{
			warn("WARNING: cannot read $filename!\n");
			return (undef, undef, undef);
		}
		die("ERROR: cannot read $filename!\n");
	}

	if ($gcov_version < $GCOV_VERSION_3_3_0)
	{
		# Expect gcov format as used in gcc < 3.3
		while (<INPUT>)
		{
			chomp($_);

			# Also remove CR from line-end
			s/\015$//;

			if (/^branch\s+(\d+)\s+taken\s+=\s+(\d+)/) {
				next if (!$br_coverage);
				next if ($exclude_line);
				next if ($exclude_branch);
				$branches = br_gvec_push($branches, $last_line,
						$last_block, $1, $2);
			} elsif (/^branch\s+(\d+)\s+never\s+executed/) {
				next if (!$br_coverage);
				next if ($exclude_line);
				next if ($exclude_branch);
				$branches = br_gvec_push($branches, $last_line,
						$last_block, $1, '-');
			}
			elsif (/^call/ || /^function/)
			{
				# Function call return data
			}
			else
			{
				# Anything else is a source line in this format
				$last_line++;
				# Check for exclusion markers
				if (!$no_markers) {
					if (/$EXCL_STOP/) {
						$exclude_flag = 0;
					} elsif (/$EXCL_START/) {
						$exclude_flag = 1;
					}
					if (/$excl_line/ || $exclude_flag) {
						$exclude_line = 1;
					} else {
						$exclude_line = 0;
					}
				}
				# Check for exclusion markers (branch exclude)
				if (!$no_markers) {
					if (/$EXCL_BR_STOP/) {
						$exclude_br_flag = 0;
					} elsif (/$EXCL_BR_START/) {
						$exclude_br_flag = 1;
					}
					if (/$excl_br_line/ || $exclude_br_flag) {
						$exclude_branch = 1;
					} else {
						$exclude_branch = 0;
					}
				}
				# Source code execution data
				if (/^\t\t(.*)$/)
				{
					# Uninstrumented line
					push(@result, 0);
					push(@result, 0);
					push(@result, $1);
					next;
				}
				# First 16 chars hold the execution count
				$number = (split(" ",substr($_, 0, 16)))[0];

				# Check for zero count which is indicated
				# by ######
				if ($number eq "######") { $number = 0;	}

				if ($exclude_line) {
					# Register uninstrumented line instead
					push(@result, 0);
					push(@result, 0);
				} else {
					push(@result, 1);
					push(@result, $number);
				}
				push(@result, substr($_, 16));
			}
		}
	}
	else
	{
		# Expect gcov format as used in gcc >= 3.3
		while (<INPUT>)
		{
			chomp($_);

			# Also remove CR from line-end
			s/\015$//;

			if (/^\s*(\d+|\$+|\%+):\s*(\d+)-block\s+(\d+)\s*$/) {
				# Block information - used to group related
				# branches
				$last_line = $2;
				$last_block = $3;
			} elsif (/^branch\s+(\d+)\s+taken\s+(\d+)/) {
				next if (!$br_coverage);
				next if ($exclude_line);
				next if ($exclude_branch);
				$branches = br_gvec_push($branches, $last_line,
						$last_block, $1, $2);
			} elsif (/^branch\s+(\d+)\s+never\s+executed/) {
				next if (!$br_coverage);
				next if ($exclude_line);
				next if ($exclude_branch);
				$branches = br_gvec_push($branches, $last_line,
						$last_block, $1, '-');
			}
			elsif (/^function\s+(.+)\s+called\s+(\d+)\s+/)
			{
				next if (!$func_coverage);
				if ($exclude_line) {
					next;
				}
				# Stored as (call count, function name) pairs
				push(@functions, $2, $1);
			}
			elsif (/^call/)
			{
				# Function call return data
			}
			elsif (/^\s*([^:]+):\s*([^:]+):(.*)$/)
			{
				my ($count, $line, $code) = ($1, $2, $3);

				# Skip instance-specific counts
				next if ($line <= (scalar(@result) / 3));

				$last_line = $line;
				$last_block = $UNNAMED_BLOCK;
				# Check for exclusion markers
				if (!$no_markers) {
					if (/$EXCL_STOP/) {
						$exclude_flag = 0;
					} elsif (/$EXCL_START/) {
						$exclude_flag = 1;
					}
					if (/$excl_line/ || $exclude_flag) {
						$exclude_line = 1;
					} else {
						$exclude_line = 0;
					}
				}
				# Check for exclusion markers (branch exclude)
				if (!$no_markers) {
					if (/$EXCL_BR_STOP/) {
						$exclude_br_flag = 0;
					} elsif (/$EXCL_BR_START/) {
						$exclude_br_flag = 1;
					}
					if (/$excl_br_line/ || $exclude_br_flag) {
						$exclude_branch = 1;
					} else {
						$exclude_branch = 0;
					}
				}

				# Strip unexecuted basic block marker
				$count =~ s/\*$//;

				# <exec count>:<line number>:<source code>
				if ($line eq "0")
				{
					# Extra data
				}
				elsif ($count eq "-")
				{
					# Uninstrumented line
					push(@result, 0);
					push(@result, 0);
					push(@result, $code);
				}
				else
				{
					if ($exclude_line) {
						push(@result, 0);
						push(@result, 0);
					} else {
						# Check for zero count
						# ('#####' or '=====')
						if ($count =~ /^[#=]/) {
							$count = 0;
						}
						push(@result, 1);
						push(@result, $count);
					}
					push(@result, $code);
				}
			}
		}
	}

	close(INPUT);
	if ($exclude_flag || $exclude_br_flag) {
		warn("WARNING: unterminated exclusion section in $filename\n");
	}
	return(\@result, $branches, \@functions);
}
+
+
+# Map LLVM versions to the version of GCC gcov which they emulate.
+
sub map_llvm_version($)
{
	# Translate an encoded LLVM gcov version into the encoded GCC gcov
	# version it emulates. Every known LLVM version (>= 3.4) emulates
	# GCC gcov 4.2; unknown (older) versions fall back to 4.2 with a
	# warning.
	my ($ver) = @_;

	if ($ver < 0x030400) {
		warn("WARNING: This version of LLVM's gcov is unknown.  ".
		     "Assuming it emulates GCC gcov version 4.2.\n");
	}

	return 0x040200;
}
+
+
+# Return a readable version of encoded gcov version.
+
sub version_to_str($)
{
	# Render an encoded gcov version (one byte each for major, minor,
	# patch) as a human-readable "major.minor.patch" string.
	my ($ver) = @_;

	return sprintf("%d.%d.%d",
		       ($ver >> 16) & 0xff,
		       ($ver >> 8) & 0xff,
		       $ver & 0xff);
}
+
+
+#
+# Get the GCOV tool version. Return an integer number which represents the
+# GCOV version. Version numbers can be compared using standard integer
+# operations.
+#
+
sub get_gcov_version()
{
	# Run "$gcov_tool --version" and parse the output into an encoded
	# integer version (one byte each for major/minor/patch), suitable for
	# direct integer comparison. LLVM gcov is mapped to the GCC version
	# it emulates. Returns (encoded_version, raw_version_string).
	local *HANDLE;
	my $version_string;
	my $result;
	my ($a, $b, $c) = (4, 2, 0);	# Fallback version

	# Examples for gcov version output:
	#
	# gcov (GCC) 4.4.7 20120313 (Red Hat 4.4.7-3)
	#
	# gcov (crosstool-NG 1.18.0) 4.7.2
	#
	# LLVM (http://llvm.org/):
	#   LLVM version 3.4svn
	#
	# Apple LLVM version 8.0.0 (clang-800.0.38)
	#       Optimized build.
	#       Default target: x86_64-apple-darwin16.0.0
	#       Host CPU: haswell

	open(GCOV_PIPE, "-|", "$gcov_tool --version")
		or die("ERROR: cannot retrieve gcov version!\n");
	# Slurp mode for the remainder of this scope: read all output at once
	local $/;
	$version_string = <GCOV_PIPE>;
	close(GCOV_PIPE);

	# Remove all bracketed information
	# (e.g. "(Red Hat 4.4.7-3)" would otherwise match the version regex)
	$version_string =~ s/\([^\)]*\)//g;

	if ($version_string =~ /(\d+)\.(\d+)(\.(\d+))?/) {
		($a, $b, $c) = ($1, $2, $4);
		$c = 0 if (!defined($c));
	} else {
		warn("WARNING: cannot determine gcov version - ".
		     "assuming $a.$b.$c\n");
	}
	$result = $a << 16 | $b << 8 | $c;

	if ($version_string =~ /LLVM/) {
		$result = map_llvm_version($result);
		info("Found LLVM gcov version $a.$b.$c, which emulates gcov ".
		     "version ".version_to_str($result)."\n");
	} else {
		info("Found gcov version: ".version_to_str($result)."\n");
	}

	return ($result, $version_string);
}
+
+
+#
+# info(printf_parameter)
+#
+# Use printf to write PRINTF_PARAMETER to stdout only when the $quiet flag
+# is not set.
+#
+
sub info(@)
{
	# Print a printf-style progress message unless --quiet was given.
	# When the .info data itself is written to STDOUT, divert messages
	# to STDERR so they do not corrupt the output.
	return if ($quiet);

	if (defined($output_filename) && $output_filename eq "-")
	{
		printf(STDERR @_);
	}
	else
	{
		printf(@_);
	}
}
+
+
+#
+# int_handler()
+#
+# Called when the script was interrupted by an INT signal (e.g. CTRl-C)
+#
+
sub int_handler()
{
	# SIGINT handler: restore the initial working directory, report the
	# abort and terminate with a non-zero exit code.
	chdir($cwd) if ($cwd);
	info("Aborted.\n");
	exit(1);
}
+
+
+#
+# system_no_output(mode, parameters)
+#
+# Call an external program using PARAMETERS while suppressing depending on
+# the value of MODE:
+#
+#   MODE & 1: suppress STDOUT
+#   MODE & 2: suppress STDERR
+#
+# Return 0 on success, non-zero otherwise.
+#
+
sub system_no_output($@)
{
	# Run PARAMETERS via system() with STDOUT and/or STDERR suppressed
	# according to MODE (bit 0: STDOUT, bit 1: STDERR). Returns the raw
	# $? status of the child (0 on success). The save/redirect/close/
	# restore sequence below is strictly order-dependent.
	my $mode = shift;
	my $result;
	local *OLD_STDERR;
	local *OLD_STDOUT;

	# Save old stdout and stderr handles (">>&" duplicates the handle)
	($mode & 1) && open(OLD_STDOUT, ">>&", "STDOUT");
	($mode & 2) && open(OLD_STDERR, ">>&", "STDERR");

	# Redirect to /dev/null
	($mode & 1) && open(STDOUT, ">", "/dev/null");
	($mode & 2) && open(STDERR, ">", "/dev/null");

	debug("system(".join(' ', @_).")\n");
	system(@_);
	# Capture $? immediately, before close() can clobber it
	$result = $?;

	# Close redirected handles
	($mode & 1) && close(STDOUT);
	($mode & 2) && close(STDERR);

	# Restore old handles
	($mode & 1) && open(STDOUT, ">>&", "OLD_STDOUT");
	($mode & 2) && open(STDERR, ">>&", "OLD_STDERR");

	return $result;
}
+
+
+#
+# read_config(filename)
+#
+# Read configuration file FILENAME and return a reference to a hash containing
+# all valid key=value pairs found.
+#
+
sub read_config($)
{
	# Parse configuration file FILENAME consisting of "key = value"
	# lines; '#' starts a comment. Returns a reference to a hash of all
	# valid pairs, or undef when the file cannot be opened. Malformed
	# lines produce a warning and are skipped.
	my ($filename) = @_;
	my %result;
	local *HANDLE;

	unless (open(HANDLE, "<", $filename))
	{
		warn("WARNING: cannot read configuration file $filename\n");
		return undef;
	}
	while (my $line = <HANDLE>)
	{
		chomp($line);
		# Strip comments and surrounding whitespace
		$line =~ s/#.*//;
		$line =~ s/^\s+//;
		$line =~ s/\s+$//;
		next if ($line eq "");
		my ($key, $value) = split(/\s*=\s*/, $line, 2);
		if (defined($key) && defined($value))
		{
			$result{$key} = $value;
		}
		else
		{
			warn("WARNING: malformed statement in line $. ".
			     "of configuration file $filename\n");
		}
	}
	close(HANDLE);
	return \%result;
}
+
+
+#
+# apply_config(REF)
+#
+# REF is a reference to a hash containing the following mapping:
+#
+#   key_string => var_ref
+#
+# where KEY_STRING is a keyword and VAR_REF is a reference to an associated
+# variable. If the global configuration hashes CONFIG or OPT_RC contain a value
+# for keyword KEY_STRING, VAR_REF will be assigned the value for that keyword. 
+#
+
sub apply_config($)
{
	# For each keyword in REF (keyword -> variable reference), assign the
	# matching value from the command-line --rc settings (%opt_rc) or,
	# failing that, from the configuration file ($config). --rc wins.
	my ($ref) = @_;

	foreach my $key (keys(%{$ref}))
	{
		if (defined($opt_rc{$key})) {
			${$ref->{$key}} = $opt_rc{$key};
		} elsif (defined($config->{$key})) {
			${$ref->{$key}} = $config->{$key};
		}
	}
}
+
+
+#
+# get_exclusion_data(filename)
+#
+# Scan specified source code file for exclusion markers and return
+#   linenumber -> 1
+# for all lines which should be excluded.
+#
+
sub get_exclusion_data($)
{
	# Scan source file FILENAME for lcov exclusion markers. Returns a
	# hash reference mapping each excluded line number to 1, or undef
	# when the file cannot be opened.
	my ($filename) = @_;
	my %excluded;
	my $in_section = 0;
	local *HANDLE;

	unless (open(HANDLE, "<", $filename)) {
		warn("WARNING: could not open $filename\n");
		return undef;
	}
	while (<HANDLE>) {
		# Track LCOV_EXCL_START .. LCOV_EXCL_STOP sections
		if (/$EXCL_STOP/) {
			$in_section = 0;
		} elsif (/$EXCL_START/) {
			$in_section = 1;
		}
		# Single-line marker, or any line inside an exclusion section
		if (/$excl_line/ || $in_section) {
			$excluded{$.} = 1;
		}
	}
	close(HANDLE);

	if ($in_section) {
		warn("WARNING: unterminated exclusion section in $filename\n");
	}

	return \%excluded;
}
+
+
+#
+# apply_exclusion_data(instr, graph)
+#
+# Remove lines from instr and graph data structures which are marked
+# for exclusion in the source code file.
+#
+# Return adjusted (instr, graph).
+#
+# graph         : file name -> function data
+# function data : function name -> line data
+# line data     : [ line1, line2, ... ]
+#
+# instr     : filename -> line data
+# line data : [ line1, line2, ... ]
+#
+
sub apply_exclusion_data($$)
{
	# Remove lines marked for exclusion in the source files from both the
	# INSTR (filename -> instrumented lines) and GRAPH (filename ->
	# function -> lines) structures. Mutates the structures in place and
	# returns the adjusted (instr, graph) pair.
	my ($instr, $graph) = @_;
	my $filename;
	my %excl_data;
	my $excl_read_failed = 0;

	# Collect exclusion marker data
	foreach $filename (sort_uniq_lex(keys(%{$graph}), keys(%{$instr}))) {
		my $excl = get_exclusion_data($filename);

		# Skip and note if file could not be read
		if (!defined($excl)) {
			$excl_read_failed = 1;
			next;
		}

		# Add to collection if there are markers
		$excl_data{$filename} = $excl if (keys(%{$excl}) > 0);
	}

	# Warn if not all source files could be read
	if ($excl_read_failed) {
		warn("WARNING: some exclusion markers may be ignored\n");
	}

	# Skip if no markers were found
	return ($instr, $graph) if (keys(%excl_data) == 0);

	# Apply exclusion marker data to graph
	foreach $filename (keys(%excl_data)) {
		my $function_data = $graph->{$filename};
		my $excl = $excl_data{$filename};
		my $function;

		next if (!defined($function_data));

		foreach $function (keys(%{$function_data})) {
			my $line_data = $function_data->{$function};
			my $line;
			my @new_data;

			# To be consistent with exclusion parser in non-initial
			# case we need to remove a function if the first line
			# was excluded
			if ($excl->{$line_data->[0]}) {
				delete($function_data->{$function});
				next;
			}
			# Copy only lines which are not excluded
			foreach $line (@{$line_data}) {
				push(@new_data, $line) if (!$excl->{$line});
			}

			# Store modified list
			if (scalar(@new_data) > 0) {
				$function_data->{$function} = \@new_data;
			} else {
				# All of this function was excluded
				delete($function_data->{$function});
			}
		}

		# Check if all functions of this file were excluded
		if (keys(%{$function_data}) == 0) {
			delete($graph->{$filename});
		}
	}

	# Apply exclusion marker data to instr
	foreach $filename (keys(%excl_data)) {
		my $line_data = $instr->{$filename};
		my $excl = $excl_data{$filename};
		my $line;
		my @new_data;

		next if (!defined($line_data));

		# Copy only lines which are not excluded
		foreach $line (@{$line_data}) {
			push(@new_data, $line) if (!$excl->{$line});
		}

		# Store modified list
		$instr->{$filename} = \@new_data;
	}

	return ($instr, $graph);
}
+
+
sub process_graphfile($$)
{
	# Process graph file FILE (relative to DIR) for the "initial"
	# (zero-coverage) capture: read the graph (.bb/.bbg/.gcno depending
	# on gcov version), apply exclusion markers, and write an .info
	# section with zero execution counts for every instrumented line and
	# function. Output goes to $output_filename or to FILE.info.
	my ($file, $dir) = @_;
	my $graph_filename = $file;
	my $graph_dir;
	my $graph_basename;
	my $source_dir;
	my $base_dir;
	my $graph;
	my $instr;
	my $filename;
	local *INFO_HANDLE;

	info("Processing %s\n", abs2rel($file, $dir));

	# Get path to data file in absolute and normalized form (begins with /,
	# contains no more ../ or ./)
	$graph_filename = solve_relative_path($cwd, $graph_filename);

	# Get directory and basename of data file
	($graph_dir, $graph_basename) = split_filename($graph_filename);

	$source_dir = $graph_dir;
	if (is_compat($COMPAT_MODE_LIBTOOL)) {
		# Avoid files from .libs dirs
		$source_dir =~ s/\.libs$//;
	}

	# Construct base_dir for current file
	if ($base_directory)
	{
		# User-specified --base-directory takes precedence
		$base_dir = $base_directory;
	}
	else
	{
		$base_dir = $source_dir;
	}

	# Ignore empty graph file (e.g. source file with no statement)
	if (-z $graph_filename)
	{
		warn("WARNING: empty $graph_filename (skipped)\n");
		return;
	}

	# Select graph file parser by gcov version:
	# < 3.4: .bb format ("hammer" compat mode uses .bbg); >= 3.4: .gcno
	if ($gcov_version < $GCOV_VERSION_3_4_0)
	{
		if (is_compat($COMPAT_MODE_HAMMER))
		{
			($instr, $graph) = read_bbg($graph_filename);
		}
		else
		{
			($instr, $graph) = read_bb($graph_filename);
		}
	}
	else
	{
		($instr, $graph) = read_gcno($graph_filename);
	}

	# Try to find base directory automatically if requested by user
	if ($rc_auto_base) {
		$base_dir = find_base_from_graph($base_dir, $instr, $graph);
	}

	($instr, $graph) = adjust_graph_filenames($base_dir, $instr, $graph);

	if (!$no_markers) {
		# Apply exclusion marker data to graph file data
		($instr, $graph) = apply_exclusion_data($instr, $graph);
	}

	# Check whether we're writing to a single file
	if ($output_filename)
	{
		if ($output_filename eq "-")
		{
			*INFO_HANDLE = *STDOUT;
		}
		else
		{
			# Append to output file
			open(INFO_HANDLE, ">>", $output_filename)
				or die("ERROR: cannot write to ".
				       "$output_filename!\n");
		}
	}
	else
	{
		# Open .info file for output
		open(INFO_HANDLE, ">", "$graph_filename.info")
			or die("ERROR: cannot create $graph_filename.info!\n");
	}

	# Write test name
	printf(INFO_HANDLE "TN:%s\n", $test_name);
	foreach $filename (sort(keys(%{$instr})))
	{
		my $funcdata = $graph->{$filename};
		my $line;
		my $linedata;

		# Skip external files if requested
		if (!$opt_external) {
			if (is_external($filename)) {
				info("  ignoring data for external file ".
				     "$filename\n");
				next;
			}
		}

		print(INFO_HANDLE "SF:$filename\n");

		if (defined($funcdata) && $func_coverage) {
			# Sort functions by starting line number
			my @functions = sort {$funcdata->{$a}->[0] <=>
					      $funcdata->{$b}->[0]}
					     keys(%{$funcdata});
			my $func;

			# Gather list of instrumented lines and functions
			foreach $func (@functions) {
				$linedata = $funcdata->{$func};

				# Print function name and starting line
				print(INFO_HANDLE "FN:".$linedata->[0].
				      ",".filter_fn_name($func)."\n");
			}
			# Print zero function coverage data
			foreach $func (@functions) {
				print(INFO_HANDLE "FNDA:0,".
				      filter_fn_name($func)."\n");
			}
			# Print function summary
			print(INFO_HANDLE "FNF:".scalar(@functions)."\n");
			print(INFO_HANDLE "FNH:0\n");
		}
		# Print zero line coverage data
		foreach $line (@{$instr->{$filename}}) {
			print(INFO_HANDLE "DA:$line,0\n");
		}
		# Print line summary
		print(INFO_HANDLE "LF:".scalar(@{$instr->{$filename}})."\n");
		print(INFO_HANDLE "LH:0\n");

		print(INFO_HANDLE "end_of_record\n");
	}
	# Don't close STDOUT when writing .info data to it
	if (!($output_filename && ($output_filename eq "-")))
	{
		close(INFO_HANDLE);
	}
}
+
sub filter_fn_name($)
{
	# Replace characters that geninfo uses internally as function-name
	# delimiters (',' and '=') with underscores.
	my ($fn) = @_;

	$fn =~ tr/,=/__/;

	return $fn;
}
+
sub warn_handler($)
{
	# Warning hook: prefix all warnings with the tool name.
	my ($msg) = @_;

	warn($tool_name.": ".$msg);
}
+
sub die_handler($)
{
	# Fatal-error hook: prefix all die messages with the tool name.
	my ($msg) = @_;

	die($tool_name.": ".$msg);
}
+
+
+#
+# graph_error(filename, message)
+#
+# Print message about error in graph file. If ignore_graph_error is set, return.
+# Otherwise abort.
+#
+
sub graph_error($$)
{
	# Report a problem with a graph file: warn and continue when the
	# user chose to ignore graph errors, abort otherwise.
	my ($filename, $msg) = @_;

	die("ERROR: $filename: $msg\n") unless ($ignore[$ERROR_GRAPH]);

	warn("WARNING: $filename: $msg - skipping\n");
	return;
}
+
+#
+# graph_expect(description)
+#
+# If debug is set to a non-zero value, print the specified description of what
+# is expected to be read next from the graph file.
+#
+
sub graph_expect($)
{
	# In debug mode, announce what the graph-file parser expects to read
	# next; a no-op otherwise or when no description was given.
	my ($msg) = @_;

	return unless ($debug && defined($msg));

	print(STDERR "DEBUG: expecting $msg\n");
}
+
+#
+# graph_read(handle, bytes[, description, peek])
+#
+# Read and return the specified number of bytes from handle. Return undef
+# if the number of bytes could not be read. If PEEK is non-zero, reset
+# file position after read.
+#
+
sub graph_read(*$;$$)
{
	# Read LENGTH bytes from HANDLE and return them, or undef on a short
	# read. With PEEK set, the file position is restored afterwards. In
	# debug mode, a hex+ASCII dump of the data is written to STDERR.
	my ($handle, $length, $desc, $peek) = @_;
	my $data;
	my $result;
	my $pos;

	graph_expect($desc);
	if ($peek) {
		# Remember position so it can be restored after the read
		$pos = tell($handle);
		if ($pos == -1) {
			warn("Could not get current file position: $!\n");
			return undef;
		}
	}
	$result = read($handle, $data, $length);
	if ($debug) {
		my $op = $peek ? "peek" : "read";
		my $ascii = "";
		my $hex = "";
		my $i;

		print(STDERR "DEBUG: $op($length)=$result: ");
		for ($i = 0; $i < length($data); $i++) {
			# NOTE(review): stray second ';' below is a harmless
			# empty statement
			my $c = substr($data, $i, 1);;
			my $n = ord($c);

			$hex .= sprintf("%02x ", $n);
			# Printable ASCII range; everything else shown as '.'
			if ($n >= 32 && $n <= 127) {
				$ascii .= $c;
			} else {
				$ascii .= ".";
			}
		}
		print(STDERR "$hex |$ascii|");
		print(STDERR "\n");
	}
	if ($peek) {
		if (!seek($handle, $pos, 0)) {
			warn("Could not set file position: $!\n");
			return undef;
		}
	}
	if ($result != $length) {
		# Short read counts as failure
		return undef;
	}
	return $data;
}
+
+#
+# graph_skip(handle, bytes[, description])
+#
+# Read and discard the specified number of bytes from handle. Return non-zero
+# if bytes could be read, zero otherwise.
+#
+
+sub graph_skip(*$;$)
+{
+	my ($handle, $length, $desc) = @_;
+
+	# Delegate to graph_read() and reduce its result to a boolean
+	return defined(graph_read($handle, $length, $desc)) ? 1 : 0;
+}
+
+#
+# uniq(list)
+#
+# Return list without duplicate entries.
+#
+
+sub uniq(@)
+{
+	my %seen;
+
+	# Keep only the first occurrence of each entry, preserving order
+	return grep { !$seen{$_}++ } @_;
+}
+
+#
+# sort_uniq(list)
+#
+# Return list in numerically ascending order and without duplicate entries.
+#
+
+sub sort_uniq(@)
+{
+	my %seen;
+
+	# Hash slice assignment de-duplicates in one step
+	@seen{@_} = ();
+	return sort { $a <=> $b } keys(%seen);
+}
+
+#
+# sort_uniq_lex(list)
+#
+# Return list in lexically ascending order and without duplicate entries.
+#
+
+sub sort_uniq_lex(@)
+{
+	my %seen;
+
+	# Hash slice assignment de-duplicates in one step
+	@seen{@_} = ();
+	return sort keys(%seen);
+}
+
+#
+# parent_dir(dir)
+#
+# Return parent directory for DIR. DIR must not contain relative path
+# components.
+#
+
+sub parent_dir($)
+{
+	my ($dir) = @_;
+	my ($volume, $path, $file) = splitpath($dir, 1);
+	my @components = splitdir($path);
+
+	# Drop the last path component to obtain the parent directory
+	pop(@components);
+	return catpath($volume, catdir(@components), $file);
+}
+
+#
+# find_base_from_graph(base_dir, instr, graph)
+#
+# Try to determine the base directory of the graph file specified by INSTR
+# and GRAPH. The base directory is the base for all relative filenames in
+# the graph file. It is defined by the current working directory at time
+# of compiling the source file.
+#
+# This function implements a heuristic which relies on the following
+# assumptions:
+# - all files used for compilation are still present at their location
+# - the base directory is either BASE_DIR or one of its parent directories
+# - files by the same name are not present in multiple parent directories
+#
+
+sub find_base_from_graph($$$)
+{
+	my ($base_dir, $instr, $graph) = @_;
+	my $old_base;
+	my $best_miss;	# lowest miss count seen so far
+	my $best_base;	# candidate directory with the lowest miss count
+	my %rel_files;
+
+	# Determine list of relative paths
+	foreach my $filename (keys(%{$instr}), keys(%{$graph})) {
+		next if (file_name_is_absolute($filename));
+
+		$rel_files{$filename} = 1;
+	}
+
+	# Early exit if there are no relative paths
+	return $base_dir if (!%rel_files);
+
+	# Walk from BASE_DIR up to the filesystem root, counting for each
+	# candidate how many relative files do NOT exist beneath it
+	do {
+		my $miss = 0;
+
+		foreach my $filename (keys(%rel_files)) {
+			if (!-e solve_relative_path($base_dir, $filename)) {
+				$miss++;
+			}
+		}
+
+		debug("base_dir=$base_dir miss=$miss\n");
+
+		# Exit if we find an exact match with no misses
+		return $base_dir if ($miss == 0);
+
+		# No exact match, aim for the one with the least source file
+		# misses
+		if (!defined($best_base) || $miss < $best_miss) {
+			$best_base = $base_dir;
+			$best_miss = $miss;
+		}
+
+		# Repeat until there's no more parent directory
+		$old_base = $base_dir;
+		$base_dir = parent_dir($base_dir);
+	} while ($old_base ne $base_dir);
+
+	return $best_base;
+}
+
+#
+# adjust_graph_filenames(base_dir, instr, graph)
+#
+# Make relative paths in INSTR and GRAPH absolute and apply
+# geninfo_adjust_src_path setting to graph file data.
+#
+
+sub adjust_graph_filenames($$$)
+{
+	my ($base_dir, $instr, $graph) = @_;
+
+	# Rewrite the keys of INSTR in place
+	foreach my $filename (keys(%{$instr})) {
+		my $old_filename = $filename;
+
+		# Convert to absolute canonical form
+		$filename = solve_relative_path($base_dir, $filename);
+
+		# Apply adjustment ($adjust_src_pattern comes from the
+		# geninfo_adjust_src_path setting)
+		if (defined($adjust_src_pattern)) {
+			$filename =~ s/$adjust_src_pattern/$adjust_src_replace/g;
+		}
+
+		# Re-key the entry under the rewritten filename
+		if ($filename ne $old_filename) {
+			$instr->{$filename} = delete($instr->{$old_filename});
+		}
+	}
+
+	# Same procedure for the keys of GRAPH
+	foreach my $filename (keys(%{$graph})) {
+		my $old_filename = $filename;
+
+		# Make absolute
+		# Convert to absolute canonical form
+		$filename = solve_relative_path($base_dir, $filename);
+
+		# Apply adjustment
+		if (defined($adjust_src_pattern)) {
+			$filename =~ s/$adjust_src_pattern/$adjust_src_replace/g;
+		}
+
+		if ($filename ne $old_filename) {
+			$graph->{$filename} = delete($graph->{$old_filename});
+		}
+	}
+
+	return ($instr, $graph);
+}
+
+#
+# graph_cleanup(graph)
+#
+# Remove entries for functions with no lines. Remove duplicate line numbers
+# while preserving their original order.
+#
+
+sub graph_cleanup($)
+{
+	my ($graph) = @_;
+
+	foreach my $filename (keys(%{$graph})) {
+		my $per_file = $graph->{$filename};
+
+		foreach my $function (keys(%{$per_file})) {
+			my $lines = $per_file->{$function};
+
+			if (@$lines) {
+				# Drop duplicate line numbers
+				$per_file->{$function} = [ uniq(@$lines) ];
+			} else {
+				# Discard functions without any lines
+				delete($per_file->{$function});
+			}
+		}
+		# Discard files without any remaining functions
+		delete($graph->{$filename}) if (!%{$per_file});
+	}
+}
+
+#
+# graph_find_base(bb)
+#
+# Try to identify the filename which is the base source file for the
+# specified bb data.
+#
+
+sub graph_find_base($)
+{
+	my ($bb) = @_;
+	my %file_count;
+	my $basefile;
+	my $best_count = 0;
+
+	# Count for each file the number of functions it contributes code to
+	foreach my $func (keys(%{$bb})) {
+		foreach my $file (keys(%{$bb->{$func}})) {
+			$file_count{$file}++;
+		}
+	}
+	# The file that contains code for the most functions is likely the
+	# base file; a tie for the maximum means no base file can be
+	# identified.
+	foreach my $file (keys(%file_count)) {
+		if ($file_count{$file} > $best_count) {
+			$best_count = $file_count{$file};
+			$basefile = $file;
+		} elsif ($file_count{$file} == $best_count) {
+			$basefile = undef;
+		}
+	}
+
+	return $basefile;
+}
+
+#
+# graph_from_bb(bb, fileorder, bb_filename, fileorder_first)
+#
+# Convert data from bb to the graph format and list of instrumented lines.
+#
+# If FILEORDER_FIRST is set, use fileorder data to determine a functions
+# base source file.
+#
+# Returns (instr, graph).
+#
+# bb         : function name -> file data
+#            : undef -> file order
+# file data  : filename -> line data
+# line data  : [ line1, line2, ... ]
+#
+# file order : function name -> [ filename1, filename2, ... ]
+#
+# graph         : file name -> function data
+# function data : function name -> line data
+# line data     : [ line1, line2, ... ]
+#
+# instr     : filename -> line data
+# line data : [ line1, line2, ... ]
+#
+
+sub graph_from_bb($$$$)
+{
+	my ($bb, $fileorder, $bb_filename, $fileorder_first) = @_;
+	my $graph = {};
+	my $instr = {};
+	my $basefile;
+	my $file;
+	my $func;
+	my $filedata;
+	my $linedata;
+	my $order;
+
+	# Heuristically determine the main source file of this data set
+	$basefile = graph_find_base($bb);
+	# Create graph structure
+	foreach $func (keys(%{$bb})) {
+		$filedata = $bb->{$func};
+		$order = $fileorder->{$func};
+
+		# Account for lines in functions
+		if (defined($basefile) && defined($filedata->{$basefile}) &&
+		    !$fileorder_first) {
+			# If the basefile contributes to this function,
+			# account this function to the basefile.
+			$graph->{$basefile}->{$func} = $filedata->{$basefile};
+		} else {
+			# If the basefile does not contribute to this function,
+			# account this function to the first file contributing
+			# lines.
+			$graph->{$order->[0]}->{$func} =
+				$filedata->{$order->[0]};
+		}
+
+		foreach $file (keys(%{$filedata})) {
+			# Account for instrumented lines
+			$linedata = $filedata->{$file};
+			push(@{$instr->{$file}}, @$linedata);
+		}
+	}
+	# Clean up array of instrumented lines: sort and de-duplicate
+	foreach $file (keys(%{$instr})) {
+		$instr->{$file} = [ sort_uniq(@{$instr->{$file}}) ];
+	}
+
+	return ($instr, $graph);
+}
+
+#
+# graph_add_order(fileorder, function, filename)
+#
+# Add an entry for filename to the fileorder data set for function.
+#
+
+sub graph_add_order($$$)
+{
+	my ($fileorder, $function, $filename) = @_;
+	my $list = $fileorder->{$function};
+
+	# Nothing to do if this file was already recorded for the function
+	foreach my $entry (@$list) {
+		return if ($entry eq $filename);
+	}
+	push(@$list, $filename);
+	$fileorder->{$function} = $list;
+}
+
+#
+# read_bb_word(handle[, description])
+#
+# Read and return a word in .bb format from handle.
+#
+
+sub read_bb_word(*;$)
+{
+	my ($handle, $desc) = @_;
+
+	# Words in the .bb file format are 4 bytes wide
+	return graph_read($handle, 4, $desc);
+}
+
+#
+# read_bb_value(handle[, description])
+#
+# Read a word in .bb format from handle and return the word and its integer
+# value.
+#
+
+sub read_bb_value(*;$)
+{
+	my ($handle, $desc) = @_;
+	my $word;
+
+	$word = read_bb_word($handle, $desc);
+	return undef if (!defined($word));
+
+	# Return both the raw word and its little-endian ("V") integer
+	# value; callers in scalar context receive the integer value.
+	return ($word, unpack("V", $word));
+}
+
+#
+# read_bb_string(handle, delimiter)
+#
+# Read and return a string in .bb format from handle up to the specified
+# delimiter value.
+#
+
+sub read_bb_string(*$)
+{
+	my ($handle, $delimiter) = @_;
+	my $string = "";
+	my ($word, $value);
+
+	graph_expect("string");
+	# Accumulate words until the delimiter word is encountered
+	while (1) {
+		($word, $value) = read_bb_value($handle, "string or delimiter");
+		return undef if (!defined($value));
+		last if ($value == $delimiter);
+		$string .= $word;
+	}
+	# Strip NUL padding bytes
+	$string =~ s/\0//g;
+
+	return $string;
+}
+
+#
+# read_bb(filename)
+#
+# Read the contents of the specified .bb file and return (instr, graph), where:
+#
+#   instr     : filename -> line data
+#   line data : [ line1, line2, ... ]
+#
+#   graph     :     filename -> file_data
+#   file_data : function name -> line_data
+#   line_data : [ line1, line2, ... ]
+#
+# See the gcov info pages of gcc 2.95 for a description of the .bb file format.
+#
+
+sub read_bb($)
+{
+	my ($bb_filename) = @_;
+	my $minus_one = 0x80000001;	# delimiter for source file names
+	my $minus_two = 0x80000002;	# delimiter for function names
+	my $value;
+	my $filename;
+	my $function;
+	my $bb = {};
+	my $fileorder = {};
+	my $instr;
+	my $graph;
+	local *HANDLE;
+
+	open(HANDLE, "<", $bb_filename) or goto open_error;
+	binmode(HANDLE);
+	while (!eof(HANDLE)) {
+		$value = read_bb_value(*HANDLE, "data word");
+		goto incomplete if (!defined($value));
+		if ($value == $minus_one) {
+			# Source file name
+			graph_expect("filename");
+			$filename = read_bb_string(*HANDLE, $minus_one);
+			goto incomplete if (!defined($filename));
+		} elsif ($value == $minus_two) {
+			# Function name
+			graph_expect("function name");
+			$function = read_bb_string(*HANDLE, $minus_two);
+			goto incomplete if (!defined($function));
+		} elsif ($value > 0) {
+			# Line number; requires that both a filename and a
+			# function name were seen before
+			if (!defined($filename) || !defined($function)) {
+				warn("WARNING: unassigned line number ".
+				     "$value\n");
+				next;
+			}
+			push(@{$bb->{$function}->{$filename}}, $value);
+			graph_add_order($fileorder, $function, $filename);
+		}
+	}
+	close(HANDLE);
+
+	# Convert collected data to the (instr, graph) representation
+	($instr, $graph) = graph_from_bb($bb, $fileorder, $bb_filename, 0);
+	graph_cleanup($graph);
+
+	return ($instr, $graph);
+
+open_error:
+	graph_error($bb_filename, "could not open file");
+	return undef;
+incomplete:
+	graph_error($bb_filename, "reached unexpected end of file");
+	return undef;
+}
+
+#
+# read_bbg_word(handle[, description])
+#
+# Read and return a word in .bbg format.
+#
+
+sub read_bbg_word(*;$)
+{
+	my ($handle, $desc) = @_;
+
+	# Words in the .bbg file format are 4 bytes wide
+	return graph_read($handle, 4, $desc);
+}
+
+#
+# read_bbg_value(handle[, description])
+#
+# Read a word in .bbg format from handle and return its integer value.
+#
+
+sub read_bbg_value(*;$)
+{
+	my ($handle, $desc) = @_;
+	my $word;
+
+	$word = read_bbg_word($handle, $desc);
+	return undef if (!defined($word));
+
+	# .bbg values are stored big-endian ("N")
+	return unpack("N", $word);
+}
+
+#
+# read_bbg_string(handle)
+#
+# Read and return a string in .bbg format.
+#
+
+sub read_bbg_string(*)
+{
+	my ($handle, $desc) = @_;
+	my $length;
+	my $string;
+
+	graph_expect("string");
+	# Read string length
+	$length = read_bbg_value($handle, "string length");
+	return undef if (!defined($length));
+	if ($length == 0) {
+		return "";
+	}
+	# Read string
+	$string = graph_read($handle, $length, "string");
+	return undef if (!defined($string));
+	# Skip padding; note that for a length that is a multiple of 4 this
+	# skips a full 4 bytes — presumably strings are always stored with
+	# at least one padding byte (TODO confirm against the .bbg format
+	# description in gcov-io.h)
+	graph_skip($handle, 4 - $length % 4, "string padding") or return undef;
+
+	return $string;
+}
+
+#
+# read_bbg_lines_record(handle, bbg_filename, bb, fileorder, filename,
+#                       function)
+#
+# Read a bbg format lines record from handle and add the relevant data to
+# bb and fileorder. Return filename on success, undef on error.
+#
+
+sub read_bbg_lines_record(*$$$$$)
+{
+	my ($handle, $bbg_filename, $bb, $fileorder, $filename, $function) = @_;
+	my $string;
+	my $lineno;
+
+	graph_expect("lines record");
+	# Skip basic block index
+	graph_skip($handle, 4, "basic block index") or return undef;
+	while (1) {
+		# Read line number
+		$lineno = read_bbg_value($handle, "line number");
+		return undef if (!defined($lineno));
+		if ($lineno == 0) {
+			# Got a marker for a new filename
+			graph_expect("filename");
+			$string = read_bbg_string($handle);
+			return undef if (!defined($string));
+			# Check for end of record (empty filename marker)
+			if ($string eq "") {
+				return $filename;
+			}
+			$filename = $string;
+			# Ensure an entry exists even for files that end up
+			# contributing no lines
+			if (!exists($bb->{$function}->{$filename})) {
+				$bb->{$function}->{$filename} = [];
+			}
+			next;
+		}
+		# Got an actual line number
+		if (!defined($filename)) {
+			warn("WARNING: unassigned line number in ".
+			     "$bbg_filename\n");
+			next;
+		}
+		push(@{$bb->{$function}->{$filename}}, $lineno);
+		graph_add_order($fileorder, $function, $filename);
+	}
+}
+
+#
+# read_bbg(filename)
+#
+# Read the contents of the specified .bbg file and return the following mapping:
+#   graph:     filename -> file_data
+#   file_data: function name -> line_data
+#   line_data: [ line1, line2, ... ]
+#
+# See the gcov-io.h file in the SLES 9 gcc 3.3.3 source code for a description
+# of the .bbg format.
+#
+
+sub read_bbg($)
+{
+	my ($bbg_filename) = @_;
+	my $file_magic = 0x67626267;	# "gbbg"
+	my $tag_function = 0x01000000;
+	my $tag_lines = 0x01450000;
+	my $word;
+	my $tag;
+	my $length;
+	my $function;
+	my $filename;
+	my $bb = {};
+	my $fileorder = {};
+	my $instr;
+	my $graph;
+	local *HANDLE;
+
+	open(HANDLE, "<", $bbg_filename) or goto open_error;
+	binmode(HANDLE);
+	# Read magic
+	$word = read_bbg_value(*HANDLE, "file magic");
+	goto incomplete if (!defined($word));
+	# Check magic
+	if ($word != $file_magic) {
+		goto magic_error;
+	}
+	# Skip version
+	graph_skip(*HANDLE, 4, "version") or goto incomplete;
+	while (!eof(HANDLE)) {
+		# Read record tag
+		$tag = read_bbg_value(*HANDLE, "record tag");
+		goto incomplete if (!defined($tag));
+		# Read record length; check the length value itself (the
+		# previous check of $tag here was a copy-paste bug that let
+		# a truncated length word go unnoticed)
+		$length = read_bbg_value(*HANDLE, "record length");
+		goto incomplete if (!defined($length));
+		if ($tag == $tag_function) {
+			graph_expect("function record");
+			# Read function name
+			graph_expect("function name");
+			$function = read_bbg_string(*HANDLE);
+			goto incomplete if (!defined($function));
+			$filename = undef;
+			# Skip function checksum
+			graph_skip(*HANDLE, 4, "function checksum")
+				or goto incomplete;
+		} elsif ($tag == $tag_lines) {
+			# Read lines record
+			$filename = read_bbg_lines_record(HANDLE, $bbg_filename,
+					  $bb, $fileorder, $filename,
+					  $function);
+			goto incomplete if (!defined($filename));
+		} else {
+			# Skip record contents
+			graph_skip(*HANDLE, $length, "unhandled record")
+				or goto incomplete;
+		}
+	}
+	close(HANDLE);
+	# Convert collected data to the (instr, graph) representation
+	($instr, $graph) = graph_from_bb($bb, $fileorder, $bbg_filename, 0);
+
+	graph_cleanup($graph);
+
+	return ($instr, $graph);
+
+open_error:
+	graph_error($bbg_filename, "could not open file");
+	return undef;
+incomplete:
+	graph_error($bbg_filename, "reached unexpected end of file");
+	return undef;
+magic_error:
+	graph_error($bbg_filename, "found unrecognized bbg file magic");
+	return undef;
+}
+
+#
+# read_gcno_word(handle[, description, peek])
+#
+# Read and return a word in .gcno format.
+#
+
+sub read_gcno_word(*;$$)
+{
+	my ($handle, $desc, $peek) = @_;
+
+	# Words in the .gcno file format are 4 bytes wide
+	return graph_read($handle, 4, $desc, $peek);
+}
+
+#
+# read_gcno_value(handle, big_endian[, description, peek])
+#
+# Read a word in .gcno format from handle and return its integer value
+# according to the specified endianness. If PEEK is non-zero, reset file
+# position after read.
+#
+
+sub read_gcno_value(*$;$$)
+{
+	my ($handle, $big_endian, $desc, $peek) = @_;
+	my $word = read_gcno_word($handle, $desc, $peek);
+
+	return undef if (!defined($word));
+	# "N" decodes big-endian, "V" little-endian 32-bit values
+	return unpack($big_endian ? "N" : "V", $word);
+}
+
+#
+# read_gcno_string(handle, big_endian)
+#
+# Read and return a string in .gcno format.
+#
+
+sub read_gcno_string(*$)
+{
+	my ($handle, $big_endian) = @_;
+	my $length;
+	my $string;
+
+	graph_expect("string");
+	# Read string length
+	$length = read_gcno_value($handle, $big_endian, "string length");
+	return undef if (!defined($length));
+	if ($length == 0) {
+		return "";
+	}
+	# The length is stored in 4-byte words; convert to bytes
+	$length *= 4;
+	# Read string
+	$string = graph_read($handle, $length, "string and padding");
+	return undef if (!defined($string));
+	# Strip NUL padding bytes
+	$string =~ s/\0//g;
+
+	return $string;
+}
+
+#
+# read_gcno_lines_record(handle, gcno_filename, bb, fileorder, filename,
+#                        function, big_endian)
+#
+# Read a gcno format lines record from handle and add the relevant data to
+# bb and fileorder. Return filename on success, undef on error.
+#
+
+sub read_gcno_lines_record(*$$$$$$)
+{
+	my ($handle, $gcno_filename, $bb, $fileorder, $filename, $function,
+	    $big_endian) = @_;
+	my $string;
+	my $lineno;
+
+	graph_expect("lines record");
+	# Skip basic block index
+	graph_skip($handle, 4, "basic block index") or return undef;
+	while (1) {
+		# Read line number
+		$lineno = read_gcno_value($handle, $big_endian, "line number");
+		return undef if (!defined($lineno));
+		if ($lineno == 0) {
+			# Got a marker for a new filename
+			graph_expect("filename");
+			$string = read_gcno_string($handle, $big_endian);
+			return undef if (!defined($string));
+			# Check for end of record (empty filename marker)
+			if ($string eq "") {
+				return $filename;
+			}
+			$filename = $string;
+			# Ensure an entry exists even for files that end up
+			# contributing no lines
+			if (!exists($bb->{$function}->{$filename})) {
+				$bb->{$function}->{$filename} = [];
+			}
+			next;
+		}
+		# Got an actual line number
+		if (!defined($filename)) {
+			warn("WARNING: unassigned line number in ".
+			     "$gcno_filename\n");
+			next;
+		}
+		# Add to list
+		push(@{$bb->{$function}->{$filename}}, $lineno);
+		graph_add_order($fileorder, $function, $filename);
+	}
+}
+
+#
+# determine_gcno_split_crc(handle, big_endian, rec_length, version)
+#
+# Determine if HANDLE refers to a .gcno file with a split checksum function
+# record format. Return non-zero in case of split checksum format, zero
+# otherwise, undef in case of read error.
+#
+
+sub determine_gcno_split_crc($$$$)
+{
+	my ($handle, $big_endian, $rec_length, $version) = @_;
+	my $strlen;
+	my $overlong_string;
+
+	# gcc 4.7 and later always use the split checksum format
+	return 1 if ($version >= $GCOV_VERSION_4_7_0);
+	return 1 if (is_compat($COMPAT_MODE_SPLIT_CRC));
+
+	# Heuristic:
+	# Decide format based on contents of next word in record:
+	# - pre-gcc 4.7
+	#   This is the function name length / 4 which should be
+	#   less than the remaining record length
+	# - gcc 4.7
+	#   This is a checksum, likely with high-order bits set,
+	#   resulting in a large number
+	# Peek (last arg = 1) so the file position is not advanced
+	$strlen = read_gcno_value($handle, $big_endian, undef, 1);
+	return undef if (!defined($strlen));
+	$overlong_string = 1 if ($strlen * 4 >= $rec_length - 12);
+
+	if ($overlong_string) {
+		if (is_compat_auto($COMPAT_MODE_SPLIT_CRC)) {
+			info("Auto-detected compatibility mode for split ".
+			     "checksum .gcno file format\n");
+
+			return 1;
+		} else {
+			# Sanity check
+			warn("Found overlong string in function record: ".
+			     "try '--compat split_crc'\n");
+		}
+	}
+
+	return 0;
+}
+
+#
+# read_gcno_function_record(handle, bb, fileorder, big_endian, rec_length,
+#                           version)
+#
+# Read a gcno format function record from handle and add the relevant data
+# to bb and fileorder. Return (filename, function, artificial) on success,
+# undef on error.
+#
+
+sub read_gcno_function_record(*$$$$$)
+{
+	my ($handle, $bb, $fileorder, $big_endian, $rec_length, $version) = @_;
+	my $filename;
+	my $function;
+	my $lineno;
+	my $lines;
+	my $artificial;
+
+	graph_expect("function record");
+	# Skip ident and checksum
+	graph_skip($handle, 8, "function ident and checksum") or return undef;
+	# Determine if this is a function record with split checksums;
+	# result is cached in the file-level $gcno_split_crc
+	if (!defined($gcno_split_crc)) {
+		$gcno_split_crc = determine_gcno_split_crc($handle, $big_endian,
+							   $rec_length,
+							   $version);
+		return undef if (!defined($gcno_split_crc));
+	}
+	# Skip cfg checksum word in case of split checksums
+	graph_skip($handle, 4, "function cfg checksum") if ($gcno_split_crc);
+	# Read function name
+	graph_expect("function name");
+	$function = read_gcno_string($handle, $big_endian);
+	return undef if (!defined($function));
+	# gcc 8+ adds a flag marking compiler-generated functions
+	if ($version >= $GCOV_VERSION_8_0_0) {
+		$artificial = read_gcno_value($handle, $big_endian,
+					      "compiler-generated entity flag");
+		return undef if (!defined($artificial));
+	}
+	# Read filename
+	graph_expect("filename");
+	$filename = read_gcno_string($handle, $big_endian);
+	return undef if (!defined($filename));
+	# Read first line number
+	$lineno = read_gcno_value($handle, $big_endian, "initial line number");
+	return undef if (!defined($lineno));
+	# Skip column and ending line number
+	if ($version >= $GCOV_VERSION_8_0_0) {
+		graph_skip($handle, 4, "column number") or return undef;
+		graph_skip($handle, 4, "ending line number") or return undef;
+	}
+	# Add to list
+	push(@{$bb->{$function}->{$filename}}, $lineno);
+	graph_add_order($fileorder, $function, $filename);
+
+	return ($filename, $function, $artificial);
+}
+
+#
+# map_gcno_version
+#
+# Map version number as found in .gcno files to the format used in geninfo.
+#
+
+sub map_gcno_version($)
+{
+	my ($version) = @_;
+	my $char1 = $version >> 24;
+	my $char2 = ($version >> 16) & 0xff;
+	my $char3 = ($version >> 8) & 0xff;
+	my ($major, $minor);
+
+	if ($char1 < ord('A')) {
+		# Single-digit major number: "Mmm"
+		$major = $char1 - ord('0');
+		$minor = ($char2 - ord('0')) * 10 + ($char3 - ord('0'));
+	} else {
+		# Two-digit major number: first char encodes the tens as a
+		# letter starting at 'A'
+		$major = ($char1 - ord('A')) * 10 + ($char2 - ord('0'));
+		$minor = $char3 - ord('0');
+	}
+
+	return ($major << 16) | ($minor << 8);
+}
+
+# Delete all entries listed in array ref FNS from hash ref HASH.
+sub remove_fn_from_hash($$)
+{
+	my ($hash, $fns) = @_;
+
+	# Hash-slice delete removes all listed keys in one operation
+	delete(@{$hash}{@$fns});
+	return;
+}
+
+#
+# read_gcno(filename)
+#
+# Read the contents of the specified .gcno file and return the following
+# mapping:
+#   graph:    filename -> file_data
+#   file_data: function name -> line_data
+#   line_data: [ line1, line2, ... ]
+#
+# See the gcov-io.h file in the gcc 3.3 source code for a description of
+# the .gcno format.
+#
+
+sub read_gcno($)
+{
+	my ($gcno_filename) = @_;
+	my $file_magic = 0x67636e6f;	# "gcno"
+	my $tag_function = 0x01000000;
+	my $tag_lines = 0x01450000;
+	my $big_endian;
+	my $word;
+	my $tag;
+	my $length;
+	my $filename;
+	my $function;
+	my $bb = {};
+	my $fileorder = {};
+	my $instr;
+	my $graph;
+	my $filelength;
+	my $version;
+	my $artificial;
+	my @artificial_fns;
+	local *HANDLE;
+
+	open(HANDLE, "<", $gcno_filename) or goto open_error;
+	$filelength = (stat(HANDLE))[7];
+	binmode(HANDLE);
+	# Read magic
+	$word = read_gcno_word(*HANDLE, "file magic");
+	goto incomplete if (!defined($word));
+	# Determine file endianness from how the magic decodes
+	if (unpack("N", $word) == $file_magic) {
+		$big_endian = 1;
+	} elsif (unpack("V", $word) == $file_magic) {
+		$big_endian = 0;
+	} else {
+		goto magic_error;
+	}
+	# Read version; guard against truncation before mapping the value
+	# (previously a truncated file caused map_gcno_version(undef))
+	$version = read_gcno_value(*HANDLE, $big_endian, "compiler version");
+	goto incomplete if (!defined($version));
+	$version = map_gcno_version($version);
+	debug(sprintf("found version 0x%08x\n", $version));
+	# Skip stamp
+	graph_skip(*HANDLE, 4, "file timestamp") or goto incomplete;
+	if ($version >= $GCOV_VERSION_8_0_0) {
+		graph_skip(*HANDLE, 4, "support unexecuted blocks flag")
+			or goto incomplete;
+	}
+	while (!eof(HANDLE)) {
+		my $next_pos;
+		my $curr_pos;
+
+		# Read record tag
+		$tag = read_gcno_value(*HANDLE, $big_endian, "record tag");
+		goto incomplete if (!defined($tag));
+		# Read record length
+		$length = read_gcno_value(*HANDLE, $big_endian,
+					  "record length");
+		goto incomplete if (!defined($length));
+		# Convert length to bytes
+		$length *= 4;
+		# Calculate start of next record
+		$next_pos = tell(HANDLE);
+		goto tell_error if ($next_pos == -1);
+		$next_pos += $length;
+		# Catch garbage at the end of a gcno file
+		if ($next_pos > $filelength) {
+			debug("Overlong record: file_length=$filelength ".
+			      "rec_length=$length\n");
+			warn("WARNING: $gcno_filename: Overlong record at end ".
+			     "of file!\n");
+			last;
+		}
+		# Process record
+		if ($tag == $tag_function) {
+			($filename, $function, $artificial) =
+				read_gcno_function_record(
+				*HANDLE, $bb, $fileorder, $big_endian,
+				$length, $version);
+			goto incomplete if (!defined($function));
+			push(@artificial_fns, $function) if ($artificial);
+		} elsif ($tag == $tag_lines) {
+			# Read lines record
+			$filename = read_gcno_lines_record(*HANDLE,
+					$gcno_filename, $bb, $fileorder,
+					$filename, $function, $big_endian);
+			goto incomplete if (!defined($filename));
+		} else {
+			# Skip record contents
+			graph_skip(*HANDLE, $length, "unhandled record")
+				or goto incomplete;
+		}
+		# Ensure that we are at the start of the next record
+		$curr_pos = tell(HANDLE);
+		goto tell_error if ($curr_pos == -1);
+		next if ($curr_pos == $next_pos);
+		goto record_error if ($curr_pos > $next_pos);
+		graph_skip(*HANDLE, $next_pos - $curr_pos,
+			   "unhandled record content")
+			or goto incomplete;
+	}
+	close(HANDLE);
+
+	# Remove artificial functions from result data
+	remove_fn_from_hash($bb, \@artificial_fns);
+	remove_fn_from_hash($fileorder, \@artificial_fns);
+
+	($instr, $graph) = graph_from_bb($bb, $fileorder, $gcno_filename, 1);
+	graph_cleanup($graph);
+
+	return ($instr, $graph);
+
+open_error:
+	graph_error($gcno_filename, "could not open file");
+	return undef;
+incomplete:
+	graph_error($gcno_filename, "reached unexpected end of file");
+	return undef;
+magic_error:
+	graph_error($gcno_filename, "found unrecognized gcno file magic");
+	return undef;
+tell_error:
+	graph_error($gcno_filename, "could not determine file position");
+	return undef;
+record_error:
+	graph_error($gcno_filename, "found unrecognized record format");
+	return undef;
+}
+
+# Print MSG to stderr when debug output is enabled.
+sub debug($)
+{
+	my ($msg) = @_;
+
+	print(STDERR "DEBUG: $msg") if ($debug);
+}
+
+#
+# get_gcov_capabilities
+#
+# Determine the list of available gcov options.
+#
+
+sub get_gcov_capabilities()
+{
+	# Parse the gcov help text to discover which options it supports
+	my $help = `$gcov_tool --help`;
+	my %capabilities;
+	my %short_option_translations = (
+		'a' => 'all-blocks',
+		'b' => 'branch-probabilities',
+		'c' => 'branch-counts',
+		'f' => 'function-summaries',
+		'h' => 'help',
+		'l' => 'long-file-names',
+		'n' => 'no-output',
+		'o' => 'object-directory',
+		'p' => 'preserve-paths',
+		'u' => 'unconditional-branches',
+		'v' => 'version',
+		'x' => 'hash-filenames',
+	);
+
+	foreach (split(/\n/, $help)) {
+		my $capability;
+		if (/--(\S+)/) {
+			# Long option mentioned on this line
+			$capability = $1;
+		} else {
+			# If the line provides a short option, translate it.
+			next if (!/^\s*-(\S)\s/);
+			$capability = $short_option_translations{$1};
+			next if not defined($capability);
+		}
+		# Options that are always present are not interesting
+		next if ($capability eq 'help');
+		next if ($capability eq 'version');
+		next if ($capability eq 'object-directory');
+
+		$capabilities{$capability} = 1;
+		debug("gcov has capability '$capability'\n");
+	}
+
+	return \%capabilities;
+}
+
+#
+# parse_ignore_errors(@ignore_errors)
+#
+# Parse user input about which errors to ignore.
+#
+
+sub parse_ignore_errors(@)
+{
+	my (@ignore_errors) = @_;
+	my @items;
+	my $item;
+
+	return if (!@ignore_errors);
+
+	# Flatten the argument list: each entry may itself be a
+	# comma-separated list
+	foreach $item (@ignore_errors) {
+		$item =~ s/\s//g;
+		if ($item =~ /,/) {
+			# Split and add comma-separated parameters
+			push(@items, split(/,/, $item));
+		} else {
+			# Add single parameter
+			push(@items, $item);
+		}
+	}
+	# Translate each name to its error ID and mark it as ignored in
+	# the file-level @ignore array
+	foreach $item (@items) {
+		my $item_id = $ERROR_ID{lc($item)};
+
+		if (!defined($item_id)) {
+			die("ERROR: unknown argument for --ignore-errors: ".
+			    "$item\n");
+		}
+		$ignore[$item_id] = 1;
+	}
+}
+
+#
+# is_external(filename)
+#
+# Determine if a file is located outside of the specified data directories.
+#
+
+sub is_external($)
+{
+	my ($filename) = @_;
+
+	# A file is internal if it resides below any known data directory
+	return (grep { $filename =~ /^\Q$_\/\E/ } @internal_dirs) ? 0 : 1;
+}
+
+#
+# compat_name(mode)
+#
+# Return the name of compatibility mode MODE.
+#
+
+sub compat_name($)
+{
+	my ($mode) = @_;
+	my $name = $COMPAT_MODE_TO_NAME{$mode};
+
+	# Fall back to a placeholder for unknown modes
+	return defined($name) ? $name : "<unknown>";
+}
+
+#
+# parse_compat_modes(opt)
+#
+# Determine compatibility mode settings.
+#
+
+sub parse_compat_modes($)
+{
+	my ($opt) = @_;
+	my @opt_list;
+	my %specified;
+
+	# Initialize with defaults
+	%compat_value = %COMPAT_MODE_DEFAULTS;
+
+	# Add old style specifications (--compat-libtool / --no-compat-libtool)
+	if (defined($opt_compat_libtool)) {
+		$compat_value{$COMPAT_MODE_LIBTOOL} =
+			$opt_compat_libtool ? $COMPAT_VALUE_ON
+					    : $COMPAT_VALUE_OFF;
+	}
+
+	# Parse settings: OPT is a comma-separated list of directives
+	if (defined($opt)) {
+		@opt_list = split(/\s*,\s*/, $opt);
+	}
+	foreach my $directive (@opt_list) {
+		my ($mode, $value);
+
+		# Either
+		#   mode=off|on|auto or
+		#   mode (implies on)
+		if ($directive !~ /^(\w+)=(\w+)$/ &&
+		    $directive !~ /^(\w+)$/) {
+			die("ERROR: Unknown compatibility mode specification: ".
+			    "$directive!\n");
+		}
+		# Determine mode
+		$mode = $COMPAT_NAME_TO_MODE{lc($1)};
+		if (!defined($mode)) {
+			die("ERROR: Unknown compatibility mode '$1'!\n");
+		}
+		$specified{$mode} = 1;
+		# Determine value; $2 is only set by the first regexp form
+		if (defined($2)) {
+			$value = $COMPAT_NAME_TO_VALUE{lc($2)};
+			if (!defined($value)) {
+				die("ERROR: Unknown compatibility mode ".
+				    "value '$2'!\n");
+			}
+		} else {
+			$value = $COMPAT_VALUE_ON;
+		}
+		$compat_value{$mode} = $value;
+	}
+	# Perform auto-detection for all modes set to AUTO
+	foreach my $mode (sort(keys(%compat_value))) {
+		my $value = $compat_value{$mode};
+		my $is_autodetect = "";
+		my $name = compat_name($mode);
+
+		if ($value == $COMPAT_VALUE_AUTO) {
+			my $autodetect = $COMPAT_MODE_AUTO{$mode};
+
+			if (!defined($autodetect)) {
+				die("ERROR: No auto-detection for ".
+				    "mode '$name' available!\n");
+			}
+
+			# A code reference means immediate detection; other
+			# values leave the decision for later
+			if (ref($autodetect) eq "CODE") {
+				$value = &$autodetect();
+				$compat_value{$mode} = $value;
+				$is_autodetect = " (auto-detected)";
+			}
+		}
+
+		# Report the outcome for modes the user asked about
+		if ($specified{$mode}) {
+			if ($value == $COMPAT_VALUE_ON) {
+				info("Enabling compatibility mode ".
+				     "'$name'$is_autodetect\n");
+			} elsif ($value == $COMPAT_VALUE_OFF) {
+				info("Disabling compatibility mode ".
+				     "'$name'$is_autodetect\n");
+			} else {
+				info("Using delayed auto-detection for ".
+				     "compatibility mode ".
+				     "'$name'\n");
+			}
+		}
+	}
+}
+
+# Auto-detect whether the GCC 3.3 (hammer) compatibility mode applies,
+# based on the gcov version string and number.
+sub compat_hammer_autodetect()
+{
+	if (($gcov_version_string =~ /suse/i && $gcov_version == 0x30303) ||
+	    ($gcov_version_string =~ /mandrake/i && $gcov_version == 0x30302)) {
+		info("Auto-detected compatibility mode for GCC 3.3 (hammer)\n");
+		return $COMPAT_VALUE_ON;
+	}
+	return $COMPAT_VALUE_OFF;
+}
+
+#
+# is_compat(mode)
+#
+# Return non-zero if compatibility mode MODE is enabled.
+#
+
+sub is_compat($)
+{
+	my ($mode) = @_;
+
+	return ($compat_value{$mode} == $COMPAT_VALUE_ON) ? 1 : 0;
+}
+
+#
+# is_compat_auto(mode)
+#
+# Return non-zero if compatibility mode MODE is set to auto-detect.
+#
+
+sub is_compat_auto($)
+{
+	my ($mode) = @_;
+
+	return ($compat_value{$mode} == $COMPAT_VALUE_AUTO) ? 1 : 0;
+}
diff --git a/ThirdParty/lcov/bin/genpng b/ThirdParty/lcov/bin/genpng
new file mode 100755
index 0000000000000000000000000000000000000000..943a49d5f0454e01aa12430fbc97a44805ac404e
--- /dev/null
+++ b/ThirdParty/lcov/bin/genpng
@@ -0,0 +1,389 @@
+#!/usr/bin/env perl
+#
+#   Copyright (c) International Business Machines  Corp., 2002
+#
+#   This program is free software;  you can redistribute it and/or modify
+#   it under the terms of the GNU General Public License as published by
+#   the Free Software Foundation; either version 2 of the License, or (at
+#   your option) any later version.
+#
+#   This program is distributed in the hope that it will be useful, but
+#   WITHOUT ANY WARRANTY;  without even the implied warranty of
+#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+#   General Public License for more details.                 
+#
+#   You should have received a copy of the GNU General Public License
+#   along with this program;  if not, write to the Free Software
+#   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#
+# genpng
+#
+#   This script creates an overview PNG image of a source code file by
+#   representing each source code character by a single pixel.
+#
+#   Note that the Perl module GD.pm is required for this script to work.
+#   It may be obtained from http://www.cpan.org
+#
+# History:
+#   2002-08-26: created by Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
+#
+
+use strict;
+use warnings;
+use File::Basename; 
+use Getopt::Long;
+use Cwd qw/abs_path/;
+
+
+# Constants
+our $tool_dir		= abs_path(dirname($0));
+our $lcov_version	= "LCOV version 1.14";
+our $lcov_url		= "http://ltp.sourceforge.net/coverage/lcov.php";
+our $tool_name		= basename($0);
+
+
+# Prototypes
+sub gen_png($$$@);
+sub check_and_load_module($);
+sub genpng_print_usage(*);
+sub genpng_process_file($$$$);
+sub genpng_warn_handler($);
+sub genpng_die_handler($);
+
+
+#
+# Code entry point
+#
+
+# Check whether required module GD.pm is installed
+if (check_and_load_module("GD"))
+{
+	# Note: cannot use die() to print this message because inserting this
+	# code into another script via do() would not fail as required!
+	print(STDERR <<END_OF_TEXT)
+ERROR: required module GD.pm not found on this system (see www.cpan.org).
+END_OF_TEXT
+	;
+	exit(2);
+}
+
+# Check whether we're called from the command line or from another script
+if (!caller)
+{
+	my $filename;
+	my $tab_size = 4;
+	my $width = 80;
+	my $out_filename;
+	my $help;
+	my $version;
+
+	$SIG{__WARN__} = \&genpng_warn_handler;
+	$SIG{__DIE__} = \&genpng_die_handler;
+
+	# Parse command line options
+	if (!GetOptions("tab-size=i" => \$tab_size,
+			"width=i" => \$width,
+			"output-filename=s" => \$out_filename,
+			"help" => \$help,
+			"version" => \$version))
+	{
+		print(STDERR "Use $tool_name --help to get usage ".
+		      "information\n");
+		exit(1);
+	}
+
+	$filename = $ARGV[0];
+
+	# Check for help flag
+	if ($help)
+	{
+		genpng_print_usage(*STDOUT);
+		exit(0);
+	}
+
+	# Check for version flag
+	if ($version)
+	{
+		print("$tool_name: $lcov_version\n");
+		exit(0);
+	}
+
+	# Check options
+	if (!$filename)
+	{
+		die("No filename specified\n");
+	}
+
+	# Check for output filename
+	if (!$out_filename)
+	{
+		$out_filename = "$filename.png";
+	}
+
+	genpng_process_file($filename, $out_filename, $width, $tab_size);
+	exit(0);
+}
+
+
+#
+# genpng_print_usage(handle)
+#
+# Write out command line usage information to given filehandle.
+#
+
+sub genpng_print_usage(*)
+{
+	local *HANDLE = $_[0];
+
+	print(HANDLE <<END_OF_USAGE)
+Usage: $tool_name [OPTIONS] SOURCEFILE
+
+Create an overview image for a given source code file of either plain text
+or .gcov file format.
+
+  -h, --help                        Print this help, then exit
+  -v, --version                     Print version number, then exit
+  -t, --tab-size TABSIZE            Use TABSIZE spaces in place of tab
+  -w, --width WIDTH                 Set width of output image to WIDTH pixel
+  -o, --output-filename FILENAME    Write image to FILENAME
+
+For more information see: $lcov_url
+END_OF_USAGE
+	;
+}
+
+
+#
+# check_and_load_module(module_name)
+#
+# Check whether a module by the given name is installed on this system
+# and make it known to the interpreter if available. Return a false (empty)
+# value if it is installed, an error message otherwise.
+#
+
+sub check_and_load_module($)
+{
+	eval("use $_[0];");
+	return $@;
+}
+
+
+#
+# genpng_process_file(filename, out_filename, width, tab_size)
+#
+
+sub genpng_process_file($$$$)
+{
+	my $filename		= $_[0];
+	my $out_filename	= $_[1];
+	my $width		= $_[2];
+	my $tab_size		= $_[3];
+	local *HANDLE;
+	my @source;
+
+	open(HANDLE, "<", $filename)
+		or die("ERROR: cannot open $filename!\n");
+
+	# Check for .gcov filename extension
+	if ($filename =~ /^(.*).gcov$/)
+	{
+		# Assume gcov text format
+		while (<HANDLE>)
+		{
+			if (/^\t\t(.*)$/)
+			{
+				# Uninstrumented line
+				push(@source, ":$1");
+			}
+			elsif (/^      ######    (.*)$/)
+			{
+				# Line with zero execution count
+				push(@source, "0:$1");
+			}
+			elsif (/^( *)(\d*)    (.*)$/)
+			{
+				# Line with positive execution count
+				push(@source, "$2:$3");
+			}
+		}
+	}
+	else
+	{
+		# Plain text file
+		while (<HANDLE>) { push(@source, ":$_"); }
+	}
+	close(HANDLE);
+
+	gen_png($out_filename, $width, $tab_size, @source);
+}
+
+
+#
+# gen_png(filename, width, tab_size, source)
+#
+# Write an overview PNG file to FILENAME. Source code is defined by SOURCE
+# which is a list of lines <count>:<source code> per source code line.
+# The output image will be made up of one pixel per character of source,
+# coloring will be done according to execution counts. WIDTH defines the
+# image width. TAB_SIZE specifies the number of spaces to use as replacement
+# string for tabulator signs in source code text.
+#
+# Die on error.
+#
+
+sub gen_png($$$@)
+{
+	my $filename = shift(@_);	# Filename for PNG file
+	my $overview_width = shift(@_);	# Image width for output image
+	my $tab_size = shift(@_);	# Replacement string for tab signs
+	my @source = @_;	# Source code as passed via argument 2
+	my $height;		# Height as defined by source size
+	my $overview;		# Source code overview image data
+	my $col_plain_back;	# Color for overview background
+	my $col_plain_text;	# Color for uninstrumented text
+	my $col_cov_back;	# Color for background of covered lines
+	my $col_cov_text;	# Color for text of covered lines
+	my $col_nocov_back;	# Color for background of lines which
+				# were not covered (count == 0)
+	my $col_nocov_text;	# Color for text of lines which were not
+				# covered (count == 0)
+	my $col_hi_back;	# Color for background of highlighted lines
+	my $col_hi_text;	# Color for text of highlighted lines
+	my $line;		# Current line during iteration
+	my $row = 0;		# Current row number during iteration
+	my $column;		# Current column number during iteration
+	my $color_text;		# Current text color during iteration
+	my $color_back;		# Current background color during iteration
+	my $last_count;		# Count of last processed line
+	my $count;		# Count of current line
+	my $source;		# Source code of current line
+	my $replacement;	# Replacement string for tabulator chars
+	local *PNG_HANDLE;	# Handle for output PNG file
+
+	# Handle empty source files
+	if (!@source) {
+		@source = ( "" );
+	}
+	$height = scalar(@source);
+	# Create image
+	$overview = new GD::Image($overview_width, $height)
+		or die("ERROR: cannot allocate overview image!\n");
+
+	# Define colors
+	$col_plain_back	= $overview->colorAllocate(0xff, 0xff, 0xff);
+	$col_plain_text	= $overview->colorAllocate(0xaa, 0xaa, 0xaa);
+	$col_cov_back	= $overview->colorAllocate(0xaa, 0xa7, 0xef);
+	$col_cov_text	= $overview->colorAllocate(0x5d, 0x5d, 0xea);
+	$col_nocov_back = $overview->colorAllocate(0xff, 0x00, 0x00);
+	$col_nocov_text = $overview->colorAllocate(0xaa, 0x00, 0x00);
+	$col_hi_back = $overview->colorAllocate(0x00, 0xff, 0x00);
+	$col_hi_text = $overview->colorAllocate(0x00, 0xaa, 0x00);
+
+	# Visualize each line
+	foreach $line (@source)
+	{
+		# Replace tabs with spaces to keep consistent with source
+		# code view
+		while ($line =~ /^([^\t]*)(\t)/)
+		{
+			$replacement = " "x($tab_size - ((length($1) - 1) %
+				       $tab_size));
+			$line =~ s/^([^\t]*)(\t)/$1$replacement/;
+		}
+
+		# Skip lines which do not follow the <count>:<line>
+		# specification, otherwise $1 = count, $2 = source code
+		if (!($line =~ /(\*?)(\d*):(.*)$/)) { next; }
+		$count = $2;
+		$source = $3;
+
+		# Decide which color pair to use
+
+		# If this line was not instrumented but the one before was,
+		# take the color of that line to widen color areas in
+		# resulting image
+		if (($count eq "") && defined($last_count) &&
+		    ($last_count ne ""))
+		{
+			$count = $last_count;
+		}
+
+		if ($count eq "")
+		{
+			# Line was not instrumented
+			$color_text = $col_plain_text;
+			$color_back = $col_plain_back;
+		}
+		elsif ($count == 0)
+		{
+			# Line was instrumented but not executed
+			$color_text = $col_nocov_text;
+			$color_back = $col_nocov_back;
+		}
+		elsif ($1 eq "*")
+		{
+			# Line was highlighted
+			$color_text = $col_hi_text;
+			$color_back = $col_hi_back;
+		}
+		else
+		{
+			# Line was instrumented and executed
+			$color_text = $col_cov_text;
+			$color_back = $col_cov_back;
+		}
+
+		# Write one pixel for each source character
+		$column = 0;
+		foreach (split("", $source))
+		{
+			# Check for width
+			if ($column >= $overview_width) { last; }
+
+			if ($_ eq " ")
+			{
+				# Space
+				$overview->setPixel($column++, $row,
+						    $color_back);
+			}
+			else
+			{
+				# Text
+				$overview->setPixel($column++, $row,
+						    $color_text);
+			}
+		}
+
+		# Fill rest of line		
+		while ($column < $overview_width)
+		{
+			$overview->setPixel($column++, $row, $color_back);
+		}
+
+		$last_count = $2;
+
+		$row++;
+	}
+
+	# Write PNG file
+	open (PNG_HANDLE, ">", $filename)
+		or die("ERROR: cannot write png file $filename!\n");
+	binmode(*PNG_HANDLE);
+	print(PNG_HANDLE $overview->png());
+	close(PNG_HANDLE);
+}
+
+sub genpng_warn_handler($)
+{
+	my ($msg) = @_;
+
+	warn("$tool_name: $msg");
+}
+
+sub genpng_die_handler($)
+{
+	my ($msg) = @_;
+
+	die("$tool_name: $msg");
+}
diff --git a/ThirdParty/lcov/bin/get_changes.sh b/ThirdParty/lcov/bin/get_changes.sh
new file mode 100755
index 0000000000000000000000000000000000000000..ec373b4f4f023207c5aed6383f293a5c8a4ab4e0
--- /dev/null
+++ b/ThirdParty/lcov/bin/get_changes.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+#
+# Usage: get_changes.sh
+#
+# Print lcov change log information as provided by Git
+
+TOOLDIR=$(cd $(dirname $0) >/dev/null ; pwd)
+
+cd $TOOLDIR
+
+if ! git --no-pager log --no-merges --decorate=short --color=never 2>/dev/null ; then
+	cat "$TOOLDIR/../CHANGES" 2>/dev/null
+fi 
diff --git a/ThirdParty/lcov/bin/get_version.sh b/ThirdParty/lcov/bin/get_version.sh
new file mode 100755
index 0000000000000000000000000000000000000000..ac5a36314699c7ccfa374d14a1c6e5ab68a62270
--- /dev/null
+++ b/ThirdParty/lcov/bin/get_version.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+#
+# Usage: get_version.sh --version|--release|--full
+#
+# Print lcov version or release information as provided by Git, .version
+# or a fallback.
+
+TOOLDIR=$(cd $(dirname $0) >/dev/null ; pwd)
+GITVER=$(cd $TOOLDIR ; git describe --tags 2>/dev/null)
+
+if [ -z "$GITVER" ] ; then
+	# Get version information from file
+	if [ -e "$TOOLDIR/../.version" ] ; then
+		source "$TOOLDIR/../.version"
+	fi
+else
+	# Get version information from git
+	FULL=${GITVER:1}
+	VERSION=${GITVER%%-*}
+	VERSION=${VERSION:1}
+	if [ "${GITVER#*-}" != "$GITVER" ] ; then
+		RELEASE=${GITVER#*-}
+		RELEASE=${RELEASE/-/.}
+	fi
+fi
+
+# Fallback
+[ -z "$VERSION" ] && VERSION="1.0"
+[ -z "$RELEASE" ] && RELEASE="1"
+[ -z "$FULL" ]    && FULL="$VERSION"
+
+[ "$1" == "--version" ] && echo -n "$VERSION"
+[ "$1" == "--release" ] && echo -n "$RELEASE"
+[ "$1" == "--full"    ] && echo -n "$FULL"
diff --git a/ThirdParty/lcov/bin/install.sh b/ThirdParty/lcov/bin/install.sh
new file mode 100755
index 0000000000000000000000000000000000000000..2cdef45b6ca50fe0f745140c860f15b06e883ede
--- /dev/null
+++ b/ThirdParty/lcov/bin/install.sh
@@ -0,0 +1,76 @@
+#!/usr/bin/env bash
+#
+# install.sh [--uninstall] sourcefile targetfile [install options]
+#
+
+
+# Check for uninstall option
+if test "x$1" == "x--uninstall" ; then
+  UNINSTALL=true
+  SOURCE=$2
+  TARGET=$3
+  shift 3
+else
+  UNINSTALL=false
+  SOURCE=$1
+  TARGET=$2
+  shift 2
+fi
+
+# Check usage
+if test -z "$SOURCE" || test -z "$TARGET" ; then
+  echo Usage: install.sh [--uninstall] source target [install options] >&2
+  exit 1
+fi
+
+
+#
+# do_install(SOURCE_FILE, TARGET_FILE)
+#
+
+do_install()
+{
+  local SOURCE=$1
+  local TARGET=$2
+  local PARAMS=$3
+
+  install -d $(dirname $TARGET)
+  install -p $PARAMS $SOURCE $TARGET
+  if [ -n "$LCOV_PERL_PATH" ] ; then
+    # Replace Perl interpreter specification
+    sed -e "1 s%^#\!.*perl.*$%#\!$LCOV_PERL_PATH%" -i $TARGET
+  fi
+}
+
+
+#
+# do_uninstall(SOURCE_FILE, TARGET_FILE)
+#
+
+do_uninstall()
+{
+  local SOURCE=$1
+  local TARGET=$2
+
+  # Does target exist?
+  if test -r $TARGET ; then
+    # Is target of the same version as this package?
+    if diff -I '^our \$lcov_version' -I '^\.TH ' -I '^#!' $SOURCE $TARGET >/dev/null; then
+      rm -f $TARGET
+    else
+      echo WARNING: Skipping uninstall for $TARGET - versions differ! >&2
+    fi
+  else
+    echo WARNING: Skipping uninstall for $TARGET - not installed! >&2
+  fi
+}
+
+
+# Call sub routine
+if $UNINSTALL ; then
+  do_uninstall $SOURCE $TARGET
+else
+  do_install $SOURCE $TARGET "$*"
+fi
+
+exit 0
diff --git a/ThirdParty/lcov/bin/lcov b/ThirdParty/lcov/bin/lcov
new file mode 100755
index 0000000000000000000000000000000000000000..33c9f4d16e718f2f76e83d090b2ed2beeec4435f
--- /dev/null
+++ b/ThirdParty/lcov/bin/lcov
@@ -0,0 +1,4329 @@
+#!/usr/bin/env perl
+#
+#   Copyright (c) International Business Machines  Corp., 2002,2012
+#
+#   This program is free software;  you can redistribute it and/or modify
+#   it under the terms of the GNU General Public License as published by
+#   the Free Software Foundation; either version 2 of the License, or (at
+#   your option) any later version.
+#
+#   This program is distributed in the hope that it will be useful, but
+#   WITHOUT ANY WARRANTY;  without even the implied warranty of
+#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+#   General Public License for more details.                 
+#
+#   You should have received a copy of the GNU General Public License
+#   along with this program;  if not, write to the Free Software
+#   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#
+# lcov
+#
+#   This is a wrapper script which provides a single interface for accessing
+#   LCOV coverage data.
+#
+#
+# History:
+#   2002-08-29 created by Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
+#                         IBM Lab Boeblingen
+#   2002-09-05 / Peter Oberparleiter: implemented --kernel-directory +
+#                multiple directories
+#   2002-10-16 / Peter Oberparleiter: implemented --add-tracefile option
+#   2002-10-17 / Peter Oberparleiter: implemented --extract option
+#   2002-11-04 / Peter Oberparleiter: implemented --list option
+#   2003-03-07 / Paul Larson: Changed to make it work with the latest gcov 
+#                kernel patch.  This will break it with older gcov-kernel
+#                patches unless you change the value of $gcovmod in this script
+#   2003-04-07 / Peter Oberparleiter: fixed bug which resulted in an error
+#                when trying to combine .info files containing data without
+#                a test name
+#   2003-04-10 / Peter Oberparleiter: extended Paul's change so that LCOV
+#                works both with the new and the old gcov-kernel patch
+#   2003-04-10 / Peter Oberparleiter: added $gcov_dir constant in anticipation
+#                of a possible move of the gcov kernel directory to another
+#                file system in a future version of the gcov-kernel patch
+#   2003-04-15 / Paul Larson: make info write to STDERR, not STDOUT
+#   2003-04-15 / Paul Larson: added --remove option
+#   2003-04-30 / Peter Oberparleiter: renamed --reset to --zerocounters
+#                to remove naming ambiguity with --remove
+#   2003-04-30 / Peter Oberparleiter: adjusted help text to include --remove
+#   2003-06-27 / Peter Oberparleiter: implemented --diff
+#   2003-07-03 / Peter Oberparleiter: added line checksum support, added
+#                --no-checksum
+#   2003-12-11 / Laurent Deniel: added --follow option
+#   2004-03-29 / Peter Oberparleiter: modified --diff option to better cope with
+#                ambiguous patch file entries, modified --capture option to use
+#                modprobe before insmod (needed for 2.6)
+#   2004-03-30 / Peter Oberparleiter: added --path option
+#   2004-08-09 / Peter Oberparleiter: added configuration file support
+#   2008-08-13 / Peter Oberparleiter: added function coverage support
+#
+
+use strict;
+use warnings;
+use File::Basename;
+use File::Path;
+use File::Find;
+use File::Temp qw /tempdir/;
+use File::Spec::Functions qw /abs2rel canonpath catdir catfile catpath
+			      file_name_is_absolute rootdir splitdir splitpath/;
+use Getopt::Long;
+use Cwd qw /abs_path getcwd/;
+
+
+# Global constants
+our $tool_dir		= abs_path(dirname($0));
+our $lcov_version	= "LCOV version 1.14";
+our $lcov_url		= "http://ltp.sourceforge.net/coverage/lcov.php";
+our $tool_name		= basename($0);
+
+# Directory containing gcov kernel files
+our $gcov_dir;
+
+# Where to create temporary directories
+our $tmp_dir;
+
+# Internal constants
+our $GKV_PROC = 0;	# gcov-kernel data in /proc via external patch
+our $GKV_SYS = 1;	# gcov-kernel data in /sys via vanilla 2.6.31+
+our @GKV_NAME = ( "external", "upstream" );
+our $pkg_gkv_file = ".gcov_kernel_version";
+our $pkg_build_file = ".build_directory";
+
+# Branch data combination types
+our $BR_SUB = 0;
+our $BR_ADD = 1;
+
+# Prototypes
+sub print_usage(*);
+sub check_options();
+sub userspace_reset();
+sub userspace_capture();
+sub kernel_reset();
+sub kernel_capture();
+sub kernel_capture_initial();
+sub package_capture();
+sub add_traces();
+sub read_info_file($);
+sub get_info_entry($);
+sub set_info_entry($$$$$$$$$;$$$$$$);
+sub add_counts($$);
+sub merge_checksums($$$);
+sub combine_info_entries($$$);
+sub combine_info_files($$);
+sub write_info_file(*$);
+sub extract();
+sub remove();
+sub list();
+sub get_common_filename($$);
+sub read_diff($);
+sub diff();
+sub system_no_output($@);
+sub read_config($);
+sub apply_config($);
+sub info(@);
+sub create_temp_dir();
+sub transform_pattern($);
+sub warn_handler($);
+sub die_handler($);
+sub abort_handler($);
+sub temp_cleanup();
+sub setup_gkv();
+sub get_overall_line($$$$);
+sub print_overall_rate($$$$$$$$$);
+sub lcov_geninfo(@);
+sub create_package($$$;$);
+sub get_func_found_and_hit($);
+sub summary();
+sub rate($$;$$$);
+
+# Global variables & initialization
+our @directory;		# Specifies where to get coverage data from
+our @kernel_directory;	# If set, captures only from specified kernel subdirs
+our @add_tracefile;	# If set, reads in and combines all files in list
+our $list;		# If set, list contents of tracefile
+our $extract;		# If set, extracts parts of tracefile
+our $remove;		# If set, removes parts of tracefile
+our $diff;		# If set, modifies tracefile according to diff
+our $reset;		# If set, reset all coverage data to zero
+our $capture;		# If set, capture data
+our $output_filename;	# Name for file to write coverage data to
+our $test_name = "";	# Test case name
+our $quiet = "";	# If set, suppress information messages
+our $help;		# Help option flag
+our $version;		# Version option flag
+our $convert_filenames;	# If set, convert filenames when applying diff
+our $strip;		# If set, strip leading directories when applying diff
+our $temp_dir_name;	# Name of temporary directory
+our $cwd = `pwd`;	# Current working directory
+our $data_stdout;	# If set, indicates that data is written to stdout
+our $follow;		# If set, indicates that find shall follow links
+our $diff_path = "";	# Path removed from tracefile when applying diff
+our $base_directory;	# Base directory (cwd of gcc during compilation)
+our $checksum;		# If set, calculate a checksum for each line
+our $no_checksum;	# If set, don't calculate a checksum for each line
+our $compat_libtool;	# If set, indicates that libtool mode is to be enabled
+our $no_compat_libtool;	# If set, indicates that libtool mode is to be disabled
+our $gcov_tool;
+our @opt_ignore_errors;
+our $initial;
+our @include_patterns; # List of source file patterns to include
+our @exclude_patterns; # List of source file patterns to exclude
+our $no_recursion = 0;
+our $to_package;
+our $from_package;
+our $maxdepth;
+our $no_markers;
+our $config;		# Configuration file contents
+chomp($cwd);
+our @temp_dirs;
+our $gcov_gkv;		# gcov kernel support version found on machine
+our $opt_derive_func_data;
+our $opt_debug;
+our $opt_list_full_path;
+our $opt_no_list_full_path;
+our $opt_list_width = 80;
+our $opt_list_truncate_max = 20;
+our $opt_external;
+our $opt_no_external;
+our $opt_config_file;
+our %opt_rc;
+our @opt_summary;
+our $opt_compat;
+our $ln_overall_found;
+our $ln_overall_hit;
+our $fn_overall_found;
+our $fn_overall_hit;
+our $br_overall_found;
+our $br_overall_hit;
+our $func_coverage = 1;
+our $br_coverage = 0;
+
+
+#
+# Code entry point
+#
+
+$SIG{__WARN__} = \&warn_handler;
+$SIG{__DIE__} = \&die_handler;
+$SIG{'INT'} = \&abort_handler;
+$SIG{'QUIT'} = \&abort_handler;
+
+# Check command line for a configuration file name
+Getopt::Long::Configure("pass_through", "no_auto_abbrev");
+GetOptions("config-file=s" => \$opt_config_file,
+	   "rc=s%" => \%opt_rc);
+Getopt::Long::Configure("default");
+
+{
+	# Remove spaces around rc options
+	my %new_opt_rc;
+
+	while (my ($key, $value) = each(%opt_rc)) {
+		$key =~ s/^\s+|\s+$//g;
+		$value =~ s/^\s+|\s+$//g;
+
+		$new_opt_rc{$key} = $value;
+	}
+	%opt_rc = %new_opt_rc;
+}
+
+# Read configuration file if available
+if (defined($opt_config_file)) {
+	$config = read_config($opt_config_file);
+} elsif (defined($ENV{"HOME"}) && (-r $ENV{"HOME"}."/.lcovrc"))
+{
+	$config = read_config($ENV{"HOME"}."/.lcovrc");
+}
+elsif (-r "/etc/lcovrc")
+{
+	$config = read_config("/etc/lcovrc");
+} elsif (-r "/usr/local/etc/lcovrc")
+{
+	$config = read_config("/usr/local/etc/lcovrc");
+}
+
+if ($config || %opt_rc)
+{
+	# Copy configuration file and --rc values to variables
+	apply_config({
+		"lcov_gcov_dir"		=> \$gcov_dir,
+		"lcov_tmp_dir"		=> \$tmp_dir,
+		"lcov_list_full_path"	=> \$opt_list_full_path,
+		"lcov_list_width"	=> \$opt_list_width,
+		"lcov_list_truncate_max"=> \$opt_list_truncate_max,
+		"lcov_branch_coverage"	=> \$br_coverage,
+		"lcov_function_coverage"=> \$func_coverage,
+	});
+}
+
+# Parse command line options
+if (!GetOptions("directory|d|di=s" => \@directory,
+		"add-tracefile|a=s" => \@add_tracefile,
+		"list|l=s" => \$list,
+		"kernel-directory|k=s" => \@kernel_directory,
+		"extract|e=s" => \$extract,
+		"remove|r=s" => \$remove,
+		"diff=s" => \$diff,
+		"convert-filenames" => \$convert_filenames,
+		"strip=i" => \$strip,
+		"capture|c" => \$capture,
+		"output-file|o=s" => \$output_filename,
+		"test-name|t=s" => \$test_name,
+		"zerocounters|z" => \$reset,
+		"quiet|q" => \$quiet,
+		"help|h|?" => \$help,
+		"version|v" => \$version,
+		"follow|f" => \$follow,
+		"path=s" => \$diff_path,
+		"base-directory|b=s" => \$base_directory,
+		"checksum" => \$checksum,
+		"no-checksum" => \$no_checksum,
+		"compat-libtool" => \$compat_libtool,
+		"no-compat-libtool" => \$no_compat_libtool,
+		"gcov-tool=s" => \$gcov_tool,
+		"ignore-errors=s" => \@opt_ignore_errors,
+		"initial|i" => \$initial,
+		"include=s" => \@include_patterns,
+		"exclude=s" => \@exclude_patterns,
+		"no-recursion" => \$no_recursion,
+		"to-package=s" => \$to_package,
+		"from-package=s" => \$from_package,
+		"no-markers" => \$no_markers,
+		"derive-func-data" => \$opt_derive_func_data,
+		"debug" => \$opt_debug,
+		"list-full-path" => \$opt_list_full_path,
+		"no-list-full-path" => \$opt_no_list_full_path,
+		"external" => \$opt_external,
+		"no-external" => \$opt_no_external,
+		"summary=s" => \@opt_summary,
+		"compat=s" => \$opt_compat,
+		"config-file=s" => \$opt_config_file,
+		"rc=s%" => \%opt_rc,
+		))
+{
+	print(STDERR "Use $tool_name --help to get usage information\n");
+	exit(1);
+}
+else
+{
+	# Merge options
+	if (defined($no_checksum))
+	{
+		$checksum = ($no_checksum ? 0 : 1);
+		$no_checksum = undef;
+	}
+
+	if (defined($no_compat_libtool))
+	{
+		$compat_libtool = ($no_compat_libtool ? 0 : 1);
+		$no_compat_libtool = undef;
+	}
+
+	if (defined($opt_no_list_full_path))
+	{
+		$opt_list_full_path = ($opt_no_list_full_path ? 0 : 1);
+		$opt_no_list_full_path = undef;
+	}
+
+	if (defined($opt_no_external)) {
+		$opt_external = 0;
+		$opt_no_external = undef;
+	}
+}
+
+# Check for help option
+if ($help)
+{
+	print_usage(*STDOUT);
+	exit(0);
+}
+
+# Check for version option
+if ($version)
+{
+	print("$tool_name: $lcov_version\n");
+	exit(0);
+}
+
+# Check list width option
+if ($opt_list_width <= 40) {
+	die("ERROR: lcov_list_width parameter out of range (needs to be ".
+	    "larger than 40)\n");
+}
+
+# Normalize --path text
+$diff_path =~ s/\/$//;
+
+if ($follow)
+{
+	$follow = "-follow";
+}
+else
+{
+	$follow = "";
+}
+
+if ($no_recursion)
+{
+	$maxdepth = "-maxdepth 1";
+}
+else
+{
+	$maxdepth = "";
+}
+
+# Check for valid options
+check_options();
+
+# Only --extract, --remove and --diff allow unnamed parameters
+if (@ARGV && !($extract || $remove || $diff || @opt_summary))
+{
+	die("Extra parameter found: '".join(" ", @ARGV)."'\n".
+	    "Use $tool_name --help to get usage information\n");
+}
+
+# Check for output filename
+$data_stdout = !($output_filename && ($output_filename ne "-"));
+
+if ($capture)
+{
+	if ($data_stdout)
+	{
+		# Option that tells geninfo to write to stdout
+		$output_filename = "-";
+	}
+}
+
+# Determine kernel directory for gcov data
+if (!$from_package && !@directory && ($capture || $reset)) {
+	($gcov_gkv, $gcov_dir) = setup_gkv();
+}
+
+# Check for requested functionality
+if ($reset)
+{
+	$data_stdout = 0;
+	# Differentiate between user space and kernel reset
+	if (@directory)
+	{
+		userspace_reset();
+	}
+	else
+	{
+		kernel_reset();
+	}
+}
+elsif ($capture)
+{
+	# Capture source can be user space, kernel or package
+	if ($from_package) {
+		package_capture();
+	} elsif (@directory) {
+		userspace_capture();
+	} else {
+		if ($initial) {
+			if (defined($to_package)) {
+				die("ERROR: --initial cannot be used together ".
+				    "with --to-package\n");
+			}
+			kernel_capture_initial();
+		} else {
+			kernel_capture();
+		}
+	}
+}
+elsif (@add_tracefile)
+{
+	($ln_overall_found, $ln_overall_hit,
+	 $fn_overall_found, $fn_overall_hit,
+	 $br_overall_found, $br_overall_hit) = add_traces();
+}
+elsif ($remove)
+{
+	($ln_overall_found, $ln_overall_hit,
+	 $fn_overall_found, $fn_overall_hit,
+	 $br_overall_found, $br_overall_hit) = remove();
+}
+elsif ($extract)
+{
+	($ln_overall_found, $ln_overall_hit,
+	 $fn_overall_found, $fn_overall_hit,
+	 $br_overall_found, $br_overall_hit) = extract();
+}
+elsif ($list)
+{
+	$data_stdout = 0;
+	list();
+}
+elsif ($diff)
+{
+	if (scalar(@ARGV) != 1)
+	{
+		die("ERROR: option --diff requires one additional argument!\n".
+		    "Use $tool_name --help to get usage information\n");
+	}
+	($ln_overall_found, $ln_overall_hit,
+	 $fn_overall_found, $fn_overall_hit,
+	 $br_overall_found, $br_overall_hit) = diff();
+}
+elsif (@opt_summary)
+{
+	$data_stdout = 0;
+	($ln_overall_found, $ln_overall_hit,
+	 $fn_overall_found, $fn_overall_hit,
+	 $br_overall_found, $br_overall_hit) = summary();
+}
+
+temp_cleanup();
+
+if (defined($ln_overall_found)) {
+	print_overall_rate(1, $ln_overall_found, $ln_overall_hit,
+			   1, $fn_overall_found, $fn_overall_hit,
+			   1, $br_overall_found, $br_overall_hit);
+} else {
+	info("Done.\n") if (!$list && !$capture);
+}
+exit(0);
+
+#
+# print_usage(handle)
+#
+# Print usage information.
+#
+
+sub print_usage(*)
+{
+	local *HANDLE = $_[0];
+
+	print(HANDLE <<END_OF_USAGE);
+Usage: $tool_name [OPTIONS]
+
+Use lcov to collect coverage data from either the currently running Linux
+kernel or from a user space application. Specify the --directory option to
+get coverage data for a user space program.
+
+Misc:
+  -h, --help                      Print this help, then exit
+  -v, --version                   Print version number, then exit
+  -q, --quiet                     Do not print progress messages
+
+Operation:
+  -z, --zerocounters              Reset all execution counts to zero
+  -c, --capture                   Capture coverage data
+  -a, --add-tracefile FILE        Add contents of tracefiles
+  -e, --extract FILE PATTERN      Extract files matching PATTERN from FILE
+  -r, --remove FILE PATTERN       Remove files matching PATTERN from FILE
+  -l, --list FILE                 List contents of tracefile FILE
+      --diff FILE DIFF            Transform tracefile FILE according to DIFF
+      --summary FILE              Show summary coverage data for tracefiles
+
+Options:
+  -i, --initial                   Capture initial zero coverage data
+  -t, --test-name NAME            Specify test name to be stored with data
+  -o, --output-file FILENAME      Write data to FILENAME instead of stdout
+  -d, --directory DIR             Use .da files in DIR instead of kernel
+  -f, --follow                    Follow links when searching .da files
+  -k, --kernel-directory KDIR     Capture kernel coverage data only from KDIR
+  -b, --base-directory DIR        Use DIR as base directory for relative paths
+      --convert-filenames         Convert filenames when applying diff
+      --strip DEPTH               Strip initial DEPTH directory levels in diff
+      --path PATH                 Strip PATH from tracefile when applying diff
+      --(no-)checksum             Enable (disable) line checksumming
+      --(no-)compat-libtool       Enable (disable) libtool compatibility mode
+      --gcov-tool TOOL            Specify gcov tool location
+      --ignore-errors ERRORS      Continue after ERRORS (gcov, source, graph)
+      --no-recursion              Exclude subdirectories from processing
+      --to-package FILENAME       Store unprocessed coverage data in FILENAME
+      --from-package FILENAME     Capture from unprocessed data in FILENAME
+      --no-markers                Ignore exclusion markers in source code
+      --derive-func-data          Generate function data from line data
+      --list-full-path            Print full path during a list operation
+      --(no-)external             Include (ignore) data for external files
+      --config-file FILENAME      Specify configuration file location
+      --rc SETTING=VALUE          Override configuration file setting
+      --compat MODE=on|off|auto   Set compat MODE (libtool, hammer, split_crc)
+      --include PATTERN           Include files matching PATTERN
+      --exclude PATTERN           Exclude files matching PATTERN
+
+For more information see: $lcov_url
+END_OF_USAGE
+	;
+}
+
+
+#
+# check_options()
+#
+# Check for valid combination of command line options. Die on error.
+#
+
+sub check_options()
+{
+	my $i = 0;
+
+	# Count occurrence of mutually exclusive options
+	$reset && $i++;
+	$capture && $i++;
+	@add_tracefile && $i++;
+	$extract && $i++;
+	$remove && $i++;
+	$list && $i++;
+	$diff && $i++;
+	@opt_summary && $i++;
+	
+	if ($i == 0)
+	{
+		die("Need one of options -z, -c, -a, -e, -r, -l, ".
+		    "--diff or --summary\n".
+		    "Use $tool_name --help to get usage information\n");
+	}
+	elsif ($i > 1)
+	{
+		die("ERROR: only one of -z, -c, -a, -e, -r, -l, ".
+		    "--diff or --summary allowed!\n".
+		    "Use $tool_name --help to get usage information\n");
+	}
+}
+
+
+#
+# userspace_reset()
+#
+# Reset coverage data found in DIRECTORY by deleting all contained .da files.
+#
+# Die on error.
+#
+
+sub userspace_reset()
+{
+	my $current_dir;
+	my @file_list;
+
+	foreach $current_dir (@directory)
+	{
+		info("Deleting all .da files in $current_dir".
+		     ($no_recursion?"\n":" and subdirectories\n"));
+		@file_list = `find "$current_dir" $maxdepth $follow -name \\*\\.da -type f -o -name \\*\\.gcda -type f 2>/dev/null`;
+		chomp(@file_list);
+		foreach (@file_list)
+		{
+			unlink($_) or die("ERROR: cannot remove file $_!\n");
+		}
+	}
+}
+
+
+#
+# userspace_capture()
+#
+# Capture coverage data found in DIRECTORY and write it to a package (if
+# TO_PACKAGE specified) or to OUTPUT_FILENAME or STDOUT.
+#
+# Die on error.
+#
+
+sub userspace_capture()
+{
+	my $dir;
+	my $build;
+
+	if (!defined($to_package)) {
+		lcov_geninfo(@directory);
+		return;
+	}
+	if (scalar(@directory) != 1) {
+		die("ERROR: -d may be specified only once with --to-package\n");
+	}
+	$dir = $directory[0];
+	if (defined($base_directory)) {
+		$build = $base_directory;
+	} else {
+		$build = $dir;
+	}
+	create_package($to_package, $dir, $build);
+}
+
+
+#
+# kernel_reset()
+#
+# Reset kernel coverage.
+#
+# Die on error.
+#
+
+sub kernel_reset()
+{
+	local *HANDLE;
+	my $reset_file;
+
+	info("Resetting kernel execution counters\n");
+	if (-e "$gcov_dir/vmlinux") {
+		$reset_file = "$gcov_dir/vmlinux";
+	} elsif (-e "$gcov_dir/reset") {
+		$reset_file = "$gcov_dir/reset";
+	} else {
+		die("ERROR: no reset control found in $gcov_dir\n");
+	}
+	open(HANDLE, ">", $reset_file) or
+		die("ERROR: cannot write to $reset_file!\n");
+	print(HANDLE "0");
+	close(HANDLE);
+}
+
+
+#
+# lcov_copy_single(from, to)
+# 
+# Copy single regular file FROM to TO without checking its size. This is
+# required to work with special files generated by the kernel
+# seq_file-interface.
+#
+#
+sub lcov_copy_single($$)
+{
+	my ($from, $to) = @_;
+	my $content;
+	local $/;
+	local *HANDLE;
+
+	open(HANDLE, "<", $from) or die("ERROR: cannot read $from: $!\n");
+	$content = <HANDLE>;
+	close(HANDLE);
+	open(HANDLE, ">", $to) or die("ERROR: cannot write $to: $!\n");
+	if (defined($content)) {
+		print(HANDLE $content);
+	}
+	close(HANDLE);
+}
+
+#
+# lcov_find(dir, function, data[, pattern, ...])
+#
+# Search DIR for files and directories whose name matches PATTERN and run
+# FUNCTION for each match. If no pattern is specified, match all names.
+#
+# FUNCTION has the following prototype:
+#   function(dir, relative_name, data)
+#
+# Where:
+#   dir: the base directory for this search
+#   relative_name: the name relative to the base directory of this entry
+#   data: the DATA variable passed to lcov_find
+#
+sub lcov_find($$$;@)
+{
+	my ($dir, $fn, $data, @pattern) = @_;
+	my $result;
+	my $_fn = sub {
+		my $filename = $File::Find::name;
+
+		if (defined($result)) {
+			return;
+		}		
+		$filename = abs2rel($filename, $dir);
+		foreach (@pattern) {
+			if ($filename =~ /$_/) {
+				goto ok;
+			}
+		}
+		return;
+	ok:
+		$result = &$fn($dir, $filename, $data);
+	};
+	if (scalar(@pattern) == 0) {
+		@pattern = ".*";
+	}
+	find( { wanted => $_fn, no_chdir => 1 }, $dir);
+
+	return $result;
+}
+
+#
+# lcov_copy_fn(from, rel, to)
+#
+# Copy directories, files and links from/rel to to/rel.
+#
+
+sub lcov_copy_fn($$$)
+{
+	my ($from, $rel, $to) = @_;
+	my $absfrom = canonpath(catfile($from, $rel));
+	my $absto = canonpath(catfile($to, $rel));
+
+	if (-d) {
+		if (! -d $absto) {
+			mkpath($absto) or
+				die("ERROR: cannot create directory $absto\n");
+			chmod(0700, $absto);
+		}
+	} elsif (-l) {
+		# Copy symbolic link
+		my $link = readlink($absfrom);
+
+		if (!defined($link)) {
+			die("ERROR: cannot read link $absfrom: $!\n");
+		}
+		symlink($link, $absto) or
+			die("ERROR: cannot create link $absto: $!\n");
+	} else {
+		lcov_copy_single($absfrom, $absto);
+		chmod(0600, $absto);
+	}
+	return undef;
+}
+
+#
+# lcov_copy(from, to, subdirs)
+# 
+# Copy all specified SUBDIRS and files from directory FROM to directory TO. For
+# regular files, copy file contents without checking its size. This is required
+# to work with seq_file-generated files.
+#
+
+sub lcov_copy($$;@)
+{
+	my ($from, $to, @subdirs) = @_;
+	my @pattern;
+
+	foreach (@subdirs) {
+		push(@pattern, "^$_");
+	}
+	lcov_find($from, \&lcov_copy_fn, $to, @pattern);
+}
+
+#
+# lcov_geninfo(directory)
+#
+# Call geninfo for the specified directory and with the parameters specified
+# at the command line.
+#
+
+sub lcov_geninfo(@)
+{
+	my (@dir) = @_;
+	my @param;
+
+	# Capture data
+	info("Capturing coverage data from ".join(" ", @dir)."\n");
+	@param = ("$tool_dir/geninfo", @dir);
+	if ($output_filename)
+	{
+		@param = (@param, "--output-filename", $output_filename);
+	}
+	if ($test_name)
+	{
+		@param = (@param, "--test-name", $test_name);
+	}
+	if ($follow)
+	{
+		@param = (@param, "--follow");
+	}
+	if ($quiet)
+	{
+		@param = (@param, "--quiet");
+	}
+	if (defined($checksum))
+	{
+		if ($checksum)
+		{
+			@param = (@param, "--checksum");
+		}
+		else
+		{
+			@param = (@param, "--no-checksum");
+		}
+	}
+	if ($base_directory)
+	{
+		@param = (@param, "--base-directory", $base_directory);
+	}
+	if ($no_compat_libtool)
+	{
+		@param = (@param, "--no-compat-libtool");
+	}
+	elsif ($compat_libtool)
+	{
+		@param = (@param, "--compat-libtool");
+	}
+	if ($gcov_tool)
+	{
+		@param = (@param, "--gcov-tool", $gcov_tool);
+	}
+	foreach (@opt_ignore_errors) {
+		@param = (@param, "--ignore-errors", $_);
+	}
+	if ($no_recursion) {
+		@param = (@param, "--no-recursion");
+	}
+	if ($initial)
+	{
+		@param = (@param, "--initial");
+	}
+	if ($no_markers)
+	{
+		@param = (@param, "--no-markers");
+	}
+	if ($opt_derive_func_data)
+	{
+		@param = (@param, "--derive-func-data");
+	}
+	if ($opt_debug)
+	{
+		@param = (@param, "--debug");
+	}
+	if (defined($opt_external) && $opt_external)
+	{
+		@param = (@param, "--external");
+	}
+	if (defined($opt_external) && !$opt_external)
+	{
+		@param = (@param, "--no-external");
+	}
+	if (defined($opt_compat)) {
+		@param = (@param, "--compat", $opt_compat);
+	}
+	if (%opt_rc) {
+		foreach my $key (keys(%opt_rc)) {
+			@param = (@param, "--rc", "$key=".$opt_rc{$key});
+		}
+	}
+	if (defined($opt_config_file)) {
+		@param = (@param, "--config-file", $opt_config_file);
+	}
+	foreach (@include_patterns) {
+		@param = (@param, "--include", $_);
+	}
+	foreach (@exclude_patterns) {
+		@param = (@param, "--exclude", $_);
+	}
+
+	system(@param) and exit($? >> 8);
+}
+
+#
+# read_file(filename)
+#
+# Return the contents of the file defined by filename.
+#
+
+sub read_file($)
+{
+	my ($filename) = @_;
+	my $content;
+	local $\;
+	local *HANDLE;
+
+	open(HANDLE, "<", $filename) || return undef;
+	$content = <HANDLE>;
+	close(HANDLE);
+
+	return $content;
+}
+
+#
+# get_package(package_file)
+#
+# Unpack unprocessed coverage data files from package_file to a temporary
+# directory and return directory name, build directory and gcov kernel version
+# as found in package.
+#
+
+sub get_package($)
+{
+	my ($file) = @_;
+	my $dir = create_temp_dir();
+	my $gkv;
+	my $build;
+	my $cwd = getcwd();
+	my $count;
+	local *HANDLE;
+
+	info("Reading package $file:\n");
+	$file = abs_path($file);
+	chdir($dir);
+	open(HANDLE, "-|", "tar xvfz '$file' 2>/dev/null")
+		or die("ERROR: could not process package $file\n");
+	$count = 0;
+	while (<HANDLE>) {
+		if (/\.da$/ || /\.gcda$/) {
+			$count++;
+		}
+	}
+	close(HANDLE);
+	if ($count == 0) {
+		die("ERROR: no data file found in package $file\n");
+	}
+	info("  data directory .......: $dir\n");
+	$build = read_file("$dir/$pkg_build_file");
+	if (defined($build)) {
+		info("  build directory ......: $build\n");
+	}
+	$gkv = read_file("$dir/$pkg_gkv_file");
+	if (defined($gkv)) {
+		$gkv = int($gkv);
+		if ($gkv != $GKV_PROC && $gkv != $GKV_SYS) {
+			die("ERROR: unsupported gcov kernel version found ".
+			    "($gkv)\n");
+		}
+		info("  content type .........: kernel data\n");
+		info("  gcov kernel version ..: %s\n", $GKV_NAME[$gkv]);
+	} else {
+		info("  content type .........: application data\n");
+	}
+	info("  data files ...........: $count\n");
+	chdir($cwd);
+
+	return ($dir, $build, $gkv);
+}
+
+#
+# write_file(filename, $content)
+#
+# Create a file named filename and write the specified content to it.
+#
+
+sub write_file($$)
+{
+	my ($filename, $content) = @_;
+	local *HANDLE;
+
+	open(HANDLE, ">", $filename) || return 0;
+	print(HANDLE $content);
+	close(HANDLE) || return 0;
+
+	return 1;
+}
+
+# count_package_data(filename)
+#
+# Count the number of coverage data files in the specified package file.
+#
+
+sub count_package_data($)
+{
+	my ($filename) = @_;
+	local *HANDLE;
+	my $count = 0;
+
+	open(HANDLE, "-|", "tar tfz '$filename'") or return undef;
+	while (<HANDLE>) {
+		if (/\.da$/ || /\.gcda$/) {
+			$count++;
+		}
+	}
+	close(HANDLE);
+	return $count;
+}
+
+#
+# create_package(package_file, source_directory, build_directory[,
+# 		 kernel_gcov_version])
+#
+# Store unprocessed coverage data files from source_directory to package_file.
+#
+
+sub create_package($$$;$)
+{
+	my ($file, $dir, $build, $gkv) = @_;
+	my $cwd = getcwd();
+
+	# Check for availability of tar tool first
+	system("tar --help > /dev/null")
+		and die("ERROR: tar command not available\n");
+
+	# Print information about the package
+	info("Creating package $file:\n");
+	info("  data directory .......: $dir\n");
+
+	# Handle build directory
+	if (defined($build)) {
+		info("  build directory ......: $build\n");
+		write_file("$dir/$pkg_build_file", $build)
+			or die("ERROR: could not write to ".
+			       "$dir/$pkg_build_file\n");
+	}
+
+	# Handle gcov kernel version data
+	if (defined($gkv)) {
+		info("  content type .........: kernel data\n");
+		info("  gcov kernel version ..: %s\n", $GKV_NAME[$gkv]);
+		write_file("$dir/$pkg_gkv_file", $gkv)
+			or die("ERROR: could not write to ".
+			       "$dir/$pkg_gkv_file\n");
+	} else {
+		info("  content type .........: application data\n");
+	}
+
+	# Create package
+	$file = abs_path($file);
+	chdir($dir);
+	system("tar cfz '$file' .")
+		and die("ERROR: could not create package $file\n");
+	chdir($cwd);
+
+	# Remove temporary files
+	unlink("$dir/$pkg_build_file");
+	unlink("$dir/$pkg_gkv_file");
+
+	# Show number of data files
+	if (!$quiet) {
+		my $count = count_package_data($file);
+
+		if (defined($count)) {
+			info("  data files ...........: $count\n");
+		}
+	}
+}
+
+sub find_link_fn($$$)
+{
+	my ($from, $rel, $filename) = @_;
+	my $absfile = catfile($from, $rel, $filename);
+
+	if (-l $absfile) {
+		return $absfile;
+	}
+	return undef;
+}
+
+#
+# get_base(dir)
+#
+# Return (BASE, OBJ), where
+#  - BASE: is the path to the kernel base directory relative to dir
+#  - OBJ: is the absolute path to the kernel build directory
+#
+
+sub get_base($)
+{
+	my ($dir) = @_;
+	my $marker = "kernel/gcov/base.gcno";
+	my $markerfile;
+	my $sys;
+	my $obj;
+	my $link;
+
+	$markerfile = lcov_find($dir, \&find_link_fn, $marker);
+	if (!defined($markerfile)) {
+		return (undef, undef);
+	}
+
+	# sys base is parent of parent of markerfile.
+	$sys = abs2rel(dirname(dirname(dirname($markerfile))), $dir);
+
+	# obj base is parent of parent of markerfile link target.
+	$link = readlink($markerfile);
+	if (!defined($link)) {
+		die("ERROR: could not read $markerfile\n");
+	}
+	$obj = dirname(dirname(dirname($link)));
+
+	return ($sys, $obj);
+}
+
+#
+# apply_base_dir(data_dir, base_dir, build_dir, @directories)
+#
+# Make entries in @directories relative to data_dir.
+#
+
+sub apply_base_dir($$$@)
+{
+	my ($data, $base, $build, @dirs) = @_;
+	my $dir;
+	my @result;
+
+	foreach $dir (@dirs) {
+		# Is directory path relative to data directory?
+		if (-d catdir($data, $dir)) {
+			push(@result, $dir);
+			next;
+		}
+		# Relative to the auto-detected base-directory?
+		if (defined($base)) {
+			if (-d catdir($data, $base, $dir)) {
+				push(@result, catdir($base, $dir));
+				next;
+			}
+		}
+		# Relative to the specified base-directory?
+		if (defined($base_directory)) {
+			if (file_name_is_absolute($base_directory)) {
+				$base = abs2rel($base_directory, rootdir());
+			} else {
+				$base = $base_directory;
+			}
+			if (-d catdir($data, $base, $dir)) {
+				push(@result, catdir($base, $dir));
+				next;
+			}
+		}
+		# Relative to the build directory?
+		if (defined($build)) {
+			if (file_name_is_absolute($build)) {
+				$base = abs2rel($build, rootdir());
+			} else {
+				$base = $build;
+			}
+			if (-d catdir($data, $base, $dir)) {
+				push(@result, catdir($base, $dir));
+				next;
+			}
+		}
+		die("ERROR: subdirectory $dir not found\n".
+		    "Please use -b to specify the correct directory\n");
+	}
+	return @result;
+}
+
+#
+# copy_gcov_dir(dir, [@subdirectories])
+#
+# Create a temporary directory and copy all or, if specified, only some
+# subdirectories from dir to that directory. Return the name of the temporary
+# directory.
+#
+
+sub copy_gcov_dir($;@)
+{
+	my ($data, @dirs) = @_;
+	my $tempdir = create_temp_dir();
+
+	info("Copying data to temporary directory $tempdir\n");
+	lcov_copy($data, $tempdir, @dirs);
+
+	return $tempdir;
+}
+
+#
+# kernel_capture_initial
+#
+# Capture initial kernel coverage data, i.e. create a coverage data file from
+# static graph files which contains zero coverage data for all instrumented
+# lines.
+#
+
+sub kernel_capture_initial()
+{
+	my $build;
+	my $source;
+	my @params;
+
+	if (defined($base_directory)) {
+		$build = $base_directory;
+		$source = "specified";
+	} else {
+		(undef, $build) = get_base($gcov_dir);
+		if (!defined($build)) {
+			die("ERROR: could not auto-detect build directory.\n".
+			    "Please use -b to specify the build directory\n");
+		}
+		$source = "auto-detected";
+	}
+	info("Using $build as kernel build directory ($source)\n");
+	# Build directory needs to be passed to geninfo
+	$base_directory = $build;
+	if (@kernel_directory) {
+		foreach my $dir (@kernel_directory) {
+			push(@params, "$build/$dir");
+		}
+	} else {
+		push(@params, $build);
+	}
+	lcov_geninfo(@params);
+}
+
+#
+# kernel_capture_from_dir(directory, gcov_kernel_version, build)
+#
+# Perform the actual kernel coverage capturing from the specified directory
+# assuming that the data was copied from the specified gcov kernel version.
+#
+
+sub kernel_capture_from_dir($$$)
+{
+	my ($dir, $gkv, $build) = @_;
+
+	# Create package or coverage file
+	if (defined($to_package)) {
+		create_package($to_package, $dir, $build, $gkv);
+	} else {
+		# Build directory needs to be passed to geninfo
+		$base_directory = $build;
+		lcov_geninfo($dir);
+	}
+}
+
+#
+# adjust_kernel_dir(dir, build)
+#
+# Adjust directories specified with -k so that they point to the directory
+# relative to DIR. Return the build directory if specified or the auto-
+# detected build-directory.
+#
+
+sub adjust_kernel_dir($$)
+{
+	my ($dir, $build) = @_;
+	my ($sys_base, $build_auto) = get_base($dir);
+
+	if (!defined($build)) {
+		$build = $build_auto;
+	}
+	if (!defined($build)) {
+		die("ERROR: could not auto-detect build directory.\n".
+		    "Please use -b to specify the build directory\n");
+	}
+	# Make @kernel_directory relative to sysfs base
+	if (@kernel_directory) {
+		@kernel_directory = apply_base_dir($dir, $sys_base, $build,
+						   @kernel_directory);
+	}
+	return $build;
+}
+
+sub kernel_capture()
+{
+	my $data_dir;
+	my $build = $base_directory;
+
+	if ($gcov_gkv == $GKV_SYS) {
+		$build = adjust_kernel_dir($gcov_dir, $build);
+	}
+	$data_dir = copy_gcov_dir($gcov_dir, @kernel_directory);
+	kernel_capture_from_dir($data_dir, $gcov_gkv, $build);
+}
+
+#
+# link_data_cb(datadir, rel, graphdir)
+#
+# Create symbolic link in GRAPHDIR/REL pointing to DATADIR/REL.
+#
+
+sub link_data_cb($$$)
+{
+	my ($datadir, $rel, $graphdir) = @_;
+	my $absfrom = catfile($datadir, $rel);
+	my $absto = catfile($graphdir, $rel);
+	my $base;
+	my $dir;
+
+	if (-e $absto) {
+		die("ERROR: could not create symlink at $absto: ".
+		    "File already exists!\n");
+	}
+	if (-l $absto) {
+		# Broken link - possibly from an interrupted earlier run
+		unlink($absto);
+	}
+
+	# Check for graph file
+	$base = $absto;
+	$base =~ s/\.(gcda|da)$//;
+	if (! -e $base.".gcno" && ! -e $base.".bbg" && ! -e $base.".bb") {
+		die("ERROR: No graph file found for $absfrom in ".
+		    dirname($base)."!\n");
+	}
+
+	symlink($absfrom, $absto) or
+		die("ERROR: could not create symlink at $absto: $!\n");
+}
+
+#
+# unlink_data_cb(datadir, rel, graphdir)
+#
+# Remove symbolic link from GRAPHDIR/REL to DATADIR/REL.
+#
+
+sub unlink_data_cb($$$)
+{
+	my ($datadir, $rel, $graphdir) = @_;
+	my $absfrom = catfile($datadir, $rel);
+	my $absto = catfile($graphdir, $rel);
+	my $target;
+
+	return if (!-l $absto);
+	$target = readlink($absto);
+	return if (!defined($target) || $target ne $absfrom);
+
+	unlink($absto) or
+		warn("WARNING: could not remove symlink $absto: $!\n");
+}
+
+#
+# link_data(datadir, graphdir, create)
+#
+# If CREATE is non-zero, create symbolic links in GRAPHDIR for data files
+# found in DATADIR. Otherwise remove link in GRAPHDIR.
+#
+
+sub link_data($$$)
+{
+	my ($datadir, $graphdir, $create) = @_;
+
+	$datadir = abs_path($datadir);
+	$graphdir = abs_path($graphdir);
+	if ($create) {
+		lcov_find($datadir, \&link_data_cb, $graphdir, '\.gcda$',
+			  '\.da$');
+	} else {
+		lcov_find($datadir, \&unlink_data_cb, $graphdir, '\.gcda$',
+			  '\.da$');
+	}
+}
+
+#
+# find_graph_cb(datadir, rel, count_ref)
+#
+# Count number of files found.
+#
+
+sub find_graph_cb($$$)
+{
+	my ($dir, $rel, $count_ref) = @_;
+
+	($$count_ref)++;
+}
+
+#
+# find_graph(dir)
+#
+# Search DIR for a graph file. Return non-zero if one was found, zero otherwise.
+#
+
+sub find_graph($)
+{
+	my ($dir) = @_;
+	my $count = 0;
+
+	lcov_find($dir, \&find_graph_cb, \$count, '\.gcno$', '\.bb$', '\.bbg$');
+
+	return $count > 0 ? 1 : 0;
+}
+
+#
+# package_capture()
+#
+# Capture coverage data from a package of unprocessed coverage data files
+# as generated by lcov --to-package.
+#
+
+sub package_capture()
+{
+	my $dir;
+	my $build;
+	my $gkv;
+
+	($dir, $build, $gkv) = get_package($from_package);
+
+	# Check for build directory
+	if (defined($base_directory)) {
+		if (defined($build)) {
+			info("Using build directory specified by -b.\n");
+		}
+		$build = $base_directory;
+	}
+
+	# Do the actual capture
+	if (defined($gkv)) {
+		if ($gkv == $GKV_SYS) {
+			$build = adjust_kernel_dir($dir, $build);
+		}
+		if (@kernel_directory) {
+			$dir = copy_gcov_dir($dir, @kernel_directory);	
+		}
+		kernel_capture_from_dir($dir, $gkv, $build);
+	} else {
+		# Build directory needs to be passed to geninfo
+		$base_directory = $build;
+		if (find_graph($dir)) {
+			# Package contains graph files - collect from there
+			lcov_geninfo($dir);
+		} else {
+			# No graph files found, link data files next to
+			# graph files
+			link_data($dir, $base_directory, 1);
+			lcov_geninfo($base_directory);
+			link_data($dir, $base_directory, 0);
+		}
+	}
+}
+
+
+#
+# info(printf_parameter)
+#
+# Use printf to write PRINTF_PARAMETER to stdout only when the $quiet flag
+# is not set.
+#
+
+sub info(@)
+{
+	if (!$quiet)
+	{
+		# Print info string
+		if (!$data_stdout)
+		{
+			printf(@_)
+		}
+		else
+		{
+			# Don't interfere with the .info output to STDOUT
+			printf(STDERR @_);
+		}
+	}
+}
+
+
+#
+# create_temp_dir()
+#
+# Create a temporary directory and return its path.
+#
+# Die on error.
+#
+
+sub create_temp_dir()
+{
+	my $dir;
+
+	if (defined($tmp_dir)) {
+		$dir = tempdir(DIR => $tmp_dir, CLEANUP => 1);
+	} else {
+		$dir = tempdir(CLEANUP => 1);
+	}
+	if (!defined($dir)) {
+		die("ERROR: cannot create temporary directory\n");
+	}
+	push(@temp_dirs, $dir);
+
+	return $dir;
+}
+
+sub compress_brcount($)
+{
+	my ($brcount) = @_;
+	my $db;
+
+	$db = brcount_to_db($brcount);
+	return db_to_brcount($db, $brcount);
+}
+
+sub get_br_found_and_hit($)
+{
+	my ($brcount) = @_;
+	my $db;
+
+	$db = brcount_to_db($brcount);
+
+	return brcount_db_get_found_and_hit($db);
+}
+
+
+#
+# read_info_file(info_filename)
+#
+# Read in the contents of the .info file specified by INFO_FILENAME. Data will
+# be returned as a reference to a hash containing the following mappings:
+#
+# %result: for each filename found in file -> \%data
+#
+# %data: "test"  -> \%testdata
+#        "sum"   -> \%sumcount
+#        "func"  -> \%funcdata
+#        "found" -> $lines_found (number of instrumented lines found in file)
+#	 "hit"   -> $lines_hit (number of executed lines in file)
+#        "f_found" -> $fn_found (number of instrumented functions found in file)
+#	 "f_hit"   -> $fn_hit (number of executed functions in file)
+#        "b_found" -> $br_found (number of instrumented branches found in file)
+#	 "b_hit"   -> $br_hit (number of executed branches in file)
+#        "check" -> \%checkdata
+#        "testfnc" -> \%testfncdata
+#        "sumfnc"  -> \%sumfnccount
+#        "testbr"  -> \%testbrdata
+#        "sumbr"   -> \%sumbrcount
+#
+# %testdata   : name of test affecting this file -> \%testcount
+# %testfncdata: name of test affecting this file -> \%testfnccount
+# %testbrdata:  name of test affecting this file -> \%testbrcount
+#
+# %testcount   : line number   -> execution count for a single test
+# %testfnccount: function name -> execution count for a single test
+# %testbrcount : line number   -> branch coverage data for a single test
+# %sumcount    : line number   -> execution count for all tests
+# %sumfnccount : function name -> execution count for all tests
+# %sumbrcount  : line number   -> branch coverage data for all tests
+# %funcdata    : function name -> line number
+# %checkdata   : line number   -> checksum of source code line
+# $brdata      : text "block,branch,taken:..."
+# 
+# Note that .info file sections referring to the same file and test name
+# will automatically be combined by adding all execution counts.
+#
+# Note that if INFO_FILENAME ends with ".gz", it is assumed that the file
+# is compressed using GZIP. If available, GUNZIP will be used to decompress
+# this file.
+#
+# Die on error.
+#
+
+sub read_info_file($)
+{
+	my $tracefile = $_[0];		# Name of tracefile
+	my %result;			# Resulting hash: file -> data
+	my $data;			# Data handle for current entry
+	my $testdata;			#       "             "
+	my $testcount;			#       "             "
+	my $sumcount;			#       "             "
+	my $funcdata;			#       "             "
+	my $checkdata;			#       "             "
+	my $testfncdata;
+	my $testfnccount;
+	my $sumfnccount;
+	my $testbrdata;
+	my $testbrcount;
+	my $sumbrcount;
+	my $line;			# Current line read from .info file
+	my $testname;			# Current test name
+	my $filename;			# Current filename
+	my $hitcount;			# Count for lines hit
+	my $count;			# Execution count of current line
+	my $negative;			# If set, warn about negative counts
+	my $changed_testname;		# If set, warn about changed testname
+	my $line_checksum;		# Checksum of current line
+	local *INFO_HANDLE;		# Filehandle for .info file
+
+	info("Reading tracefile $tracefile\n");
+
+	# Check if file exists and is readable
+	stat($_[0]);
+	if (!(-r _))
+	{
+		die("ERROR: cannot read file $_[0]!\n");
+	}
+
+	# Check if this is really a plain file
+	if (!(-f _))
+	{
+		die("ERROR: not a plain file: $_[0]!\n");
+	}
+
+	# Check for .gz extension
+	if ($_[0] =~ /\.gz$/)
+	{
+		# Check for availability of GZIP tool
+		system_no_output(1, "gunzip" ,"-h")
+			and die("ERROR: gunzip command not available!\n");
+
+		# Check integrity of compressed file
+		system_no_output(1, "gunzip", "-t", $_[0])
+			and die("ERROR: integrity check failed for ".
+				"compressed file $_[0]!\n");
+
+		# Open compressed file
+		open(INFO_HANDLE, "-|", "gunzip -c '$_[0]'")
+			or die("ERROR: cannot start gunzip to decompress ".
+			       "file $_[0]!\n");
+	}
+	else
+	{
+		# Open decompressed file
+		open(INFO_HANDLE, "<", $_[0])
+			or die("ERROR: cannot read file $_[0]!\n");
+	}
+
+	$testname = "";
+	while (<INFO_HANDLE>)
+	{
+		chomp($_);
+		$line = $_;
+
+		# Switch statement
+		foreach ($line)
+		{
+			/^TN:([^,]*)(,diff)?/ && do
+			{
+				# Test name information found
+				$testname = defined($1) ? $1 : "";
+				if ($testname =~ s/\W/_/g)
+				{
+					$changed_testname = 1;
+				}
+				$testname .= $2 if (defined($2));
+				last;
+			};
+
+			/^[SK]F:(.*)/ && do
+			{
+				# Filename information found
+				# Retrieve data for new entry
+				$filename = $1;
+
+				$data = $result{$filename};
+				($testdata, $sumcount, $funcdata, $checkdata,
+				 $testfncdata, $sumfnccount, $testbrdata,
+				 $sumbrcount) =
+					get_info_entry($data);
+
+				if (defined($testname))
+				{
+					$testcount = $testdata->{$testname};
+					$testfnccount = $testfncdata->{$testname};
+					$testbrcount = $testbrdata->{$testname};
+				}
+				else
+				{
+					$testcount = {};
+					$testfnccount = {};
+					$testbrcount = {};
+				}
+				last;
+			};
+
+			/^DA:(\d+),(-?\d+)(,[^,\s]+)?/ && do
+			{
+				# Fix negative counts
+				$count = $2 < 0 ? 0 : $2;
+				if ($2 < 0)
+				{
+					$negative = 1;
+				}
+				# Execution count found, add to structure
+				# Add summary counts
+				$sumcount->{$1} += $count;
+
+				# Add test-specific counts
+				if (defined($testname))
+				{
+					$testcount->{$1} += $count;
+				}
+
+				# Store line checksum if available
+				if (defined($3))
+				{
+					$line_checksum = substr($3, 1);
+
+					# Does it match a previous definition
+					if (defined($checkdata->{$1}) &&
+					    ($checkdata->{$1} ne
+					     $line_checksum))
+					{
+						die("ERROR: checksum mismatch ".
+						    "at $filename:$1\n");
+					}
+
+					$checkdata->{$1} = $line_checksum;
+				}
+				last;
+			};
+
+			/^FN:(\d+),([^,]+)/ && do
+			{
+				last if (!$func_coverage);
+
+				# Function data found, add to structure
+				$funcdata->{$2} = $1;
+
+				# Also initialize function call data
+				if (!defined($sumfnccount->{$2})) {
+					$sumfnccount->{$2} = 0;
+				}
+				if (defined($testname))
+				{
+					if (!defined($testfnccount->{$2})) {
+						$testfnccount->{$2} = 0;
+					}
+				}
+				last;
+			};
+
+			/^FNDA:(\d+),([^,]+)/ && do
+			{
+				last if (!$func_coverage);
+
+				# Function call count found, add to structure
+				# Add summary counts
+				$sumfnccount->{$2} += $1;
+
+				# Add test-specific counts
+				if (defined($testname))
+				{
+					$testfnccount->{$2} += $1;
+				}
+				last;
+			};
+
+			/^BRDA:(\d+),(\d+),(\d+),(\d+|-)/ && do {
+				# Branch coverage data found
+				my ($line, $block, $branch, $taken) =
+				   ($1, $2, $3, $4);
+
+				last if (!$br_coverage);
+				$sumbrcount->{$line} .=
+					"$block,$branch,$taken:";
+
+				# Add test-specific counts
+				if (defined($testname)) {
+					$testbrcount->{$line} .=
+						"$block,$branch,$taken:";
+				}
+				last;
+			};
+
+			/^end_of_record/ && do
+			{
+				# Found end of section marker
+				if ($filename)
+				{
+					# Store current section data
+					if (defined($testname))
+					{
+						$testdata->{$testname} =
+							$testcount;
+						$testfncdata->{$testname} =
+							$testfnccount;
+						$testbrdata->{$testname} =
+							$testbrcount;
+					}	
+
+					set_info_entry($data, $testdata,
+						       $sumcount, $funcdata,
+						       $checkdata, $testfncdata,
+						       $sumfnccount,
+						       $testbrdata,
+						       $sumbrcount);
+					$result{$filename} = $data;
+					last;
+				}
+			};
+
+			# default
+			last;
+		}
+	}
+	close(INFO_HANDLE);
+
+	# Calculate hit and found values for lines and functions of each file
+	foreach $filename (keys(%result))
+	{
+		$data = $result{$filename};
+
+		($testdata, $sumcount, undef, undef, $testfncdata,
+		 $sumfnccount, $testbrdata, $sumbrcount) =
+			get_info_entry($data);
+
+		# Filter out empty files
+		if (scalar(keys(%{$sumcount})) == 0)
+		{
+			delete($result{$filename});
+			next;
+		}
+		# Filter out empty test cases
+		foreach $testname (keys(%{$testdata}))
+		{
+			if (!defined($testdata->{$testname}) ||
+			    scalar(keys(%{$testdata->{$testname}})) == 0)
+			{
+				delete($testdata->{$testname});
+				delete($testfncdata->{$testname});
+			}
+		}
+
+		$data->{"found"} = scalar(keys(%{$sumcount}));
+		$hitcount = 0;
+
+		foreach (keys(%{$sumcount}))
+		{
+			if ($sumcount->{$_} > 0) { $hitcount++; }
+		}
+
+		$data->{"hit"} = $hitcount;
+
+		# Get found/hit values for function call data
+		$data->{"f_found"} = scalar(keys(%{$sumfnccount}));
+		$hitcount = 0;
+
+		foreach (keys(%{$sumfnccount})) {
+			if ($sumfnccount->{$_} > 0) {
+				$hitcount++;
+			}
+		}
+		$data->{"f_hit"} = $hitcount;
+
+		# Combine branch data for the same branches
+		(undef, $data->{"b_found"}, $data->{"b_hit"}) =
+			compress_brcount($sumbrcount);
+		foreach $testname (keys(%{$testbrdata})) {
+			compress_brcount($testbrdata->{$testname});
+		}
+	}
+
+	if (scalar(keys(%result)) == 0)
+	{
+		die("ERROR: no valid records found in tracefile $tracefile\n");
+	}
+	if ($negative)
+	{
+		warn("WARNING: negative counts found in tracefile ".
+		     "$tracefile\n");
+	}
+	if ($changed_testname)
+	{
+		warn("WARNING: invalid characters removed from testname in ".
+		     "tracefile $tracefile\n");
+	}
+
+	return(\%result);
+}
+
+
+#
+# get_info_entry(hash_ref)
+#
+# Retrieve data from an entry of the structure generated by read_info_file().
+# Return a list of references to hashes:
+# (test data hash ref, sum count hash ref, funcdata hash ref, checkdata hash
+#  ref, testfncdata hash ref, sumfnccount hash ref, testbrdata hash ref,
+#  sumbrcount hash ref, lines found, lines hit, functions found,
+#  functions hit, branches found, branches hit)
+#
+
+sub get_info_entry($)
+{
+	my $testdata_ref = $_[0]->{"test"};
+	my $sumcount_ref = $_[0]->{"sum"};
+	my $funcdata_ref = $_[0]->{"func"};
+	my $checkdata_ref = $_[0]->{"check"};
+	my $testfncdata = $_[0]->{"testfnc"};
+	my $sumfnccount = $_[0]->{"sumfnc"};
+	my $testbrdata = $_[0]->{"testbr"};
+	my $sumbrcount = $_[0]->{"sumbr"};
+	my $lines_found = $_[0]->{"found"};
+	my $lines_hit = $_[0]->{"hit"};
+	my $f_found = $_[0]->{"f_found"};
+	my $f_hit = $_[0]->{"f_hit"};
+	my $br_found = $_[0]->{"b_found"};
+	my $br_hit = $_[0]->{"b_hit"};
+
+	return ($testdata_ref, $sumcount_ref, $funcdata_ref, $checkdata_ref,
+		$testfncdata, $sumfnccount, $testbrdata, $sumbrcount,
+		$lines_found, $lines_hit, $f_found, $f_hit,
+		$br_found, $br_hit);
+}
+
+
+#
+# set_info_entry(hash_ref, testdata_ref, sumcount_ref, funcdata_ref,
+#                checkdata_ref, testfncdata_ref, sumfcncount_ref,
+#                testbrdata_ref, sumbrcount_ref[,lines_found,
+#                lines_hit, f_found, f_hit, $b_found, $b_hit])
+#
+# Update the hash referenced by HASH_REF with the provided data references.
+#
+
+sub set_info_entry($$$$$$$$$;$$$$$$)
+{
+	my $data_ref = $_[0];
+
+	$data_ref->{"test"} = $_[1];
+	$data_ref->{"sum"} = $_[2];
+	$data_ref->{"func"} = $_[3];
+	$data_ref->{"check"} = $_[4];
+	$data_ref->{"testfnc"} = $_[5];
+	$data_ref->{"sumfnc"} = $_[6];
+	$data_ref->{"testbr"} = $_[7];
+	$data_ref->{"sumbr"} = $_[8];
+
+	if (defined($_[9])) { $data_ref->{"found"} = $_[9]; }
+	if (defined($_[10])) { $data_ref->{"hit"} = $_[10]; }
+	if (defined($_[11])) { $data_ref->{"f_found"} = $_[11]; }
+	if (defined($_[12])) { $data_ref->{"f_hit"} = $_[12]; }
+	if (defined($_[13])) { $data_ref->{"b_found"} = $_[13]; }
+	if (defined($_[14])) { $data_ref->{"b_hit"} = $_[14]; }
+}
+
+
+#
+# add_counts(data1_ref, data2_ref)
+#
+# DATA1_REF and DATA2_REF are references to hashes containing a mapping
+#
+#   line number -> execution count
+#
+# Return a list (RESULT_REF, LINES_FOUND, LINES_HIT) where RESULT_REF
+# is a reference to a hash containing the combined mapping in which
+# execution counts are added.
+#
+
+sub add_counts($$)
+{
+	my $data1_ref = $_[0];	# Hash 1
+	my $data2_ref = $_[1];	# Hash 2
+	my %result;		# Resulting hash
+	my $line;		# Current line iteration scalar
+	my $data1_count;	# Count of line in hash1
+	my $data2_count;	# Count of line in hash2
+	my $found = 0;		# Total number of lines found
+	my $hit = 0;		# Number of lines with a count > 0
+
+	# First pass: all lines known to data1, summing counts for lines
+	# that are present in both hashes
+	foreach $line (keys(%$data1_ref))
+	{
+		$data1_count = $data1_ref->{$line};
+		$data2_count = $data2_ref->{$line};
+
+		# Add counts if present in both hashes
+		if (defined($data2_count)) { $data1_count += $data2_count; }
+
+		# Store sum in %result
+		$result{$line} = $data1_count;
+
+		$found++;
+		if ($data1_count > 0) { $hit++; }
+	}
+
+	# Add lines unique to data2_ref
+	foreach $line (keys(%$data2_ref))
+	{
+		# Skip lines already in data1_ref
+		if (defined($data1_ref->{$line})) { next; }
+
+		# Copy count from data2_ref
+		$result{$line} = $data2_ref->{$line};
+
+		$found++;
+		if ($result{$line} > 0) { $hit++; }
+	}
+
+	return (\%result, $found, $hit);
+}
+
+
+#
+# merge_checksums(ref1, ref2, filename)
+#
+# REF1 and REF2 are references to hashes containing a mapping
+#
+#   line number -> checksum
+#
+# Merge checksum lists defined in REF1 and REF2 and return reference to
+# resulting hash. Die if a checksum for a line is defined in both hashes
+# but does not match.
+#
+
+sub merge_checksums($$$)
+{
+	my $ref1 = $_[0];
+	my $ref2 = $_[1];
+	my $filename = $_[2];
+	my %result;
+	my $line;
+
+	foreach $line (keys(%{$ref1}))
+	{
+		if (defined($ref2->{$line}) &&
+		    ($ref1->{$line} ne $ref2->{$line}))
+		{
+			die("ERROR: checksum mismatch at $filename:$line\n");
+		}
+		$result{$line} = $ref1->{$line};
+	}
+
+	# Copy lines only present in ref2; for common lines this overwrite is
+	# harmless because equality was verified above.
+	foreach $line (keys(%{$ref2}))
+	{
+		$result{$line} = $ref2->{$line};
+	}
+
+	return \%result;
+}
+
+
+#
+# merge_func_data(funcdata1, funcdata2, filename)
+#
+
+# Merge function name -> start line mappings FUNCDATA1 and FUNCDATA2.
+# On a start-line conflict the entry from FUNCDATA1 is kept and a warning
+# is printed. Return reference to the merged hash.
+sub merge_func_data($$$)
+{
+	my ($funcdata1, $funcdata2, $filename) = @_;
+	my %result;
+	my $func;
+
+	if (defined($funcdata1)) {
+		%result = %{$funcdata1};
+	}
+
+	foreach $func (keys(%{$funcdata2})) {
+		my $line1 = $result{$func};
+		my $line2 = $funcdata2->{$func};
+
+		if (defined($line1) && ($line1 != $line2)) {
+			warn("WARNING: function data mismatch at ".
+			     "$filename:$line2\n");
+			next;
+		}
+		$result{$func} = $line2;
+	}
+
+	return \%result;
+}
+
+
+#
+# add_fnccount(fnccount1, fnccount2)
+#
+# Add function call count data. Return list (fnccount_added, f_found, f_hit)
+#
+
+sub add_fnccount($$)
+{
+	my ($fnccount1, $fnccount2) = @_;
+	my %result;
+	my $f_found;
+	my $f_hit;
+	my $function;
+
+	if (defined($fnccount1)) {
+		%result = %{$fnccount1};
+	}
+	# += autovivifies entries unique to fnccount2 with an initial count
+	# of 0, so this both sums common entries and copies unique ones.
+	foreach $function (keys(%{$fnccount2})) {
+		$result{$function} += $fnccount2->{$function};
+	}
+	$f_found = scalar(keys(%result));
+	$f_hit = 0;
+	foreach $function (keys(%result)) {
+		if ($result{$function} > 0) {
+			$f_hit++;
+		}
+	}
+
+	return (\%result, $f_found, $f_hit);
+}
+
+#
+# add_testfncdata(testfncdata1, testfncdata2)
+#
+# Add function call count data for several tests. Return reference to
+# added_testfncdata.
+#
+
+sub add_testfncdata($$)
+{
+	my ($testfncdata1, $testfncdata2) = @_;
+	my %result;
+	my $testname;
+
+	foreach $testname (keys(%{$testfncdata1})) {
+		if (defined($testfncdata2->{$testname})) {
+			my $fnccount;
+
+			# Function call count data for this testname exists
+			# in both data sets: merge
+			# (list assignment keeps only the merged hash ref,
+			# discarding the f_found/f_hit values)
+			($fnccount) = add_fnccount(
+				$testfncdata1->{$testname},
+				$testfncdata2->{$testname});
+			$result{$testname} = $fnccount;
+			next;
+		}
+		# Function call count data for this testname is unique to
+		# data set 1: copy
+		$result{$testname} = $testfncdata1->{$testname};
+	}
+
+	# Add count data for testnames unique to data set 2
+	foreach $testname (keys(%{$testfncdata2})) {
+		if (!defined($result{$testname})) {
+			$result{$testname} = $testfncdata2->{$testname};
+		}
+	}
+	return \%result;
+}
+
+
+#
+# brcount_to_db(brcount)
+#
+# Convert brcount data to the following format:
+#
+# db:          line number    -> block hash
+# block hash:  block number   -> branch hash
+# branch hash: branch number  -> taken value
+#
+
+sub brcount_to_db($)
+{
+	my ($brcount) = @_;
+	my $line;
+	my $db = {};
+
+	# Add branches to database; brcount values are strings of the form
+	# "block,branch,taken:block,branch,taken:..." where taken is either
+	# a number or "-" (branch not executed).
+	foreach $line (keys(%{$brcount})) {
+		my $brdata = $brcount->{$line};
+
+		foreach my $entry (split(/:/, $brdata)) {
+			my ($block, $branch, $taken) = split(/,/, $entry);
+			my $old = $db->{$line}->{$block}->{$branch};
+
+			# "-" acts as zero when combined with a number
+			if (!defined($old) || $old eq "-") {
+				$old = $taken;
+			} elsif ($taken ne "-") {
+				$old += $taken;
+			}
+
+			$db->{$line}->{$block}->{$branch} = $old;
+		}
+	}
+
+	return $db;
+}
+
+
+#
+# db_to_brcount(db[, brcount])
+#
+# Convert branch coverage data back to brcount format. If brcount is specified,
+# the converted data is directly inserted in brcount.
+#
+
+# Return list ($brcount, $br_found, $br_hit).
+sub db_to_brcount($;$)
+{
+	my ($db, $brcount) = @_;
+	my $line;
+	my $br_found = 0;
+	my $br_hit = 0;
+
+	# Convert database back to brcount format
+	# (if $brcount was not passed, the hash assignment below
+	# autovivifies a fresh hash ref which is then returned)
+	foreach $line (sort({$a <=> $b} keys(%{$db}))) {
+		my $ldata = $db->{$line};
+		my $brdata;
+		my $block;
+
+		foreach $block (sort({$a <=> $b} keys(%{$ldata}))) {
+			my $bdata = $ldata->{$block};
+			my $branch;
+
+			foreach $branch (sort({$a <=> $b} keys(%{$bdata}))) {
+				my $taken = $bdata->{$branch};
+
+				$br_found++;
+				$br_hit++ if ($taken ne "-" && $taken > 0);
+				$brdata .= "$block,$branch,$taken:";
+			}
+		}
+		$brcount->{$line} = $brdata;
+	}
+
+	return ($brcount, $br_found, $br_hit);
+}
+
+
+#
+# brcount_db_combine(db1, db2, op)
+#
+# db1 := db1 op db2, where
+#   db1, db2: brcount data as returned by brcount_to_db
+#   op:       one of $BR_ADD and BR_SUB
+#
+sub brcount_db_combine($$$)
+{
+	my ($db1, $db2, $op) = @_;
+
+	foreach my $line (keys(%{$db2})) {
+		my $ldata = $db2->{$line};
+
+		foreach my $block (keys(%{$ldata})) {
+			my $bdata = $ldata->{$block};
+
+			foreach my $branch (keys(%{$bdata})) {
+				my $taken = $bdata->{$branch};
+				my $new = $db1->{$line}->{$block}->{$branch};
+
+				if (!defined($new) || $new eq "-") {
+					$new = $taken;
+				} elsif ($taken ne "-") {
+					if ($op == $BR_ADD) {
+						$new += $taken;
+					} elsif ($op == $BR_SUB) {
+						# Subtraction is clamped at
+						# zero: counts never go
+						# negative
+						$new -= $taken;
+						$new = 0 if ($new < 0);
+					}
+				}
+
+				$db1->{$line}->{$block}->{$branch} = $new;
+			}
+		}
+	}
+}
+
+
+#
+# brcount_db_get_found_and_hit(db)
+#
+# Return (br_found, br_hit) for db.
+#
+
+sub brcount_db_get_found_and_hit($)
+{
+	my ($db) = @_;
+	my ($br_found , $br_hit) = (0, 0);
+
+	# Count every branch in the database; a branch is "hit" when its
+	# taken value is a number greater than zero ("-" means not executed)
+	foreach my $line (keys(%{$db})) {
+		my $ldata = $db->{$line};
+
+		foreach my $block (keys(%{$ldata})) {
+			my $bdata = $ldata->{$block};
+
+			foreach my $branch (keys(%{$bdata})) {
+				my $taken = $bdata->{$branch};
+
+				$br_found++;
+				$br_hit++ if ($taken ne "-" && $taken > 0);
+			}
+		}
+	}
+
+	return ($br_found, $br_hit);
+}
+
+
+# combine_brcount(brcount1, brcount2, type, inplace)
+#
+# If add is BR_ADD, add branch coverage data and return list brcount_added.
+# If add is BR_SUB, subtract the taken values of brcount2 from brcount1 and
+# return brcount_sub. If inplace is set, the result is inserted into brcount1.
+#
+
+# Return list (brcount_result, br_found, br_hit) as from db_to_brcount().
+sub combine_brcount($$$;$)
+{
+	my ($brcount1, $brcount2, $type, $inplace) = @_;
+	my ($db1, $db2);
+
+	$db1 = brcount_to_db($brcount1);
+	$db2 = brcount_to_db($brcount2);
+	brcount_db_combine($db1, $db2, $type);
+
+	return db_to_brcount($db1, $inplace ? $brcount1 : undef);
+}
+
+
+#
+# add_testbrdata(testbrdata1, testbrdata2)
+#
+# Add branch coverage data for several tests. Return reference to
+# added_testbrdata.
+#
+
+sub add_testbrdata($$)
+{
+	my ($testbrdata1, $testbrdata2) = @_;
+	my %result;
+	my $testname;
+
+	foreach $testname (keys(%{$testbrdata1})) {
+		if (defined($testbrdata2->{$testname})) {
+			my $brcount;
+
+			# Branch coverage data for this testname exists
+			# in both data sets: add
+			# (list assignment keeps only the combined brcount,
+			# discarding the br_found/br_hit values)
+			($brcount) = combine_brcount(
+				$testbrdata1->{$testname},
+				$testbrdata2->{$testname}, $BR_ADD);
+			$result{$testname} = $brcount;
+			next;
+		}
+		# Branch coverage data for this testname is unique to
+		# data set 1: copy
+		$result{$testname} = $testbrdata1->{$testname};
+	}
+
+	# Add count data for testnames unique to data set 2
+	foreach $testname (keys(%{$testbrdata2})) {
+		if (!defined($result{$testname})) {
+			$result{$testname} = $testbrdata2->{$testname};
+		}
+	}
+	return \%result;
+}
+
+
+#
+# combine_info_entries(entry_ref1, entry_ref2, filename)
+#
+# Combine .info data entry hashes referenced by ENTRY_REF1 and ENTRY_REF2.
+# Return reference to resulting hash.
+#
+
+sub combine_info_entries($$$)
+{
+	my $entry1 = $_[0];	# Reference to hash containing first entry
+	my $testdata1;
+	my $sumcount1;
+	my $funcdata1;
+	my $checkdata1;
+	my $testfncdata1;
+	my $sumfnccount1;
+	my $testbrdata1;
+	my $sumbrcount1;
+
+	my $entry2 = $_[1];	# Reference to hash containing second entry
+	my $testdata2;
+	my $sumcount2;
+	my $funcdata2;
+	my $checkdata2;
+	my $testfncdata2;
+	my $sumfnccount2;
+	my $testbrdata2;
+	my $sumbrcount2;
+
+	my %result;		# Hash containing combined entry
+	my %result_testdata;
+	my $result_sumcount = {};
+	my $result_funcdata;
+	my $result_testfncdata;
+	my $result_sumfnccount;
+	my $result_testbrdata;
+	my $result_sumbrcount;
+	my $lines_found;
+	my $lines_hit;
+	my $f_found;
+	my $f_hit;
+	my $br_found;
+	my $br_hit;
+
+	my $testname;
+	my $filename = $_[2];
+
+	# Retrieve data
+	($testdata1, $sumcount1, $funcdata1, $checkdata1, $testfncdata1,
+	 $sumfnccount1, $testbrdata1, $sumbrcount1) = get_info_entry($entry1);
+	($testdata2, $sumcount2, $funcdata2, $checkdata2, $testfncdata2,
+	 $sumfnccount2, $testbrdata2, $sumbrcount2) = get_info_entry($entry2);
+
+	# Merge checksums (dies on mismatch)
+	$checkdata1 = merge_checksums($checkdata1, $checkdata2, $filename);
+
+	# Combine funcdata
+	$result_funcdata = merge_func_data($funcdata1, $funcdata2, $filename);
+
+	# Combine function call count data
+	$result_testfncdata = add_testfncdata($testfncdata1, $testfncdata2);
+	($result_sumfnccount, $f_found, $f_hit) =
+		add_fnccount($sumfnccount1, $sumfnccount2);
+
+	# Combine branch coverage data
+	$result_testbrdata = add_testbrdata($testbrdata1, $testbrdata2);
+	($result_sumbrcount, $br_found, $br_hit) =
+		combine_brcount($sumbrcount1, $sumbrcount2, $BR_ADD);
+
+	# Combine testdata
+	foreach $testname (keys(%{$testdata1}))
+	{
+		if (defined($testdata2->{$testname}))
+		{
+			# testname is present in both entries, requires
+			# combination
+			($result_testdata{$testname}) =
+				add_counts($testdata1->{$testname},
+					   $testdata2->{$testname});
+		}
+		else
+		{
+			# testname only present in entry1, add to result
+			$result_testdata{$testname} = $testdata1->{$testname};
+		}
+
+		# update sum count hash; lines_found/lines_hit from the last
+		# iteration reflect the totals over all merged tests
+		($result_sumcount, $lines_found, $lines_hit) =
+			add_counts($result_sumcount,
+				   $result_testdata{$testname});
+	}
+
+	foreach $testname (keys(%{$testdata2}))
+	{
+		# Skip testnames already covered by previous iteration
+		if (defined($testdata1->{$testname})) { next; }
+
+		# testname only present in entry2, add to result hash
+		$result_testdata{$testname} = $testdata2->{$testname};
+
+		# update sum count hash
+		($result_sumcount, $lines_found, $lines_hit) =
+			add_counts($result_sumcount,
+				   $result_testdata{$testname});
+	}
+	
+	# Calculate resulting sumcount
+
+	# Store result
+	set_info_entry(\%result, \%result_testdata, $result_sumcount,
+		       $result_funcdata, $checkdata1, $result_testfncdata,
+		       $result_sumfnccount, $result_testbrdata,
+		       $result_sumbrcount, $lines_found, $lines_hit,
+		       $f_found, $f_hit, $br_found, $br_hit);
+
+	return(\%result);
+}
+
+
+#
+# combine_info_files(info_ref1, info_ref2)
+#
+# Combine .info data in hashes referenced by INFO_REF1 and INFO_REF2. Return
+# reference to resulting hash.
+#
+
+sub combine_info_files($$)
+{
+	# Work on a shallow copy of hash 1; entries from hash 2 are merged in
+	my %hash1 = %{$_[0]};
+	my %hash2 = %{$_[1]};
+	my $filename;
+
+	foreach $filename (keys(%hash2))
+	{
+		if ($hash1{$filename})
+		{
+			# Entry already exists in hash1, combine them
+			$hash1{$filename} =
+				combine_info_entries($hash1{$filename},
+						     $hash2{$filename},
+						     $filename);
+		}
+		else
+		{
+			# Entry is unique in both hashes, simply add to
+			# resulting hash
+			$hash1{$filename} = $hash2{$filename};
+		}
+	}
+
+	return(\%hash1);
+}
+
+
+#
+# add_traces()
+#
+
+# Combine all tracefiles in global @add_tracefile and write the result to
+# $output_filename (or stdout if $data_stdout is set). Return the summary
+# list produced by write_info_file().
+sub add_traces()
+{
+	my $total_trace;
+	my $current_trace;
+	my $tracefile;
+	my @result;
+	local *INFO_HANDLE;
+
+	info("Combining tracefiles.\n");
+
+	foreach $tracefile (@add_tracefile)
+	{
+		$current_trace = read_info_file($tracefile);
+		if ($total_trace)
+		{
+			$total_trace = combine_info_files($total_trace,
+							  $current_trace);
+		}
+		else
+		{
+			# First tracefile becomes the initial result
+			$total_trace = $current_trace;
+		}
+	}
+
+	# Write combined data
+	if (!$data_stdout)
+	{
+		info("Writing data to $output_filename\n");
+		open(INFO_HANDLE, ">", $output_filename)
+			or die("ERROR: cannot write to $output_filename!\n");
+		@result = write_info_file(*INFO_HANDLE, $total_trace);
+		close(*INFO_HANDLE);
+	}
+	else
+	{
+		@result = write_info_file(*STDOUT, $total_trace);
+	}
+
+	return @result;
+}
+
+
+#
+# write_info_file(filehandle, data)
+#
+
+# Write DATA to FILEHANDLE in lcov tracefile (.info) format, one record
+# (TN/SF ... end_of_record) per testname per source file. Return totals:
+# (ln_found, ln_hit, fn_found, fn_hit, br_found, br_hit).
+sub write_info_file(*$)
+{
+	local *INFO_HANDLE = $_[0];
+	my %data = %{$_[1]};
+	my $source_file;
+	my $entry;
+	my $testdata;
+	my $sumcount;
+	my $funcdata;
+	my $checkdata;
+	my $testfncdata;
+	my $sumfnccount;
+	my $testbrdata;
+	my $sumbrcount;
+	my $testname;
+	my $line;
+	my $func;
+	my $testcount;
+	my $testfnccount;
+	my $testbrcount;
+	my $found;
+	my $hit;
+	my $f_found;
+	my $f_hit;
+	my $br_found;
+	my $br_hit;
+	my $ln_total_found = 0;
+	my $ln_total_hit = 0;
+	my $fn_total_found = 0;
+	my $fn_total_hit = 0;
+	my $br_total_found = 0;
+	my $br_total_hit = 0;
+
+	foreach $source_file (sort(keys(%data)))
+	{
+		$entry = $data{$source_file};
+		($testdata, $sumcount, $funcdata, $checkdata, $testfncdata,
+		 $sumfnccount, $testbrdata, $sumbrcount, $found, $hit,
+		 $f_found, $f_hit, $br_found, $br_hit) =
+			get_info_entry($entry);
+
+		# Add to totals
+		$ln_total_found += $found;
+		$ln_total_hit += $hit;
+		$fn_total_found += $f_found;
+		$fn_total_hit += $f_hit;
+		$br_total_found += $br_found;
+		$br_total_hit += $br_hit;
+
+		foreach $testname (sort(keys(%{$testdata})))
+		{
+			$testcount = $testdata->{$testname};
+			$testfnccount = $testfncdata->{$testname};
+			$testbrcount = $testbrdata->{$testname};
+			# found/hit are recomputed per record below
+			$found = 0;
+			$hit   = 0;
+
+			print(INFO_HANDLE "TN:$testname\n");
+			print(INFO_HANDLE "SF:$source_file\n");
+
+			# Write function related data, sorted by function
+			# start line
+			foreach $func (
+				sort({$funcdata->{$a} <=> $funcdata->{$b}}
+				keys(%{$funcdata})))
+			{
+				print(INFO_HANDLE "FN:".$funcdata->{$func}.
+				      ",$func\n");
+			}
+			foreach $func (keys(%{$testfnccount})) {
+				print(INFO_HANDLE "FNDA:".
+				      $testfnccount->{$func}.
+				      ",$func\n");
+			}
+			($f_found, $f_hit) =
+				get_func_found_and_hit($testfnccount);
+			print(INFO_HANDLE "FNF:$f_found\n");
+			print(INFO_HANDLE "FNH:$f_hit\n");
+
+			# Write branch related data
+			$br_found = 0;
+			$br_hit = 0;
+			foreach $line (sort({$a <=> $b}
+					    keys(%{$testbrcount}))) {
+				my $brdata = $testbrcount->{$line};
+
+				foreach my $brentry (split(/:/, $brdata)) {
+					my ($block, $branch, $taken) =
+						split(/,/, $brentry);
+
+					print(INFO_HANDLE "BRDA:$line,$block,".
+					      "$branch,$taken\n");
+					$br_found++;
+					$br_hit++ if ($taken ne '-' &&
+						      $taken > 0);
+				}
+			}
+			# BRF/BRH are only emitted when branch data exists
+			if ($br_found > 0) {
+				print(INFO_HANDLE "BRF:$br_found\n");
+				print(INFO_HANDLE "BRH:$br_hit\n");
+			}
+
+			# Write line related data; checksum is appended only
+			# when checksumming is enabled and data is available
+			foreach $line (sort({$a <=> $b} keys(%{$testcount})))
+			{
+				print(INFO_HANDLE "DA:$line,".
+				      $testcount->{$line}.
+				      (defined($checkdata->{$line}) &&
+				       $checksum ?
+				       ",".$checkdata->{$line} : "")."\n");
+				$found++;
+				if ($testcount->{$line} > 0)
+				{
+					$hit++;
+				}
+
+			}
+			print(INFO_HANDLE "LF:$found\n");
+			print(INFO_HANDLE "LH:$hit\n");
+			print(INFO_HANDLE "end_of_record\n");
+		}
+	}
+
+	return ($ln_total_found, $ln_total_hit, $fn_total_found, $fn_total_hit,
+		$br_total_found, $br_total_hit);
+}
+
+
+#
+# transform_pattern(pattern)
+#
+# Transform shell wildcard expression to equivalent Perl regular expression.
+# Return transformed pattern.
+#
+
+sub transform_pattern($)
+{
+	my $pattern = $_[0];
+
+	# Escape special chars (note: [ and ] are escaped too, so shell
+	# character classes are intentionally not supported)
+
+	$pattern =~ s/\\/\\\\/g;
+	$pattern =~ s/\//\\\//g;
+	$pattern =~ s/\^/\\\^/g;
+	$pattern =~ s/\$/\\\$/g;
+	$pattern =~ s/\(/\\\(/g;
+	$pattern =~ s/\)/\\\)/g;
+	$pattern =~ s/\[/\\\[/g;
+	$pattern =~ s/\]/\\\]/g;
+	$pattern =~ s/\{/\\\{/g;
+	$pattern =~ s/\}/\\\}/g;
+	$pattern =~ s/\./\\\./g;
+	$pattern =~ s/\,/\\\,/g;
+	$pattern =~ s/\|/\\\|/g;
+	$pattern =~ s/\+/\\\+/g;
+	$pattern =~ s/\!/\\\!/g;
+
+	# Transform ? => (.) and * => (.*)
+
+	$pattern =~ s/\*/\(\.\*\)/g;
+	$pattern =~ s/\?/\(\.\)/g;
+
+	return $pattern;
+}
+
+
+#
+# extract()
+#
+
+# Keep only those files from tracefile $extract whose names match at least
+# one of the shell wildcard patterns in @ARGV, then write the result.
+sub extract()
+{
+	my $data = read_info_file($extract);
+	my $filename;
+	my $keep;
+	my $pattern;
+	my @pattern_list;
+	my $extracted = 0;
+	my @result;
+	local *INFO_HANDLE;
+
+	# Need perlreg expressions instead of shell pattern
+	@pattern_list = map({ transform_pattern($_); } @ARGV);
+
+	# Filter out files which do not match any pattern
+	# (patterns are anchored: they must match the full filename)
+	foreach $filename (sort(keys(%{$data})))
+	{
+		$keep = 0;
+
+		foreach $pattern (@pattern_list)
+		{
+			$keep ||= ($filename =~ (/^$pattern$/));
+		}
+
+
+		if (!$keep)
+		{
+			delete($data->{$filename});
+		}
+		else
+		{
+			info("Extracting $filename\n"),
+			$extracted++;
+		}
+	}
+
+	# Write extracted data
+	if (!$data_stdout)
+	{
+		info("Extracted $extracted files\n");
+		info("Writing data to $output_filename\n");
+		open(INFO_HANDLE, ">", $output_filename)
+			or die("ERROR: cannot write to $output_filename!\n");
+		@result = write_info_file(*INFO_HANDLE, $data);
+		close(*INFO_HANDLE);
+	}
+	else
+	{
+		@result = write_info_file(*STDOUT, $data);
+	}
+
+	return @result;
+}
+
+
+#
+# remove()
+#
+
+# Delete those files from tracefile $remove whose names match at least one
+# of the shell wildcard patterns in @ARGV, then write the result.
+sub remove()
+{
+	my $data = read_info_file($remove);
+	my $filename;
+	my $match_found;
+	my $pattern;
+	my @pattern_list;
+	my $removed = 0;
+	my @result;
+	local *INFO_HANDLE;
+
+	# Need perlreg expressions instead of shell pattern
+	@pattern_list = map({ transform_pattern($_); } @ARGV);
+
+	# Filter out files that match the pattern
+	# (patterns are anchored: they must match the full filename)
+	foreach $filename (sort(keys(%{$data})))
+	{
+		$match_found = 0;
+
+		foreach $pattern (@pattern_list)
+		{
+			$match_found ||= ($filename =~ (/^$pattern$/));
+		}
+
+
+		if ($match_found)
+		{
+			delete($data->{$filename});
+			info("Removing $filename\n"),
+			$removed++;
+		}
+	}
+
+	# Write data
+	if (!$data_stdout)
+	{
+		info("Deleted $removed files\n");
+		info("Writing data to $output_filename\n");
+		open(INFO_HANDLE, ">", $output_filename)
+			or die("ERROR: cannot write to $output_filename!\n");
+		@result = write_info_file(*INFO_HANDLE, $data);
+		close(*INFO_HANDLE);
+	}
+	else
+	{
+		@result = write_info_file(*STDOUT, $data);
+	}
+
+	return @result;
+}
+
+
+# get_prefix(max_width, max_percentage_too_long, path_list)
+#
+# Return a path prefix that satisfies the following requirements:
+# - is shared by more paths in path_list than any other prefix
+# - the percentage of paths which would exceed the given max_width length
+#   after applying the prefix does not exceed max_percentage_too_long
+#
+# If multiple prefixes satisfy all requirements, the longest prefix is
+# returned. Return an empty string if no prefix could be found.
+
+sub get_prefix($$@)
+{
+	my ($max_width, $max_long, @path_list) = @_;
+	my $path;
+	my $ENTRY_NUM = 0;	# Index: number of paths sharing this prefix
+	my $ENTRY_LONG = 1;	# Index: number of paths still too long
+	my %prefix;
+
+	# Build prefix hash: maps each directory prefix to [num, long]
+	foreach $path (@path_list) {
+		my ($v, $d, $f) = splitpath($path);
+		my @dirs = splitdir($d);
+		my $p_len = length($path);
+		my $i;
+
+		# Remove trailing '/'
+		pop(@dirs) if ($dirs[scalar(@dirs) - 1] eq '');
+		for ($i = 0; $i < scalar(@dirs); $i++) {
+			my $subpath = catpath($v, catdir(@dirs[0..$i]), '');
+			my $entry = $prefix{$subpath};
+
+			$entry = [ 0, 0 ] if (!defined($entry));
+			$entry->[$ENTRY_NUM]++;
+			if (($p_len - length($subpath) - 1) > $max_width) {
+				$entry->[$ENTRY_LONG]++;
+			}
+			$prefix{$subpath} = $entry;
+		}
+	}
+	# Find suitable prefix (sort descending by two keys: 1. number of
+	# entries covered by a prefix, 2. length of prefix)
+	foreach $path (sort {($prefix{$a}->[$ENTRY_NUM] ==
+			      $prefix{$b}->[$ENTRY_NUM]) ?
+				length($b) <=> length($a) :
+				$prefix{$b}->[$ENTRY_NUM] <=>
+				$prefix{$a}->[$ENTRY_NUM]}
+				keys(%prefix)) {
+		my ($num, $long) = @{$prefix{$path}};
+
+		# Check for additional requirement: number of filenames
+		# that would be too long may not exceed a certain percentage
+		if ($long <= $num * $max_long / 100) {
+			return $path;
+		}
+	}
+
+	return "";
+}
+
+
+#
+# shorten_filename(filename, width)
+#
+# Truncate filename if it is longer than width characters.
+#
+
+sub shorten_filename($$)
+{
+	my ($filename, $width) = @_;
+	my $l = length($filename);
+	my $s;	# Number of leading characters to keep
+	my $e;	# Number of trailing characters to keep
+
+	return $filename if ($l <= $width);
+	# Replace the middle of the name with '...' so both the start and
+	# the end (usually the most informative parts) remain visible
+	$e = int(($width - 3) / 2);
+	$s = $width - 3 - $e;
+
+	return substr($filename, 0, $s).'...'.substr($filename, $l - $e);
+}
+
+
+#
+# shorten_number(number, width)
+#
+# Format NUMBER so that it fits into WIDTH characters, falling back to a
+# "k" (thousands) and then "M" (millions) suffix. Return '#' if the number
+# cannot be represented within the available width.
+#
+
+sub shorten_number($$)
+{
+	my ($number, $width) = @_;
+	my $result = sprintf("%*d", $width, $number);
+
+	return $result if (length($result) <= $width);
+	# Too wide: retry in units of thousands, then millions.
+	# (The original code re-tested the unchanged $result after the
+	# division — a duplicated check that could never succeed; removed.)
+	$number = $number / 1000;
+	$result = sprintf("%*dk", $width - 1, $number);
+	return $result if (length($result) <= $width);
+	$number = $number / 1000;
+	$result = sprintf("%*dM", $width - 1, $number);
+	return $result if (length($result) <= $width);
+	return '#';
+}
+
+# Format a percentage rate for HIT/FOUND so that it fits into WIDTH
+# characters, trying one decimal place first, then none, then '#'.
+sub shorten_rate($$$)
+{
+	my ($hit, $found, $width) = @_;
+	my $result = rate($hit, $found, "%", 1, $width);
+
+	return $result if (length($result) <= $width);
+	$result = rate($hit, $found, "%", 0, $width);
+	return $result if (length($result) <= $width);
+	return "#";
+}
+
+#
+# list()
+#
+
+# Print a per-file summary table (line/function/branch coverage) for the
+# tracefile named by global $list, followed by a totals row.
+sub list()
+{
+	my $data = read_info_file($list);
+	my $filename;
+	my $found;
+	my $hit;
+	my $entry;
+	my $fn_found;
+	my $fn_hit;
+	my $br_found;
+	my $br_hit;
+	my $total_found = 0;
+	my $total_hit = 0;
+	my $fn_total_found = 0;
+	my $fn_total_hit = 0;
+	my $br_total_found = 0;
+	my $br_total_hit = 0;
+	my $prefix;
+	my $strlen = length("Filename");
+	my $format;
+	my $heading1;
+	my $heading2;
+	my @footer;
+	my $barlen;
+	my $rate;
+	my $fnrate;
+	my $brrate;
+	my $lastpath;
+	# Indices into the field-width arrays below
+	my $F_LN_NUM = 0;
+	my $F_LN_RATE = 1;
+	my $F_FN_NUM = 2;
+	my $F_FN_RATE = 3;
+	my $F_BR_NUM = 4;
+	my $F_BR_RATE = 5;
+	my @fwidth_narrow = (5, 5, 3, 5, 4, 5);
+	my @fwidth_wide = (6, 5, 5, 5, 6, 5);
+	my @fwidth = @fwidth_wide;
+	my $w;
+	my $max_width = $opt_list_width;
+	my $max_long = $opt_list_truncate_max;
+	my $fwidth_narrow_length;
+	my $fwidth_wide_length;
+	my $got_prefix = 0;
+	my $root_prefix = 0;
+
+	# Calculate total width of narrow fields
+	$fwidth_narrow_length = 0;
+	foreach $w (@fwidth_narrow) {
+		$fwidth_narrow_length += $w + 1;
+	}
+	# Calculate total width of wide fields
+	$fwidth_wide_length = 0;
+	foreach $w (@fwidth_wide) {
+		$fwidth_wide_length += $w + 1;
+	}
+	# Get common file path prefix
+	$prefix = get_prefix($max_width - $fwidth_narrow_length, $max_long,
+			     keys(%{$data}));
+	$root_prefix = 1 if ($prefix eq rootdir());
+	$got_prefix = 1 if (length($prefix) > 0);
+	$prefix =~ s/\/$//;
+	# Get longest filename length
+	foreach $filename (keys(%{$data})) {
+		if (!$opt_list_full_path) {
+			if (!$got_prefix || !$root_prefix &&
+			    !($filename =~ s/^\Q$prefix\/\E//)) {
+				my ($v, $d, $f) = splitpath($filename);
+
+				$filename = $f;
+			}
+		}
+		# Determine maximum length of entries
+		if (length($filename) > $strlen) {
+			$strlen = length($filename)
+		}
+	}
+	if (!$opt_list_full_path) {
+		my $blanks;
+
+		$w = $fwidth_wide_length;
+		# Check if all columns fit into max_width characters
+		if ($strlen + $fwidth_wide_length > $max_width) {
+			# Use narrow fields
+			@fwidth = @fwidth_narrow;
+			$w = $fwidth_narrow_length;
+			if (($strlen + $fwidth_narrow_length) > $max_width) {
+				# Truncate filenames at max width
+				$strlen = $max_width - $fwidth_narrow_length;
+			}
+		}
+		# Add some blanks between filename and fields if possible
+		$blanks = int($strlen * 0.5);
+		$blanks = 4 if ($blanks < 4);
+		$blanks = 8 if ($blanks > 8);
+		if (($strlen + $w + $blanks) < $max_width) {
+			$strlen += $blanks;
+		} else {
+			$strlen = $max_width - $w;
+		}
+	}
+	# Build the printf format and the two heading lines column by column
+	# Filename
+	$w = $strlen;
+	$format		= "%-${w}s|";
+	$heading1 	= sprintf("%*s|", $w, "");
+	$heading2 	= sprintf("%-*s|", $w, "Filename");
+	$barlen		= $w + 1;
+	# Line coverage rate
+	$w = $fwidth[$F_LN_RATE];
+	$format		.= "%${w}s ";
+	$heading1 	.= sprintf("%-*s |", $w + $fwidth[$F_LN_NUM],
+				   "Lines");
+	$heading2 	.= sprintf("%-*s ", $w, "Rate");
+	$barlen		+= $w + 1;
+	# Number of lines
+	$w = $fwidth[$F_LN_NUM];
+	$format		.= "%${w}s|";
+	$heading2	.= sprintf("%*s|", $w, "Num");
+	$barlen		+= $w + 1;
+	# Function coverage rate
+	$w = $fwidth[$F_FN_RATE];
+	$format		.= "%${w}s ";
+	$heading1 	.= sprintf("%-*s|", $w + $fwidth[$F_FN_NUM] + 1,
+				   "Functions");
+	$heading2 	.= sprintf("%-*s ", $w, "Rate");
+	$barlen		+= $w + 1;
+	# Number of functions
+	$w = $fwidth[$F_FN_NUM];
+	$format		.= "%${w}s|";
+	$heading2	.= sprintf("%*s|", $w, "Num");
+	$barlen		+= $w + 1;
+	# Branch coverage rate
+	$w = $fwidth[$F_BR_RATE];
+	$format		.= "%${w}s ";
+	$heading1 	.= sprintf("%-*s", $w + $fwidth[$F_BR_NUM] + 1,
+				   "Branches");
+	$heading2 	.= sprintf("%-*s ", $w, "Rate");
+	$barlen		+= $w + 1;
+	# Number of branches
+	$w = $fwidth[$F_BR_NUM];
+	$format		.= "%${w}s";
+	$heading2	.= sprintf("%*s", $w, "Num");
+	$barlen		+= $w;
+	# Line end
+	$format		.= "\n";
+	$heading1	.= "\n";
+	$heading2	.= "\n";
+
+	# Print heading
+	print($heading1);
+	print($heading2);
+	print(("="x$barlen)."\n");
+
+	# Print per file information
+	foreach $filename (sort(keys(%{$data})))
+	{
+		my @file_data;
+		my $print_filename = $filename;
+
+		$entry = $data->{$filename};
+		if (!$opt_list_full_path) {
+			my $p;
+
+			$print_filename = $filename;
+			if (!$got_prefix || !$root_prefix &&
+			    !($print_filename =~ s/^\Q$prefix\/\E//)) {
+				my ($v, $d, $f) = splitpath($filename);
+
+				$p = catpath($v, $d, "");
+				$p =~ s/\/$//;
+				$print_filename = $f;
+			} else {
+				$p = $prefix;
+			}
+
+			# Print a "[path/]" group header whenever the
+			# directory changes
+			if (!defined($lastpath) || $lastpath ne $p) {
+				print("\n") if (defined($lastpath));
+				$lastpath = $p;
+				print("[$lastpath/]\n") if (!$root_prefix);
+			}
+			$print_filename = shorten_filename($print_filename,
+							   $strlen);
+		}
+
+		(undef, undef, undef, undef, undef, undef, undef, undef,
+		 $found, $hit, $fn_found, $fn_hit, $br_found, $br_hit) =
+			get_info_entry($entry);
+
+		# Assume zero count if there is no function data for this file
+		if (!defined($fn_found) || !defined($fn_hit)) {
+			$fn_found = 0;
+			$fn_hit = 0;
+		}
+		# Assume zero count if there is no branch data for this file
+		if (!defined($br_found) || !defined($br_hit)) {
+			$br_found = 0;
+			$br_hit = 0;
+		}
+
+		# Add line coverage totals
+		$total_found += $found;
+		$total_hit += $hit;
+		# Add function coverage totals
+		$fn_total_found += $fn_found;
+		$fn_total_hit += $fn_hit;
+		# Add branch coverage totals
+		$br_total_found += $br_found;
+		$br_total_hit += $br_hit;
+
+		# Determine line coverage rate for this file
+		$rate = shorten_rate($hit, $found, $fwidth[$F_LN_RATE]);
+		# Determine function coverage rate for this file
+		$fnrate = shorten_rate($fn_hit, $fn_found, $fwidth[$F_FN_RATE]);
+		# Determine branch coverage rate for this file
+		$brrate = shorten_rate($br_hit, $br_found, $fwidth[$F_BR_RATE]);
+
+		# Assemble line parameters
+		push(@file_data, $print_filename);
+		push(@file_data, $rate);
+		push(@file_data, shorten_number($found, $fwidth[$F_LN_NUM]));
+		push(@file_data, $fnrate);
+		push(@file_data, shorten_number($fn_found, $fwidth[$F_FN_NUM]));
+		push(@file_data, $brrate);
+		push(@file_data, shorten_number($br_found, $fwidth[$F_BR_NUM]));
+
+		# Print assembled line
+		printf($format, @file_data);
+	}
+
+	# Determine total line coverage rate
+	$rate = shorten_rate($total_hit, $total_found, $fwidth[$F_LN_RATE]);
+	# Determine total function coverage rate
+	$fnrate = shorten_rate($fn_total_hit, $fn_total_found,
+			       $fwidth[$F_FN_RATE]);
+	# Determine total branch coverage rate
+	$brrate = shorten_rate($br_total_hit, $br_total_found,
+			       $fwidth[$F_BR_RATE]);
+
+	# Print separator
+	print(("="x$barlen)."\n");
+
+	# Assemble line parameters
+	push(@footer, sprintf("%*s", $strlen, "Total:"));
+	push(@footer, $rate);
+	push(@footer, shorten_number($total_found, $fwidth[$F_LN_NUM]));
+	push(@footer, $fnrate);
+	push(@footer, shorten_number($fn_total_found, $fwidth[$F_FN_NUM]));
+	push(@footer, $brrate);
+	push(@footer, shorten_number($br_total_found, $fwidth[$F_BR_NUM]));
+
+	# Print assembled line
+	printf($format, @footer);
+}
+
+
+#
+# get_common_filename(filename1, filename2)
+#
+# Check for filename components which are common to FILENAME1 and FILENAME2.
+# Upon success, return
+#
+#   (common, path1, path2)
+#
+#  or 'undef' in case there are no such parts.
+#
+
+sub get_common_filename($$)
+{
+        my @list1 = split("/", $_[0]);
+        my @list2 = split("/", $_[1]);
+	my @result;
+
+	# Work in reverse order, i.e. beginning with the filename itself
+	while (@list1 && @list2 && ($list1[$#list1] eq $list2[$#list2]))
+	{
+		unshift(@result, pop(@list1));
+		pop(@list2);
+	}
+
+	# Did we find any similarities?
+	if (scalar(@result) > 0)
+	{
+		# Remaining components of list1/list2 form the two
+		# distinct path parts
+	        return (join("/", @result), join("/", @list1),
+			join("/", @list2));
+	}
+	else
+	{
+		return undef;
+	}
+}
+
+
+#
+# strip_directories($path, $depth)
+#
+# Remove DEPTH leading directory levels from PATH.
+#
+
+sub strip_directories($$)
+{
+	my $filename = $_[0];
+	my $depth = $_[1];
+	my $i;
+
+	# No or non-positive depth: return path unchanged
+	if (!defined($depth) || ($depth < 1))
+	{
+		return $filename;
+	}
+	# Drop one leading path component (up to and including its '/')
+	# per iteration
+	for ($i = 0; $i < $depth; $i++)
+	{
+		$filename =~ s/^[^\/]*\/+(.*)$/$1/;
+	}
+	return $filename;
+}
+
+
+#
+# read_diff(filename)
+#
+# Read diff output from FILENAME to memory. The diff file has to follow the
+# format generated by 'diff -u'. Returns a list of hash references:
+#
+#   (mapping, path mapping)
+#
+#   mapping:   filename -> reference to line hash
+#   line hash: line number in new file -> corresponding line number in old file
+#
+#   path mapping:  filename -> old filename
+#
+# Die in case of error.
+#
+# NOTE(review): the hunk-header regex below requires explicit line counts
+# ("@@ -a,b +c,d @@"); hunks where diff omits a count of 1 (e.g. "@@ -1 +1 @@")
+# would not match and would be skipped -- confirm whether input diffs can
+# contain such hunks.
+#
+
+sub read_diff($)
+{
+	my $diff_file = $_[0];	# Name of diff file
+	my %diff;		# Resulting mapping filename -> line hash
+	my %paths;		# Resulting mapping old path  -> new path
+	my $mapping;		# Reference to current line hash
+	my $line;		# Contents of current line
+	my $num_old;		# Current line number in old file
+	my $num_new;		# Current line number in new file
+	my $file_old;		# Name of old file in diff section
+	my $file_new;		# Name of new file in diff section
+	my $filename;		# Name of common filename of diff section
+	my $in_block = 0;	# Non-zero while we are inside a diff block
+	local *HANDLE;		# File handle for reading the diff file
+
+	info("Reading diff $diff_file\n");
+
+	# Check if file exists and is readable
+	stat($diff_file);
+	if (!(-r _))
+	{
+		die("ERROR: cannot read file $diff_file!\n");
+	}
+
+	# Check if this is really a plain file
+	if (!(-f _))
+	{
+		die("ERROR: not a plain file: $diff_file!\n");
+	}
+
+	# Check for .gz extension
+	if ($diff_file =~ /\.gz$/)
+	{
+		# Check for availability of GZIP tool
+		system_no_output(1, "gunzip", "-h")
+			and die("ERROR: gunzip command not available!\n");
+
+		# Check integrity of compressed file
+		system_no_output(1, "gunzip", "-t", $diff_file)
+			and die("ERROR: integrity check failed for ".
+				"compressed file $diff_file!\n");
+
+		# Open compressed file
+		# NOTE(review): the filename is interpolated into a shell
+		# command line; a name containing a single quote would break
+		# the quoting -- confirm inputs are trusted.
+		open(HANDLE, "-|", "gunzip -c '$diff_file'")
+			or die("ERROR: cannot start gunzip to decompress ".
+			       "file $_[0]!\n");
+	}
+	else
+	{
+		# Open decompressed file
+		open(HANDLE, "<", $diff_file)
+			or die("ERROR: cannot read file $_[0]!\n");
+	}
+
+	# Parse diff file line by line
+	while (<HANDLE>)
+	{
+		chomp($_);
+		$line = $_;
+
+		# Poor man's switch: each pattern block ends with 'last' to
+		# leave the single-iteration foreach once a case matched.
+		foreach ($line)
+		{
+			# Filename of old file:
+			# --- <filename> <date>
+			/^--- (\S+)/ && do
+			{
+				$file_old = strip_directories($1, $strip);
+				last;
+			};
+			# Filename of new file:
+			# +++ <filename> <date>
+			/^\+\+\+ (\S+)/ && do
+			{
+				# Add last file to resulting hash
+				if ($filename)
+				{
+					my %new_hash;
+					$diff{$filename} = $mapping;
+					$mapping = \%new_hash;
+				}
+				$file_new = strip_directories($1, $strip);
+				$filename = $file_old;
+				$paths{$filename} = $file_new;
+				$num_old = 1;
+				$num_new = 1;
+				last;
+			};
+			# Start of diff block:
+			# @@ -old_start,old_num, +new_start,new_num @@
+			/^\@\@\s+-(\d+),(\d+)\s+\+(\d+),(\d+)\s+\@\@$/ && do
+			{
+			$in_block = 1;
+			# Lines before the hunk are unchanged: map 1:1
+			while ($num_old < $1)
+			{
+				$mapping->{$num_new} = $num_old;
+				$num_old++;
+				$num_new++;
+			}
+			last;
+			};
+			# Unchanged line
+			# <line starts with blank>
+			/^ / && do
+			{
+				if ($in_block == 0)
+				{
+					last;
+				}
+				$mapping->{$num_new} = $num_old;
+				$num_old++;
+				$num_new++;
+				last;
+			};
+			# Line as seen in old file
+			# <line starts with '-'>
+			/^-/ && do
+			{
+				if ($in_block == 0)
+				{
+					last;
+				}
+				$num_old++;
+				last;
+			};
+			# Line as seen in new file
+			# <line starts with '+'>
+			/^\+/ && do
+			{
+				if ($in_block == 0)
+				{
+					last;
+				}
+				$num_new++;
+				last;
+			};
+			# Empty line
+			/^$/ && do
+			{
+				if ($in_block == 0)
+				{
+					last;
+				}
+				$mapping->{$num_new} = $num_old;
+				$num_old++;
+				$num_new++;
+				last;
+			};
+		}
+	}
+
+	close(HANDLE);
+
+	# Add final diff file section to resulting hash
+	if ($filename)
+	{
+		$diff{$filename} = $mapping;
+	}
+
+	if (!%diff)
+	{
+		die("ERROR: no valid diff data found in $diff_file!\n".
+		    "Make sure to use 'diff -u' when generating the diff ".
+		    "file.\n");
+	}
+	return (\%diff, \%paths);
+}
+
+
+#
+# apply_diff($count_data, $line_hash)
+#
+# Transform count data using a mapping of lines:
+#
+#   $count_data: reference to hash: line number -> data
+#   $line_hash:  reference to hash: line number new -> line number old
+#
+# Lines covered by the mapping are renumbered accordingly; lines after the
+# last mapped line are shifted by the net offset (last_new - last_old).
+# Old lines with no entry in the mapping (i.e. lines changed or removed by
+# the diff) are dropped.
+#
+# Return a reference to transformed count data.
+#
+
+sub apply_diff($$)
+{
+	my $count_data = $_[0];	# Reference to data hash: line -> hash
+	my $line_hash = $_[1];	# Reference to line hash: new line -> old line
+	my %result;		# Resulting hash
+	my $last_new = 0;	# Last new line number found in line hash
+	my $last_old = 0;	# Last old line number found in line hash
+
+	# Iterate all new line numbers found in the diff
+	foreach (sort({$a <=> $b} keys(%{$line_hash})))
+	{
+		$last_new = $_;
+		$last_old = $line_hash->{$last_new};
+
+		# Is there data associated with the corresponding old line?
+		if (defined($count_data->{$line_hash->{$_}}))
+		{
+			# Copy data to new hash with a new line number
+			$result{$_} = $count_data->{$line_hash->{$_}};
+		}
+	}
+	# Transform all other lines which come after the last diff entry
+	foreach (sort({$a <=> $b} keys(%{$count_data})))
+	{
+		if ($_ <= $last_old)
+		{
+			# Skip lines which were covered by line hash
+			next;
+		}
+		# Copy data to new hash with an offset
+		$result{$_ + ($last_new - $last_old)} = $count_data->{$_};
+	}
+
+	return \%result;
+}
+
+
+#
+# apply_diff_to_brcount(brcount, linedata)
+#
+# Adjust line numbers of branch coverage data according to linedata.
+# Works by round-tripping through the "db" representation so that the
+# generic apply_diff() can be reused. Returns the adjusted brcount hash
+# reference.
+#
+
+sub apply_diff_to_brcount($$)
+{
+	my ($brcount, $linedata) = @_;
+	my $db;
+
+	# Convert brcount to db format
+	$db = brcount_to_db($brcount);
+	# Apply diff to db format
+	$db = apply_diff($db, $linedata);
+	# Convert db format back to brcount format
+	($brcount) = db_to_brcount($db);
+
+	return $brcount;
+}
+
+
+#
+# get_hash_max(hash_ref)
+#
+# Return the highest integer key from hash, or undef for an empty hash.
+#
+
+sub get_hash_max($)
+{
+	my ($hash) = @_;
+	my $max;
+
+	# Track the numerically largest key. The previous code compared the
+	# hash *value* ($hash->{$_}) against the running maximum *key*, which
+	# only yields the highest key for monotone key->value mappings (as the
+	# diff line maps happen to be); compare keys instead so the result
+	# matches the documented contract for any input.
+	foreach (keys(%{$hash})) {
+		if (!defined($max) || $_ > $max) {
+			$max = $_;
+		}
+	}
+	return $max;
+}
+
+#
+# get_hash_reverse(hash_ref)
+#
+# Return a reference to a hash containing the value -> key inversion of
+# hash_ref. Note: if several keys share the same value, only one of them
+# (in arbitrary hash order) survives the inversion.
+#
+sub get_hash_reverse($)
+{
+	my ($hash) = @_;
+	my %result;
+
+	foreach (keys(%{$hash})) {
+		$result{$hash->{$_}} = $_;
+	}
+
+	return \%result;
+}
+
+#
+# apply_diff_to_funcdata(funcdata, line_hash)
+#
+# Adjust the starting line numbers in funcdata (function name -> line)
+# according to line_hash (new line -> old line). Functions whose starting
+# line does not appear in the mapping and does not lie beyond the last
+# mapped line are dropped (i.e. treated as removed by the diff).
+#
+
+sub apply_diff_to_funcdata($$)
+{
+	my ($funcdata, $linedata) = @_;
+	my $last_new = get_hash_max($linedata);
+	my $last_old = $linedata->{$last_new};
+	my $func;
+	my %result;
+	# Invert the mapping to old line -> new line for direct lookup
+	my $line_diff = get_hash_reverse($linedata);
+
+	foreach $func (keys(%{$funcdata})) {
+		my $line = $funcdata->{$func};
+
+		if (defined($line_diff->{$line})) {
+			$result{$func} = $line_diff->{$line};
+		} elsif ($line > $last_old) {
+			# Past the last hunk: shift by the net line offset
+			$result{$func} = $line + $last_new - $last_old;
+		}
+	}
+
+	return \%result;
+}
+
+
+#
+# get_line_hash($filename, $diff_data, $path_data)
+#
+# Find line hash in DIFF_DATA which matches FILENAME. On success, return list
+# line hash. or undef in case of no match. Die if more than one line hashes in
+# DIFF_DATA match.
+#
+# Uses the global $diff_path as an optional prefix when matching diff
+# filenames against FILENAME.
+#
+
+sub get_line_hash($$$)
+{
+	my $filename = $_[0];
+	my $diff_data = $_[1];
+	my $path_data = $_[2];
+	my $conversion;
+	my $old_path;
+	my $new_path;
+	my $diff_name;
+	my $common;
+	my $old_depth;
+	my $new_depth;
+
+	# Remove trailing slash from diff path
+	$diff_path =~ s/\/$//;
+	foreach (keys(%{$diff_data}))
+	{
+		my $sep = "";
+
+		# Insert a separator unless the diff entry is already absolute
+		$sep = '/' if (!/^\//);
+
+		# Try to match diff filename with filename
+		if ($filename =~ /^\Q$diff_path$sep$_\E$/)
+		{
+			if ($diff_name)
+			{
+				# Two files match, choose the more specific one
+				# (the one with more path components)
+				$old_depth = ($diff_name =~ tr/\///);
+				$new_depth = (tr/\///);
+				if ($old_depth == $new_depth)
+				{
+					die("ERROR: diff file contains ".
+					    "ambiguous entries for ".
+					    "$filename\n");
+				}
+				elsif ($new_depth > $old_depth)
+				{
+					$diff_name = $_;
+				}
+			}
+			else
+			{
+				$diff_name = $_;
+			}
+		};
+	}
+	if ($diff_name)
+	{
+		# Get converted path
+		if ($filename =~ /^(.*)$diff_name$/)
+		{
+			($common, $old_path, $new_path) =
+				get_common_filename($filename,
+					$1.$path_data->{$diff_name});
+		}
+		return ($diff_data->{$diff_name}, $old_path, $new_path);
+	}
+	else
+	{
+		return undef;
+	}
+}
+
+
+#
+# convert_paths(trace_data, path_conversion_data)
+#
+# Rename all paths in TRACE_DATA which show up in PATH_CONVERSION_DATA.
+# Both hashes are modified in place: the conversion table is first expanded
+# with common parent-directory mappings, then matching trace entries are
+# renamed (or combined if the target name already exists).
+#
+
+sub convert_paths($$)
+{
+	my $trace_data = $_[0];
+	my $path_conversion_data = $_[1];
+	my $filename;
+	my $new_path;
+
+	if (scalar(keys(%{$path_conversion_data})) == 0)
+	{
+		info("No path conversion data available.\n");
+		return;
+	}
+
+	# Expand path conversion list
+	foreach $filename (keys(%{$path_conversion_data}))
+	{
+		$new_path = $path_conversion_data->{$filename};
+		# Walk both paths upwards, adding a mapping for every pair of
+		# differing parent directories
+		while (($filename =~ s/^(.*)\/[^\/]+$/$1/) &&
+		       ($new_path =~ s/^(.*)\/[^\/]+$/$1/) &&
+		       ($filename ne $new_path))
+		{
+			$path_conversion_data->{$filename} = $new_path;
+		}
+	}
+
+	# Adjust paths
+	FILENAME: foreach $filename (keys(%{$trace_data}))
+	{
+		# Find a path in our conversion table that matches, starting
+		# with the longest path
+		foreach (sort({length($b) <=> length($a)}
+			      keys(%{$path_conversion_data})))
+		{
+			# Is this path a prefix of our filename?
+			if (!($filename =~ /^$_(.*)$/))
+			{
+				next;
+			}
+			$new_path = $path_conversion_data->{$_}.$1;
+
+			# Make sure not to overwrite an existing entry under
+			# that path name
+			if ($trace_data->{$new_path})
+			{
+				# Need to combine entries
+				$trace_data->{$new_path} =
+					combine_info_entries(
+						$trace_data->{$filename},
+						$trace_data->{$new_path},
+						$filename);
+			}
+			else
+			{
+				# Simply rename entry
+				$trace_data->{$new_path} =
+					$trace_data->{$filename};
+			}
+			delete($trace_data->{$filename});
+			next FILENAME;
+		}
+		info("No conversion available for filename $filename\n");
+	}
+}
+
+#
+# sub adjust_fncdata(funcdata, testfncdata, sumfnccount)
+#
+# Remove function call count data from testfncdata and sumfnccount which
+# is no longer present in funcdata. Both testfncdata and sumfnccount are
+# modified in place; funcdata is read-only.
+#
+
+sub adjust_fncdata($$$)
+{
+	my ($funcdata, $testfncdata, $sumfnccount) = @_;
+	my $testname;
+	my $func;
+
+	# Remove count data in testfncdata for functions which are no longer
+	# in funcdata
+	foreach $testname (keys(%{$testfncdata})) {
+		my $fnccount = $testfncdata->{$testname};
+
+		foreach $func (keys(%{$fnccount})) {
+			if (!defined($funcdata->{$func})) {
+				delete($fnccount->{$func});
+			}
+		}
+	}
+	# Remove count data in sumfnccount for functions which are no longer
+	# in funcdata
+	foreach $func (keys(%{$sumfnccount})) {
+		if (!defined($funcdata->{$func})) {
+			delete($sumfnccount->{$func});
+		}
+	}
+}
+
+#
+# get_func_found_and_hit(sumfnccount)
+#
+# Return (f_found, f_hit) for sumfnccount: the total number of functions
+# and the number of functions with a non-zero call count.
+#
+
+sub get_func_found_and_hit($)
+{
+	my ($sumfnccount) = @_;
+
+	# Total number of known functions
+	my $f_found = scalar(keys(%{$sumfnccount}));
+	# Functions which were called at least once
+	my $f_hit = scalar(grep({ $_ > 0 } values(%{$sumfnccount})));
+
+	return ($f_found, $f_hit);
+}
+
+#
+# diff()
+#
+# Implement the --diff operation: read the trace file named by the global
+# $diff, read a unified diff from $ARGV[0], and adjust all line-based
+# coverage data (line, function, branch, checksum) to the new file
+# versions. Writes the result to $output_filename or STDOUT and returns
+# the list produced by write_info_file().
+#
+
+sub diff()
+{
+	my $trace_data = read_info_file($diff);
+	my $diff_data;
+	my $path_data;
+	my $old_path;
+	my $new_path;
+	my %path_conversion_data;
+	my $filename;
+	my $line_hash;
+	my $new_name;
+	my $entry;
+	my $testdata;
+	my $testname;
+	my $sumcount;
+	my $funcdata;
+	my $checkdata;
+	my $testfncdata;
+	my $sumfnccount;
+	my $testbrdata;
+	my $sumbrcount;
+	my $found;
+	my $hit;
+	my $f_found;
+	my $f_hit;
+	my $br_found;
+	my $br_hit;
+	my $converted = 0;
+	my $unchanged = 0;
+	my @result;
+	local *INFO_HANDLE;
+
+	($diff_data, $path_data) = read_diff($ARGV[0]);
+
+        foreach $filename (sort(keys(%{$trace_data})))
+        {
+		# Find a diff section corresponding to this file
+		($line_hash, $old_path, $new_path) =
+			get_line_hash($filename, $diff_data, $path_data);
+		if (!$line_hash)
+		{
+			# There's no diff section for this file
+			$unchanged++;
+			next;
+		}
+		$converted++;
+		if ($old_path && $new_path && ($old_path ne $new_path))
+		{
+			$path_conversion_data{$old_path} = $new_path;
+		}
+		# Check for deleted files
+		if (scalar(keys(%{$line_hash})) == 0)
+		{
+			info("Removing $filename\n");
+			delete($trace_data->{$filename});
+			next;
+		}
+		info("Converting $filename\n");
+		$entry = $trace_data->{$filename};
+		($testdata, $sumcount, $funcdata, $checkdata, $testfncdata,
+		 $sumfnccount, $testbrdata, $sumbrcount) =
+			get_info_entry($entry);
+		# Convert test data
+		foreach $testname (keys(%{$testdata}))
+		{
+			# Adjust line numbers of line coverage data
+			$testdata->{$testname} =
+				apply_diff($testdata->{$testname}, $line_hash);
+			# Adjust line numbers of branch coverage data
+			$testbrdata->{$testname} =
+				apply_diff_to_brcount($testbrdata->{$testname},
+						      $line_hash);
+			# Remove empty sets of test data
+			if (scalar(keys(%{$testdata->{$testname}})) == 0)
+			{
+				delete($testdata->{$testname});
+				delete($testfncdata->{$testname});
+				delete($testbrdata->{$testname});
+			}
+		}
+		# Rename test data to indicate conversion
+		foreach $testname (keys(%{$testdata}))
+		{
+			# Skip testnames which already contain an extension
+			if ($testname =~ /,[^,]+$/)
+			{
+				next;
+			}
+			# Check for name conflict
+			if (defined($testdata->{$testname.",diff"}))
+			{
+				# Add counts
+				($testdata->{$testname}) = add_counts(
+					$testdata->{$testname},
+					$testdata->{$testname.",diff"});
+				delete($testdata->{$testname.",diff"});
+				# Add function call counts
+				($testfncdata->{$testname}) = add_fnccount(
+					$testfncdata->{$testname},
+					$testfncdata->{$testname.",diff"});
+				delete($testfncdata->{$testname.",diff"});
+				# Add branch counts
+				combine_brcount(
+					$testbrdata->{$testname},
+					$testbrdata->{$testname.",diff"},
+					$BR_ADD, 1);
+				delete($testbrdata->{$testname.",diff"});
+			}
+			# Move test data to new testname
+			$testdata->{$testname.",diff"} = $testdata->{$testname};
+			delete($testdata->{$testname});
+			# Move function call count data to new testname
+			$testfncdata->{$testname.",diff"} =
+				$testfncdata->{$testname};
+			delete($testfncdata->{$testname});
+			# Move branch count data to new testname
+			$testbrdata->{$testname.",diff"} =
+				$testbrdata->{$testname};
+			delete($testbrdata->{$testname});
+		}
+		# Convert summary of test data
+		$sumcount = apply_diff($sumcount, $line_hash);
+		# Convert function data
+		$funcdata = apply_diff_to_funcdata($funcdata, $line_hash);
+		# Convert branch coverage data
+		$sumbrcount = apply_diff_to_brcount($sumbrcount, $line_hash);
+		# Convert checksum data
+		$checkdata = apply_diff($checkdata, $line_hash);
+		# Convert function call count data
+		adjust_fncdata($funcdata, $testfncdata, $sumfnccount);
+		($f_found, $f_hit) = get_func_found_and_hit($sumfnccount);
+		($br_found, $br_hit) = get_br_found_and_hit($sumbrcount);
+		# Update found/hit numbers
+		$found = 0;
+		$hit = 0;
+		foreach (keys(%{$sumcount}))
+		{
+			$found++;
+			if ($sumcount->{$_} > 0)
+			{
+				$hit++;
+			}
+		}
+		if ($found > 0)
+		{
+			# Store converted entry
+			set_info_entry($entry, $testdata, $sumcount, $funcdata,
+				       $checkdata, $testfncdata, $sumfnccount,
+				       $testbrdata, $sumbrcount, $found, $hit,
+				       $f_found, $f_hit, $br_found, $br_hit);
+		}
+		else
+		{
+			# Remove empty data set
+			delete($trace_data->{$filename});
+		}
+        }
+
+	# Convert filenames as well if requested
+	if ($convert_filenames)
+	{
+		convert_paths($trace_data, \%path_conversion_data);
+	}
+
+	info("$converted entr".($converted != 1 ? "ies" : "y")." converted, ".
+	     "$unchanged entr".($unchanged != 1 ? "ies" : "y")." left ".
+	     "unchanged.\n");
+
+	# Write data
+	if (!$data_stdout)
+	{
+		info("Writing data to $output_filename\n");
+		open(INFO_HANDLE, ">", $output_filename)
+			or die("ERROR: cannot write to $output_filename!\n");
+		@result = write_info_file(*INFO_HANDLE, $trace_data);
+		close(*INFO_HANDLE);
+	}
+	else
+	{
+		@result = write_info_file(*STDOUT, $trace_data);
+	}
+
+	return @result;
+}
+
+#
+# summary()
+#
+# Read all trace files named by the global @opt_summary list, combine
+# their data and return the overall coverage totals:
+#
+#   (ln_found, ln_hit, fn_found, fn_hit, br_found, br_hit)
+#
+
+sub summary()
+{
+	my $filename;
+	my $current;
+	my $total;
+	# Initialize totals to zero so that an empty file list or entries
+	# without data return 0 instead of undef, and the += operations
+	# below do not emit "use of uninitialized value" warnings.
+	my $ln_total_found = 0;
+	my $ln_total_hit = 0;
+	my $fn_total_found = 0;
+	my $fn_total_hit = 0;
+	my $br_total_found = 0;
+	my $br_total_hit = 0;
+
+	# Read and combine trace files
+	foreach $filename (@opt_summary) {
+		$current = read_info_file($filename);
+		if (!defined($total)) {
+			$total = $current;
+		} else {
+			$total = combine_info_files($total, $current);
+		}
+	}
+	# Calculate coverage data
+	foreach $filename (keys(%{$total}))
+	{
+		my $entry = $total->{$filename};
+		my $ln_found;
+		my $ln_hit;
+		my $fn_found;
+		my $fn_hit;
+		my $br_found;
+		my $br_hit;
+
+		(undef, undef, undef, undef, undef, undef, undef, undef,
+			$ln_found, $ln_hit, $fn_found, $fn_hit, $br_found,
+			$br_hit) = get_info_entry($entry);
+
+		# Add to totals
+		$ln_total_found	+= $ln_found;
+		$ln_total_hit	+= $ln_hit;
+		$fn_total_found += $fn_found;
+		$fn_total_hit	+= $fn_hit;
+		$br_total_found += $br_found;
+		$br_total_hit	+= $br_hit;
+	}
+
+	return ($ln_total_found, $ln_total_hit, $fn_total_found, $fn_total_hit,
+		$br_total_found, $br_total_hit);
+}
+
+#
+# system_no_output(mode, parameters)
+#
+# Call an external program using PARAMETERS while suppressing depending on
+# the value of MODE:
+#
+#   MODE & 1: suppress STDOUT
+#   MODE & 2: suppress STDERR
+#
+# Return 0 on success, non-zero otherwise (the raw wait status $? from
+# system()).
+#
+
+sub system_no_output($@)
+{
+	my $mode = shift;
+	my $result;
+	local *OLD_STDERR;
+	local *OLD_STDOUT;
+
+	# Save old stdout and stderr handles (dup via ">>&")
+	($mode & 1) && open(OLD_STDOUT, ">>&", "STDOUT");
+	($mode & 2) && open(OLD_STDERR, ">>&", "STDERR");
+
+	# Redirect to /dev/null
+	($mode & 1) && open(STDOUT, ">", "/dev/null");
+	($mode & 2) && open(STDERR, ">", "/dev/null");
+ 
+	system(@_);
+	$result = $?;
+
+	# Close redirected handles
+	($mode & 1) && close(STDOUT);
+	($mode & 2) && close(STDERR);
+
+	# Restore old handles
+	($mode & 1) && open(STDOUT, ">>&", "OLD_STDOUT");
+	($mode & 2) && open(STDERR, ">>&", "OLD_STDERR");
+ 
+	return $result;
+}
+
+
+#
+# read_config(filename)
+#
+# Read configuration file FILENAME and return a reference to a hash containing
+# all valid key=value pairs found. Comments ('#' to end of line) and
+# surrounding whitespace are stripped; malformed lines produce a warning
+# and are skipped. Returns undef if the file cannot be opened.
+#
+
+sub read_config($)
+{
+	my $filename = $_[0];
+	my %result;
+	my $key;
+	my $value;
+	local *HANDLE;
+
+	if (!open(HANDLE, "<", $filename))
+	{
+		warn("WARNING: cannot read configuration file $filename\n");
+		return undef;
+	}
+	while (<HANDLE>)
+	{
+		chomp;
+		# Skip comments
+		s/#.*//;
+		# Remove leading blanks
+		s/^\s+//;
+		# Remove trailing blanks
+		s/\s+$//;
+		next unless length;
+		($key, $value) = split(/\s*=\s*/, $_, 2);
+		if (defined($key) && defined($value))
+		{
+			$result{$key} = $value;
+		}
+		else
+		{
+			warn("WARNING: malformed statement in line $. ".
+			     "of configuration file $filename\n");
+		}
+	}
+	close(HANDLE);
+	return \%result;
+}
+
+
+#
+# apply_config(REF)
+#
+# REF is a reference to a hash containing the following mapping:
+#
+#   key_string => var_ref
+#
+# where KEY_STRING is a keyword and VAR_REF is a reference to an associated
+# variable. If the global configuration hashes CONFIG or OPT_RC contain a value
+# for keyword KEY_STRING, VAR_REF will be assigned the value for that keyword.
+# Command-line settings (%opt_rc) take precedence over the configuration
+# file ($config).
+#
+
+sub apply_config($)
+{
+	my $ref = $_[0];
+
+	foreach (keys(%{$ref}))
+	{
+		if (defined($opt_rc{$_})) {
+			${$ref->{$_}} = $opt_rc{$_};
+		} elsif (defined($config->{$_})) {
+			${$ref->{$_}} = $config->{$_};
+		}
+	}
+}
+
+# Prefix warnings with the tool name (installed as $SIG{__WARN__}).
+sub warn_handler($)
+{
+	my ($msg) = @_;
+
+	warn("$tool_name: $msg");
+}
+
+# Clean up temporary directories before dying (installed as $SIG{__DIE__}).
+sub die_handler($)
+{
+	my ($msg) = @_;
+
+	temp_cleanup();
+	die("$tool_name: $msg");
+}
+
+# Signal handler: clean up and exit with error status.
+sub abort_handler($)
+{
+	temp_cleanup();
+	exit(1);
+}
+
+# Remove all temporary directories recorded in the global @temp_dirs list.
+sub temp_cleanup()
+{
+	# Ensure temp directory is not in use by current process
+	chdir("/");
+
+	if (@temp_dirs) {
+		info("Removing temporary directories.\n");
+		foreach (@temp_dirs) {
+			rmtree($_);
+		}
+		@temp_dirs = ();
+	}
+}
+
+# Try to mount debugfs so that /sys/kernel/debug/gcov becomes available.
+# Output is suppressed; failure is ignored (caller re-checks availability).
+sub setup_gkv_sys()
+{
+	system_no_output(3, "mount", "-t", "debugfs", "nodev",
+			 "/sys/kernel/debug");
+}
+
+# Try to load the gcov kernel module providing /proc/gcov (module name
+# differs between kernel versions: gcov_proc or gcov_prof).
+sub setup_gkv_proc()
+{
+	if (system_no_output(3, "modprobe", "gcov_proc")) {
+		system_no_output(3, "modprobe", "gcov_prof");
+	}
+}
+
+# Return non-zero if DIR looks like a debugfs-based gcov directory
+# (identified by the presence of the 'reset' file).
+sub check_gkv_sys($)
+{
+	my ($dir) = @_;
+
+	if (-e "$dir/reset") {
+		return 1;
+	}
+	return 0;
+}
+
+# Return non-zero if DIR looks like a procfs-based gcov directory
+# (identified by the presence of the 'vmlinux' file).
+sub check_gkv_proc($)
+{
+	my ($dir) = @_;
+
+	if (-e "$dir/vmlinux") {
+		return 1;
+	}
+	return 0;
+}
+
+# Detect (and if necessary set up) gcov kernel support. Returns a list
+# ($GKV_SYS|$GKV_PROC, directory) or dies if no support is found.
+# The @todo list encodes the probe order using two-letter codes:
+#   "cs" = check /sys,  "cp" = check /proc,
+#   "ss" = setup /sys,  "sp" = setup /proc.
+sub setup_gkv()
+{
+	my $dir;
+	my $sys_dir = "/sys/kernel/debug/gcov";
+	my $proc_dir = "/proc/gcov";
+	my @todo;
+
+	if (!defined($gcov_dir)) {
+		info("Auto-detecting gcov kernel support.\n");
+		@todo = ( "cs", "cp", "ss", "cs", "sp", "cp" );
+	} elsif ($gcov_dir =~ /proc/) {
+		info("Checking gcov kernel support at $gcov_dir ".
+		     "(user-specified).\n");
+		@todo = ( "cp", "sp", "cp", "cs", "ss", "cs");
+	} else {
+		info("Checking gcov kernel support at $gcov_dir ".
+		     "(user-specified).\n");
+		@todo = ( "cs", "ss", "cs", "cp", "sp", "cp", );
+	}
+	foreach (@todo) {
+		if ($_ eq "cs") {
+			# Check /sys
+			$dir = defined($gcov_dir) ? $gcov_dir : $sys_dir;
+			if (check_gkv_sys($dir)) {
+				info("Found ".$GKV_NAME[$GKV_SYS]." gcov ".
+				     "kernel support at $dir\n");
+				return ($GKV_SYS, $dir);
+			}
+		} elsif ($_ eq "cp") {
+			# Check /proc
+			$dir = defined($gcov_dir) ? $gcov_dir : $proc_dir;
+			if (check_gkv_proc($dir)) {
+				info("Found ".$GKV_NAME[$GKV_PROC]." gcov ".
+				     "kernel support at $dir\n");
+				return ($GKV_PROC, $dir);
+			}
+		} elsif ($_ eq "ss") {
+			# Setup /sys
+			setup_gkv_sys();
+		} elsif ($_ eq "sp") {
+			# Setup /proc
+			setup_gkv_proc();
+		}
+	}
+	if (defined($gcov_dir)) {
+		die("ERROR: could not find gcov kernel data at $gcov_dir\n");
+	} else {
+		die("ERROR: no gcov kernel data found\n");
+	}
+}
+
+
+#
+# get_overall_line(found, hit, name_singular, name_plural)
+#
+# Return a string containing overall information for the specified
+# found/hit data, e.g. "75.0% (3 of 4 lines)", or "no data found" when
+# FOUND is undefined or zero.
+#
+
+sub get_overall_line($$$$)
+{
+	my ($found, $hit, $name_sn, $name_pl) = @_;
+	my $name;
+
+	return "no data found" if (!defined($found) || $found == 0);
+	$name = ($found == 1) ? $name_sn : $name_pl;
+
+	return rate($hit, $found, "% ($hit of $found $name)");
+}
+
+
+#
+# print_overall_rate(ln_do, ln_found, ln_hit, fn_do, fn_found, fn_hit, br_do
+#                    br_found, br_hit)
+#
+# Print overall coverage rates for the specified coverage types. The *_do
+# flags select which of the three sections (lines, functions, branches)
+# are printed.
+#
+
+sub print_overall_rate($$$$$$$$$)
+{
+	my ($ln_do, $ln_found, $ln_hit, $fn_do, $fn_found, $fn_hit,
+	    $br_do, $br_found, $br_hit) = @_;
+
+	info("Summary coverage rate:\n");
+	info("  lines......: %s\n",
+	     get_overall_line($ln_found, $ln_hit, "line", "lines"))
+		if ($ln_do);
+	info("  functions..: %s\n",
+	     get_overall_line($fn_found, $fn_hit, "function", "functions"))
+		if ($fn_do);
+	info("  branches...: %s\n",
+	     get_overall_line($br_found, $br_hit, "branch", "branches"))
+		if ($br_do);
+}
+
+
+#
+# rate(hit, found[, suffix, precision, width])
+#
+# Return the coverage rate [0..100] for HIT and FOUND values. 0 is only
+# returned when HIT is 0. 100 is only returned when HIT equals FOUND.
+# PRECISION specifies the precision of the result. SUFFIX defines a
+# string that is appended to the result if FOUND is non-zero. Spaces
+# are added to the start of the resulting string until it is at least WIDTH
+# characters wide. Returns "-" (padded to WIDTH) when FOUND is undefined
+# or zero.
+#
+
+sub rate($$;$$$)
+{
+        my ($hit, $found, $suffix, $precision, $width) = @_;
+        my $rate; 
+
+	# Assign defaults if necessary
+        $precision	= 1	if (!defined($precision));
+	$suffix		= ""	if (!defined($suffix));
+	$width		= 0	if (!defined($width));
+        
+        return sprintf("%*s", $width, "-") if (!defined($found) || $found == 0);
+        $rate = sprintf("%.*f", $precision, $hit * 100 / $found);
+
+	# Adjust rates if necessary: never show exactly 0 for non-zero hits,
+	# never show exactly 100 unless everything was hit
+        if ($rate == 0 && $hit > 0) {
+		$rate = sprintf("%.*f", $precision, 1 / 10 ** $precision);
+        } elsif ($rate == 100 && $hit != $found) {
+		$rate = sprintf("%.*f", $precision, 100 - 1 / 10 ** $precision);
+	}
+
+	return sprintf("%*s", $width, $rate.$suffix);
+}
diff --git a/ThirdParty/lcov/bin/updateversion.pl b/ThirdParty/lcov/bin/updateversion.pl
new file mode 100755
index 0000000000000000000000000000000000000000..19db81ecd3e3228e0f5115dada99755eae2f611d
--- /dev/null
+++ b/ThirdParty/lcov/bin/updateversion.pl
@@ -0,0 +1,194 @@
+#!/usr/bin/env perl
+#
+# updateversion.pl DIRECTORY|FILE VERSION RELEASE FULL_VERSION
+#
+# Rewrite version numbers, release strings and embedded dates in the lcov
+# man pages, tool scripts, README, RPM spec file and .version file, either
+# for a whole source tree (DIRECTORY) or for a single FILE.
+#
+
+use strict;
+use warnings;
+
+use File::Basename;
+
+# Forward declarations
+sub update_man_page($);
+sub update_bin_tool($);
+sub update_txt_file($);
+sub update_spec_file($);
+sub write_version_file($);
+sub get_file_info($);
+
+# Command line arguments
+our $directory = $ARGV[0];
+our $version = $ARGV[1];
+our $release = $ARGV[2];
+our $full = $ARGV[3];
+
+# Files to update, relative to DIRECTORY
+our @man_pages = ("man/gendesc.1",  "man/genhtml.1",  "man/geninfo.1",
+		  "man/genpng.1", "man/lcov.1", "man/lcovrc.5");
+our @bin_tools = ("bin/gendesc", "bin/genhtml", "bin/geninfo",
+		  "bin/genpng", "bin/lcov");
+our @txt_files = ("README");
+our @spec_files = ("rpm/lcov.spec");
+
+if (!defined($directory) || !defined($version) || !defined($release)) {
+	die("Usage: $0 DIRECTORY|FILE VERSION RELEASE FULL_VERSION\n");
+}
+
+# Determine mode of operation: single-file mode dispatches on the file's
+# basename; directory mode updates every known file under $directory.
+if (-f $directory) {
+	my $file = $directory;
+	my $base = basename($file);
+
+	if (grep(/^$base$/, map({ basename($_) } @man_pages))) {
+		print("Updating man page $file\n");
+		update_man_page($file);
+	} elsif (grep(/^$base$/, map({ basename($_) } @bin_tools))) {
+		print("Updating bin tool $file\n");
+		update_bin_tool($file);
+	} elsif (grep(/^$base$/, map({ basename($_) } @txt_files))) {
+		print("Updating text file $file\n");
+		update_txt_file($file);
+	} elsif (grep(/^$base$/, map({ basename($_) } @spec_files))) {
+		print("Updating spec file $file\n");
+		update_spec_file($file);
+	} elsif ($base eq ".version") {
+		print("Updating version file $file\n");
+		write_version_file($file);
+	} else {
+		print("WARNING: Skipping unknown file $file\n");
+	}
+	print("Done.\n");
+	exit(0);
+}
+
+# Directory mode: update all known files in place
+foreach (@man_pages) {
+	print("Updating man page $_\n");
+	update_man_page($directory."/".$_);
+}
+foreach (@bin_tools) {
+	print("Updating bin tool $_\n");
+	update_bin_tool($directory."/".$_);
+}
+foreach (@txt_files) {
+	print("Updating text file $_\n");
+	update_txt_file($directory."/".$_);
+}
+foreach (@spec_files) {
+	print("Updating spec file $_\n");
+	update_spec_file($directory."/".$_);
+}
+print("Updating version file $directory/.version\n");
+write_version_file("$directory/.version");
+print("Done.\n");
+
+# get_file_info(filename)
+#
+# Return a list (date_string, touch_timestamp, mode) for FILENAME based on
+# its modification time (UTC): "YYYY-MM-DD", a 'touch -t' compatible
+# "YYYYMMDDhhmm.ss" stamp, and the octal permission bits. Returns (0, 0, 0)
+# if the file does not exist.
+sub get_file_info($)
+{
+	my ($filename) = @_;
+	my ($sec, $min, $hour, $year, $month, $day);
+	my @stat;
+	my $gittime;
+
+	return (0, 0, 0) if (!-e $filename);
+	@stat = stat($filename);
+	# stat[9] is mtime; gmtime returns (sec, min, hour, mday, mon, year)
+	($sec, $min, $hour, $day, $month, $year) = gmtime($stat[9]);
+	$year += 1900;
+	$month += 1;
+
+	return (sprintf("%04d-%02d-%02d", $year, $month, $day),
+		sprintf("%04d%02d%02d%02d%02d.%02d", $year, $month, $day,
+			$hour, $min, $sec),
+		sprintf("%o", $stat[2] & 07777));
+}
+
+# update_man_page(filename)
+#
+# Replace the "LCOV <version>" string and the escaped date in a man page,
+# preserving the file's original permissions and modification time.
+sub update_man_page($)
+{
+	my ($filename) = @_;
+	my @date = get_file_info($filename);
+	my $date_string = $date[0];
+	local *IN;
+	local *OUT;
+
+	# Dates in man pages use roff-escaped dashes ("\-")
+	$date_string =~ s/-/\\-/g;
+	open(IN, "<$filename") || die ("Error: cannot open $filename\n");
+	open(OUT, ">$filename.new") ||
+		die("Error: cannot create $filename.new\n");
+	while (<IN>) {
+		s/\"LCOV\s+\d+\.\d+\"/\"LCOV $version\"/g;
+		s/\d\d\d\d\\\-\d\d\\\-\d\d/$date_string/g;
+		print(OUT $_);
+	}
+	close(OUT);
+	close(IN);
+	# Restore original permissions and timestamp on the replacement file
+	chmod(oct($date[2]), "$filename.new");
+	system("mv", "-f", "$filename.new", "$filename");
+	system("touch", "$filename", "-t", $date[1]);
+}
+
+# update_bin_tool(filename)
+#
+# Replace the $lcov_version assignment in a tool script with the full
+# version string, preserving original permissions and modification time.
+sub update_bin_tool($)
+{
+	my ($filename) = @_;
+	my @date = get_file_info($filename);
+	local *IN;
+	local *OUT;
+
+	open(IN, "<$filename") || die ("Error: cannot open $filename\n");
+	open(OUT, ">$filename.new") ||
+		die("Error: cannot create $filename.new\n");
+	while (<IN>) {
+		s/^(our\s+\$lcov_version\s*=).*$/$1 "LCOV version $full";/g;
+		print(OUT $_);
+	}
+	close(OUT);
+	close(IN);
+	# Restore original permissions and timestamp on the replacement file
+	chmod(oct($date[2]), "$filename.new");
+	system("mv", "-f", "$filename.new", "$filename");
+	system("touch", "$filename", "-t", $date[1]);
+}
+
+# update_txt_file(filename)
+#
+# Replace the "Last changes: YYYY-MM-DD" date in a text file, preserving
+# original permissions and modification time.
+sub update_txt_file($)
+{
+	my ($filename) = @_;
+	my @date = get_file_info($filename);
+	local *IN;
+	local *OUT;
+
+	open(IN, "<$filename") || die ("Error: cannot open $filename\n");
+	open(OUT, ">$filename.new") ||
+		die("Error: cannot create $filename.new\n");
+	while (<IN>) {
+		s/(Last\s+changes:\s+)\d\d\d\d-\d\d-\d\d/$1$date[0]/g;
+		print(OUT $_);
+	}
+	close(OUT);
+	close(IN);
+	# Restore original permissions and timestamp on the replacement file
+	chmod(oct($date[2]), "$filename.new");
+	system("mv", "-f", "$filename.new", "$filename");
+	system("touch", "$filename", "-t", $date[1]);
+}
+
+# update_spec_file(filename)
+#
+# Replace the Version: and Release: fields of an RPM spec file, preserving
+# original permissions and modification time.
+sub update_spec_file($)
+{
+	my ($filename) = @_;
+	my @date = get_file_info($filename);
+	local *IN;
+	local *OUT;
+
+	open(IN, "<$filename") || die ("Error: cannot open $filename\n");
+	open(OUT, ">$filename.new") ||
+		die("Error: cannot create $filename.new\n");
+	while (<IN>) {
+		s/^(Version:\s*)\d+\.\d+.*$/$1$version/;
+		s/^(Release:\s*).*$/$1$release/;
+		print(OUT $_);
+	}
+	close(OUT);
+	close(IN);
+	# Restore the original file mode, as done in the other update_*
+	# helpers; previously the replacement spec file kept umask-default
+	# permissions instead of the original file's mode.
+	chmod(oct($date[2]), "$filename.new");
+	system("mv", "-f", "$filename.new", "$filename");
+	system("touch", "$filename", "-t", $date[1]);
+}
+
+# write_version_file(filename)
+#
+# Write the VERSION/RELEASE/FULL key-value lines to FILENAME.
+sub write_version_file($)
+{
+	my ($filename) = @_;
+
+	open(my $fd, ">", $filename)
+		or die("Error: cannot write $filename: $!\n");
+	print($fd "VERSION=$version\n");
+	print($fd "RELEASE=$release\n");
+	print($fd "FULL=$full\n");
+	close($fd);
+}
diff --git a/ThirdParty/lcov/example/Makefile b/ThirdParty/lcov/example/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..2f698a1b32f3186a9e7d093742141b9a478f39f9
--- /dev/null
+++ b/ThirdParty/lcov/example/Makefile
@@ -0,0 +1,98 @@
+#
+# Makefile for the LCOV example program.
+#
+# Make targets:
+#   - example: compile the example program
+#   - output:  run test cases on example program and create HTML output
+#   - clean:   clean up directory
+#
+# NOTE(review): 'all', 'all_tests', 'example' and 'descriptions' are not
+# listed in .PHONY; they are real targets or aliases -- confirm intended.
+#
+
+CC      := gcc
+CFLAGS  := -Wall -I. -fprofile-arcs -ftest-coverage
+
+LCOV    := ../bin/lcov
+GENHTML := ../bin/genhtml
+GENDESC := ../bin/gendesc
+GENPNG  := ../bin/genpng
+
+# Depending on the presence of the GD.pm perl module, we can use the
+# special option '--frames' for genhtml
+USE_GENPNG := $(shell $(GENPNG) --help >/dev/null 2>/dev/null; echo $$?)
+
+ifeq ($(USE_GENPNG),0)
+  FRAMES := --frames
+else
+  FRAMES :=
+endif
+
+.PHONY: clean output test_noargs test_2_to_2000 test_overflow
+
+all: output
+
+example: example.o iterate.o gauss.o
+	$(CC) example.o iterate.o gauss.o -o example -lgcov
+
+example.o: example.c iterate.h gauss.h
+	$(CC) $(CFLAGS) -c example.c -o example.o
+
+iterate.o: methods/iterate.c iterate.h
+	$(CC) $(CFLAGS) -c methods/iterate.c -o iterate.o
+
+gauss.o: methods/gauss.c gauss.h
+	$(CC) $(CFLAGS) -c methods/gauss.c -o gauss.o
+
+# Run all test cases, then merge their trace files into one HTML report
+output: example descriptions test_noargs test_2_to_2000 test_overflow
+	@echo
+	@echo '*'
+	@echo '* Generating HTML output'
+	@echo '*'
+	@echo
+	$(GENHTML) trace_noargs.info trace_args.info trace_overflow.info \
+		   --output-directory output --title "Basic example" \
+		   --show-details --description-file descriptions $(FRAMES) \
+		   --legend
+	@echo
+	@echo '*'
+	@echo '* See '`pwd`/output/index.html
+	@echo '*'
+	@echo
+
+descriptions: descriptions.txt
+	$(GENDESC) descriptions.txt -o descriptions
+
+all_tests: example test_noargs test_2_to_2000 test_overflow
+
+test_noargs:
+	@echo
+	@echo '*'
+	@echo '* Test case 1: running ./example without parameters'
+	@echo '*'
+	@echo
+	$(LCOV) --zerocounters --directory .
+	./example
+	$(LCOV) --capture --directory . --output-file trace_noargs.info --test-name test_noargs --no-external
+
+test_2_to_2000:
+	@echo
+	@echo '*'
+	@echo '* Test case 2: running ./example 2 2000'
+	@echo '*'
+	@echo
+	$(LCOV) --zerocounters --directory .
+	./example 2 2000
+	$(LCOV) --capture --directory . --output-file trace_args.info --test-name test_2_to_2000 --no-external
+
+test_overflow:
+	@echo
+	@echo '*'
+	@echo '* Test case 3: running ./example 0 100000 (causes an overflow)'
+	@echo '*'
+	@echo
+	$(LCOV) --zerocounters --directory .
+	./example 0 100000 || true
+	$(LCOV) --capture --directory . --output-file trace_overflow.info --test-name "test_overflow" --no-external
+
+clean:
+	rm -rf *.o *.bb *.bbg *.da *.gcno *.gcda *.info output example \
+	descriptions
+
diff --git a/ThirdParty/lcov/example/README b/ThirdParty/lcov/example/README
new file mode 100644
index 0000000000000000000000000000000000000000..cf6cf2e4c688d32220293484bcfaf9794f16abf8
--- /dev/null
+++ b/ThirdParty/lcov/example/README
@@ -0,0 +1,6 @@
+
+To see an example of what the LCOV-generated HTML output looks like,
+type 'make output' and point a web browser at the resulting file
+
+  output/index.html
+
diff --git a/ThirdParty/lcov/example/descriptions.txt b/ThirdParty/lcov/example/descriptions.txt
new file mode 100644
index 0000000000000000000000000000000000000000..47e6021310d3ed1bb55d2a1bf239ff9dab1d2b82
--- /dev/null
+++ b/ThirdParty/lcov/example/descriptions.txt
@@ -0,0 +1,10 @@
+test_noargs
+	Example program is called without arguments so that default range
+	[0..9] is used.
+
+test_2_to_2000
+	Example program is called with "2" and "2000" as arguments.
+
+test_overflow
+	Example program is called with "0" and "100000" as arguments. The
+	resulting sum is too large to be stored as an int variable.
diff --git a/ThirdParty/lcov/example/example.c b/ThirdParty/lcov/example/example.c
new file mode 100644
index 0000000000000000000000000000000000000000..f9049aa64bafad3aad81910c355c625742290008
--- /dev/null
+++ b/ThirdParty/lcov/example/example.c
@@ -0,0 +1,60 @@
+/*
+ *  example.c
+ * 
+ *  Calculate the sum of a given range of integer numbers. The range is
+ *  specified by providing two integer numbers as command line argument.
+ *  If no arguments are specified, assume the predefined range [0..9].
+ *  Abort with an error message if the resulting number is too big to be
+ *  stored as int variable.
+ *
+ *  This program example is similar to the one found in the GCOV documentation.
+ *  It is used to demonstrate the HTML output generated by LCOV.
+ *
+ *  The program is split into 3 modules to better demonstrate the 'directory
+ *  overview' function. There are also a lot of bloated comments inserted to
+ *  artificially increase the source code size so that the 'source code
+ *  overview' function makes at least a minimum of sense.
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include "iterate.h"
+#include "gauss.h"
+
+static int start = 0;
+static int end = 9;
+
+
+int main (int argc, char* argv[])
+{
+	int total1, total2;
+
+	/* Accept a pair of numbers as command line arguments. */
+
+	if (argc == 3)
+	{
+		start	= atoi(argv[1]);
+		end	= atoi(argv[2]);
+	}
+
+
+	/* Use both methods to calculate the result. */
+
+	total1 = iterate_get_sum (start, end);
+	total2 = gauss_get_sum (start, end);
+
+
+	/* Make sure both results are the same. */
+
+	if (total1 != total2)
+	{
+		printf ("Failure (%d != %d)!\n", total1, total2);
+	}
+	else
+	{
+		printf ("Success, sum[%d..%d] = %d\n", start, end, total1);
+	}
+
+	return 0;
+}
diff --git a/ThirdParty/lcov/example/gauss.h b/ThirdParty/lcov/example/gauss.h
new file mode 100644
index 0000000000000000000000000000000000000000..302a4a980382ed38909900cc941e91069a30b96e
--- /dev/null
+++ b/ThirdParty/lcov/example/gauss.h
@@ -0,0 +1,6 @@
+#ifndef GAUSS_H
+#define GAUSS_H GAUSS_h
+
+extern int gauss_get_sum (int min, int max);
+
+#endif /* GAUSS_H */
diff --git a/ThirdParty/lcov/example/iterate.h b/ThirdParty/lcov/example/iterate.h
new file mode 100644
index 0000000000000000000000000000000000000000..471327951cf470e9e5b1e7286583c15124b28c3e
--- /dev/null
+++ b/ThirdParty/lcov/example/iterate.h
@@ -0,0 +1,6 @@
+#ifndef ITERATE_H
+#define ITERATE_H ITERATE_H
+
+extern int iterate_get_sum (int min, int max);
+
+#endif /* ITERATE_H */
diff --git a/ThirdParty/lcov/example/methods/gauss.c b/ThirdParty/lcov/example/methods/gauss.c
new file mode 100644
index 0000000000000000000000000000000000000000..9da3ce50835b1f32141b1d4ba321abd053ac0aac
--- /dev/null
+++ b/ThirdParty/lcov/example/methods/gauss.c
@@ -0,0 +1,48 @@
+/*
+ *  methods/gauss.c
+ *
+ *  Calculate the sum of a given range of integer numbers.
+ *
+ *  Somewhat of a more subtle way of calculation - and it even has a story
+ *  behind it:
+ *
+ *  Supposedly during math classes in elementary school, the teacher of
+ *  young mathematician Gauss gave the class an assignment to calculate the
+ *  sum of all natural numbers between 1 and 100, hoping that this task would
+ *  keep the kids occupied for some time. The story goes that Gauss had the
+ *  result ready after only a few minutes. What he had written on his black
+ *  board was something like this:
+ *
+ *    1 + 100 = 101
+ *    2 + 99  = 101
+ *    3 + 98  = 101
+ *    .
+ *    .
+ *    100 + 1 = 101
+ *
+ *    s = (1/2) * 100 * 101 = 5050
+ *
+ *  A more general form of this formula would be
+ *  
+ *    s = (1/2) * (max + min) * (max - min + 1)
+ *
+ *  which is used in the piece of code below to implement the requested
+ *  function in constant time, i.e. without dependencies on the size of the
+ *  input parameters.
+ *
+ */
+
+#include "gauss.h"
+
+
+int gauss_get_sum (int min, int max)
+{
+	/* This algorithm doesn't work well with invalid range specifications
+	   so we're intercepting them here. */
+	if (max < min)
+	{
+		return 0;
+	}
+
+	return (int) ((max + min) * (double) (max - min + 1) / 2);
+}
diff --git a/ThirdParty/lcov/example/methods/iterate.c b/ThirdParty/lcov/example/methods/iterate.c
new file mode 100644
index 0000000000000000000000000000000000000000..023d1801c9364f71c994e8c0dbe04b93f3992f34
--- /dev/null
+++ b/ThirdParty/lcov/example/methods/iterate.c
@@ -0,0 +1,45 @@
+/*
+ *  methods/iterate.c
+ *  
+ *  Calculate the sum of a given range of integer numbers.
+ *
+ *  This particular method of implementation works by way of brute force,
+ *  i.e. it iterates over the entire range while adding the numbers to finally
+ *  get the total sum. As a positive side effect, we're able to easily detect
+ *  overflows, i.e. situations in which the sum would exceed the capacity
+ *  of an integer variable.
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include "iterate.h"
+
+
+int iterate_get_sum (int min, int max)
+{
+	int i, total;
+
+	total = 0;
+
+	/* This is where we loop over each number in the range, including
+	   both the minimum and the maximum number. */
+
+	for (i = min; i <= max; i++)
+	{
+		/* We can detect an overflow by checking whether the new
+		   sum would become negative. */
+
+		if (total + i < total)
+		{
+			printf ("Error: sum too large!\n");
+			exit (1);
+		}
+
+		/* Everything seems to fit into an int, so continue adding. */
+
+		total += i;
+	}
+
+	return total;
+}
diff --git a/ThirdParty/lcov/lcovrc b/ThirdParty/lcov/lcovrc
new file mode 100644
index 0000000000000000000000000000000000000000..40f364f17aa6497f4ccd8af98145367f4b8341ae
--- /dev/null
+++ b/ThirdParty/lcov/lcovrc
@@ -0,0 +1,169 @@
+#
+# /etc/lcovrc - system-wide defaults for LCOV
+#
+# To change settings for a single user, place a customized copy of this file
+# at location ~/.lcovrc
+#
+
+# Specify an external style sheet file (same as --css-file option of genhtml)
+#genhtml_css_file = gcov.css
+
+# Specify coverage rate limits (in %) for classifying file entries
+# HI:   hi_limit <= rate <= 100         graph color: green
+# MED: med_limit <= rate <  hi_limit    graph color: orange
+# LO:         0  <= rate <  med_limit   graph color: red
+genhtml_hi_limit = 90
+genhtml_med_limit = 75
+
+# Width of line coverage field in source code view
+genhtml_line_field_width = 12
+
+# Width of branch coverage field in source code view
+genhtml_branch_field_width = 16
+
+# Width of overview image (used by --frames option of genhtml)
+genhtml_overview_width = 80
+
+# Resolution of overview navigation: this number specifies the maximum
+# difference in lines between the position a user selected from the overview
+# and the position the source code window is scrolled to (used by --frames
+# option of genhtml)
+genhtml_nav_resolution = 4
+
+# Clicking a line in the overview image should show the source code view at
+# a position a bit further up so that the requested line is not the first
+# line in the window. This number specifies that offset in lines (used by
+# --frames option of genhtml)
+genhtml_nav_offset = 10
+
+# Do not remove unused test descriptions if non-zero (same as
+# --keep-descriptions option of genhtml)
+genhtml_keep_descriptions = 0
+
+# Do not remove prefix from directory names if non-zero (same as --no-prefix
+# option of genhtml)
+genhtml_no_prefix = 0
+
+# Do not create source code view if non-zero (same as --no-source option of
+# genhtml)
+genhtml_no_source = 0
+
+# Replace tabs with number of spaces in source view (same as --num-spaces
+# option of genhtml)
+genhtml_num_spaces = 8
+
+# Highlight lines with converted-only data if non-zero (same as --highlight
+# option of genhtml)
+genhtml_highlight = 0
+
+# Include color legend in HTML output if non-zero (same as --legend option of
+# genhtml)
+genhtml_legend = 0
+
+# Use FILE as HTML prolog for generated pages (same as --html-prolog option of
+# genhtml)
+#genhtml_html_prolog = FILE
+
+# Use FILE as HTML epilog for generated pages (same as --html-epilog option of
+# genhtml)
+#genhtml_html_epilog = FILE
+
+# Use custom filename extension for pages (same as --html-extension option of
+# genhtml)
+#genhtml_html_extension = html
+
+# Compress all generated html files with gzip.
+#genhtml_html_gzip = 1
+
+# Include sorted overview pages (can be disabled by the --no-sort option of
+# genhtml)
+genhtml_sort = 1
+
+# Include function coverage data display (can be disabled by the
+# --no-func-coverage option of genhtml)
+#genhtml_function_coverage = 1
+
+# Include branch coverage data display (can be disabled by the
+# --no-branch-coverage option of genhtml)
+#genhtml_branch_coverage = 1
+
+# Specify the character set of all generated HTML pages
+genhtml_charset=UTF-8
+
+# Allow HTML markup in test case description text if non-zero
+genhtml_desc_html=0
+
+# Specify the precision for coverage rates
+#genhtml_precision=1
+
+# Show missed counts instead of hit counts
+#genhtml_missed=1
+
+# Demangle C++ symbols
+#genhtml_demangle_cpp=1
+
+# Location of the gcov tool (same as --gcov-info option of geninfo)
+#geninfo_gcov_tool = gcov
+
+# Adjust test names to include operating system information if non-zero
+#geninfo_adjust_testname = 0
+
+# Calculate checksum for each source code line if non-zero (same as --checksum
+# option of geninfo if non-zero, same as --no-checksum if zero)
+#geninfo_checksum = 1
+
+# Specify whether to capture coverage data for external source files (can
+# be overridden by the --external and --no-external options of geninfo/lcov)
+#geninfo_external = 1
+
+# Enable libtool compatibility mode if non-zero (same as --compat-libtool option
+# of geninfo if non-zero, same as --no-compat-libtool if zero)
+#geninfo_compat_libtool = 0
+
+# Use gcov's --all-blocks option if non-zero
+#geninfo_gcov_all_blocks = 1
+
+# Specify compatibility modes (same as --compat option of geninfo).
+#geninfo_compat = libtool=on, hammer=auto, split_crc=auto
+
+# Adjust path to source files by removing or changing path components that
+# match the specified pattern (Perl regular expression format)
+#geninfo_adjust_src_path = /tmp/build => /usr/src
+
+# Specify if geninfo should try to automatically determine the base-directory
+# when collecting coverage data.
+geninfo_auto_base = 1
+
+# Directory containing gcov kernel files
+# lcov_gcov_dir = /proc/gcov
+
+# Location of the insmod tool
+lcov_insmod_tool = /sbin/insmod
+
+# Location of the modprobe tool
+lcov_modprobe_tool = /sbin/modprobe
+
+# Location of the rmmod tool
+lcov_rmmod_tool = /sbin/rmmod
+
+# Location for temporary directories
+lcov_tmp_dir = /tmp
+
+# Show full paths during list operation if non-zero (same as --list-full-path
+# option of lcov)
+lcov_list_full_path = 0
+
+# Specify the maximum width for list output. This value is ignored when
+# lcov_list_full_path is non-zero.
+lcov_list_width = 80
+
+# Specify the maximum percentage of file names which may be truncated when
+# choosing a directory prefix in list output. This value is ignored when
+# lcov_list_full_path is non-zero.
+lcov_list_truncate_max = 20
+
+# Specify if function coverage data should be collected and processed.
+lcov_function_coverage = 1
+
+# Specify if branch coverage data should be collected and processed.
+lcov_branch_coverage = 0
diff --git a/ThirdParty/lcov/man/gendesc.1 b/ThirdParty/lcov/man/gendesc.1
new file mode 100644
index 0000000000000000000000000000000000000000..9c9a7084db2f39c8245a19c2a08db3e610acf5b8
--- /dev/null
+++ b/ThirdParty/lcov/man/gendesc.1
@@ -0,0 +1,78 @@
+.TH gendesc 1 "LCOV 1.14" 2019\-02\-28 "User Manuals"
+.SH NAME
+gendesc \- Generate a test case description file
+.SH SYNOPSIS
+.B gendesc
+.RB [ \-h | \-\-help ]
+.RB [ \-v | \-\-version ]
+.RS 8
+.br
+.RB [ \-o | \-\-output\-filename
+.IR filename ]
+.br
+.I inputfile
+.SH DESCRIPTION
+Convert plain text test case descriptions into a format as understood by
+.BR genhtml .
+.I inputfile
+needs to observe the following format:
+
+For each test case:
+.IP "     \-"
+one line containing the test case name beginning at the start of the line
+.RE
+.IP "     \-"
+one or more lines containing the test case description indented with at
+least one whitespace character (tab or space)
+.RE
+
+.B Example input file:
+
+test01
+.RS
+An example test case description.
+.br
+Description continued
+.RE
+
+test42
+.RS
+Supposedly the answer to most of your questions
+.RE
+
+Note: valid test names can consist of letters, decimal digits and the
+underscore character ('_').
+.SH OPTIONS
+.B \-h
+.br
+.B \-\-help
+.RS
+Print a short help text, then exit.
+.RE
+
+.B \-v
+.br
+.B \-\-version
+.RS
+Print version number, then exit.
+.RE
+
+
+.BI "\-o " filename
+.br
+.BI "\-\-output\-filename " filename
+.RS
+Write description data to
+.IR filename .
+
+By default, output is written to STDOUT.
+.RE
+.SH AUTHOR
+Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
+
+.SH SEE ALSO
+.BR lcov (1),
+.BR genhtml (1),
+.BR geninfo (1),
+.BR genpng (1),
+.BR gcov (1)
diff --git a/ThirdParty/lcov/man/genhtml.1 b/ThirdParty/lcov/man/genhtml.1
new file mode 100644
index 0000000000000000000000000000000000000000..949bd4c574b977f1824150751763e2c81a45bb95
--- /dev/null
+++ b/ThirdParty/lcov/man/genhtml.1
@@ -0,0 +1,600 @@
+.TH genhtml 1 "LCOV 1.14" 2019\-02\-28 "User Manuals"
+.SH NAME
+genhtml \- Generate HTML view from LCOV coverage data files
+.SH SYNOPSIS
+.B genhtml
+.RB [ \-h | \-\-help ]
+.RB [ \-v | \-\-version ]
+.RS 8
+.br
+.RB [ \-q | \-\-quiet ]
+.RB [ \-s | \-\-show\-details ]
+.RB [ \-f | \-\-frames ]
+.br
+.RB [ \-b | \-\-baseline\-file  ]
+.IR baseline\-file
+.br
+.RB [ \-o | \-\-output\-directory
+.IR output\-directory ]
+.br
+.RB [ \-t | \-\-title
+.IR title ]
+.br
+.RB [ \-d | \-\-description\-file
+.IR description\-file ]
+.br
+.RB [ \-k | \-\-keep\-descriptions ]
+.RB [ \-c | \-\-css\-file
+.IR css\-file ]
+.br
+.RB [ \-p | \-\-prefix
+.IR prefix ]
+.RB [ \-\-no\-prefix ]
+.br
+.RB [ \-\-no\-source ]
+.RB [ \-\-num\-spaces
+.IR num ]
+.RB [ \-\-highlight ]
+.br
+.RB [ \-\-legend ]
+.RB [ \-\-html\-prolog
+.IR prolog\-file ]
+.br
+.RB [ \-\-html\-epilog
+.IR epilog\-file ]
+.RB [ \-\-html\-extension
+.IR extension ]
+.br
+.RB [ \-\-html\-gzip ]
+.RB [ \-\-sort ]
+.RB [ \-\-no\-sort ]
+.br
+.RB [ \-\-function\-coverage ]
+.RB [ \-\-no\-function\-coverage ]
+.br
+.RB [ \-\-branch\-coverage ]
+.RB [ \-\-no\-branch\-coverage ]
+.br
+.RB [ \-\-demangle\-cpp ]
+.RB [ \-\-ignore\-errors
+.IR errors  ]
+.br
+.RB [ \-\-config\-file
+.IR config\-file ]
+.RB [ \-\-rc
+.IR keyword = value ]
+.br
+.RB [ \-\-precision
+.RB [ \-\-missed ]
+.br
+.IR tracefile(s)
+.RE
+.SH DESCRIPTION
+Create an HTML view of coverage data found in
+.IR tracefile .
+Note that
+.I tracefile
+may also be a list of filenames.
+
+HTML output files are created in the current working directory unless the
+\-\-output\-directory option is used. If 
+.I tracefile
+ends with ".gz", it is assumed to be GZIP\-compressed and the gunzip tool
+will be used to decompress it transparently.
+
+Note that all source code files have to be present and readable at the
+exact file system location they were compiled.
+
+Use option
+.I \--css\-file
+to modify layout and colors of the generated HTML output. Files are
+marked in different colors depending on the associated coverage rate. By
+default, the coverage limits for low, medium and high coverage are set to
+0\-75%, 75\-90% and 90\-100% percent respectively. To change these
+values, use configuration file options
+.IR genhtml_hi_limit " and " genhtml_med_limit .
+
+Also note that when displaying percentages, 0% and 100% are only printed when
+the values are exactly 0% and 100% respectively. Other values which would
+conventionally be rounded to 0% or 100% are instead printed as nearest
+non-boundary value. This behavior is in accordance with that of the
+.BR gcov (1)
+tool.
+
+.SH OPTIONS
+.B \-h
+.br
+.B \-\-help
+.RS
+Print a short help text, then exit.
+
+.RE
+.B \-v
+.br
+.B \-\-version
+.RS
+Print version number, then exit.
+
+.RE
+.B \-q
+.br
+.B \-\-quiet
+.RS
+Do not print progress messages.
+
+Suppresses all informational progress output. When this switch is enabled,
+only error or warning messages are printed.
+
+.RE
+.B \-f
+.br
+.B \-\-frames
+.RS
+Use HTML frames for source code view.
+
+If enabled, a frameset is created for each source code file, providing
+an overview of the source code as a "clickable" image. Note that this
+option will slow down output creation noticeably because each source
+code character has to be inspected once. Note also that the GD.pm Perl
+module has to be installed for this option to work (it may be obtained
+from http://www.cpan.org).
+
+.RE
+.B \-s
+.br
+.B \-\-show\-details
+.RS
+Generate detailed directory view.
+
+When this option is enabled,
+.B genhtml
+generates two versions of each
+file view. One containing the standard information plus a link to a
+"detailed" version. The latter additionally contains information about
+which test case covered how many lines of each source file.
+
+.RE
+.BI "\-b " baseline\-file
+.br
+.BI "\-\-baseline\-file " baseline\-file
+.RS
+Use data in
+.I baseline\-file
+as coverage baseline.
+
+The tracefile specified by
+.I baseline\-file
+is read and all counts found in the original
+.I tracefile
+are decremented by the corresponding counts in 
+.I baseline\-file
+before creating any output.
+
+Note that when a count for a particular line in
+.I baseline\-file
+is greater than the count in the
+.IR tracefile ,
+the result is zero.
+
+.RE
+.BI "\-o " output\-directory
+.br
+.BI "\-\-output\-directory " output\-directory
+.RS
+Create files in 
+.I output\-directory.
+
+Use this option to tell 
+.B genhtml
+to write the resulting files to a directory other than
+the current one. If 
+.I output\-directory
+does not exist, it will be created.
+
+It is advisable to use this option since depending on the
+project size, a lot of files and subdirectories may be created.
+
+.RE
+.BI "\-t " title
+.br
+.BI "\-\-title " title
+.RS
+Display 
+.I title
+in header of all pages.
+
+.I title
+is written to the header portion of each generated HTML page to
+identify the context in which a particular output
+was created. By default this is the name of the tracefile.
+
+.RE
+.BI "\-d " description\-file
+.br
+.BI "\-\-description\-file " description\-file
+.RS
+Read test case descriptions from 
+.IR description\-file .
+
+All test case descriptions found in
+.I description\-file
+and referenced in the input data file are read and written to an extra page
+which is then incorporated into the HTML output.
+
+The file format of
+.IR "description\-file " is:
+
+for each test case:
+.RS
+TN:<testname>
+.br
+TD:<test description>
+
+.RE
+
+Valid test case names can consist of letters, numbers and the underscore
+character ('_').
+.RE
+.B \-k
+.br
+.B \-\-keep\-descriptions
+.RS
+Do not remove unused test descriptions.
+
+Keep descriptions found in the description file even if the coverage data
+indicates that the associated test case did not cover any lines of code.
+
+This option can also be configured permanently using the configuration file
+option
+.IR genhtml_keep_descriptions .
+
+.RE
+.BI "\-c " css\-file
+.br
+.BI "\-\-css\-file " css\-file
+.RS
+Use external style sheet file
+.IR css\-file .
+
+Using this option, an extra .css file may be specified which will replace
+the default one. This may be helpful if the default colors make your eyes want
+to jump out of their sockets :)
+
+This option can also be configured permanently using the configuration file
+option
+.IR genhtml_css_file .
+
+.RE
+.BI "\-p " prefix
+.br
+.BI "\-\-prefix " prefix
+.RS
+Remove 
+.I prefix
+from all directory names.
+
+Because lists containing long filenames are difficult to read, there is a
+mechanism implemented that will automatically try to shorten all directory
+names on the overview page beginning with a common prefix. By default,
+this is done using an algorithm that tries to find the prefix which, when
+applied, will minimize the resulting sum of characters of all directory
+names.
+
+Use this option to specify the prefix to be removed by yourself.
+
+.RE
+.B \-\-no\-prefix
+.RS
+Do not remove prefix from directory names.
+
+This switch will completely disable the prefix mechanism described in the
+previous section.
+
+This option can also be configured permanently using the configuration file
+option
+.IR genhtml_no_prefix .
+
+.RE
+.B \-\-no\-source
+.RS
+Do not create source code view.
+
+Use this switch if you don't want to get a source code view for each file.
+
+This option can also be configured permanently using the configuration file
+option
+.IR genhtml_no_source .
+
+.RE
+.BI "\-\-num\-spaces " spaces
+.RS
+Replace tabs in source view with
+.I num
+spaces.
+
+Default value is 8.
+
+This option can also be configured permanently using the configuration file
+option
+.IR genhtml_num_spaces .
+
+.RE
+.B \-\-highlight
+.RS
+Highlight lines with converted\-only coverage data.
+
+Use this option in conjunction with the \-\-diff option of
+.B lcov
+to highlight those lines which were only covered in data sets which were
+converted from previous source code versions.
+
+This option can also be configured permanently using the configuration file
+option
+.IR genhtml_highlight .
+
+.RE
+.B \-\-legend
+.RS
+Include color legend in HTML output.
+
+Use this option to include a legend explaining the meaning of color coding
+in the resulting HTML output.
+
+This option can also be configured permanently using the configuration file
+option
+.IR genhtml_legend .
+
+.RE
+.BI "\-\-html\-prolog " prolog\-file
+.RS
+Read customized HTML prolog from 
+.IR prolog\-file .
+
+Use this option to replace the default HTML prolog (the initial part of the
+HTML source code leading up to and including the <body> tag) with the contents
+of
+.IR prolog\-file .
+Within the prolog text, the following words will be replaced when a page is generated:
+
+.B "@pagetitle@"
+.br
+The title of the page.
+
+.B "@basedir@"
+.br
+A relative path leading to the base directory (e.g. for locating css\-files).
+
+This option can also be configured permanently using the configuration file
+option
+.IR genhtml_html_prolog .
+
+.RE
+.BI "\-\-html\-epilog " epilog\-file
+.RS
+Read customized HTML epilog from 
+.IR epilog\-file .
+
+Use this option to replace the default HTML epilog (the final part of the HTML
+source including </body>) with the contents of
+.IR epilog\-file .
+
+Within the epilog text, the following words will be replaced when a page is generated:
+
+.B "@basedir@"
+.br
+A relative path leading to the base directory (e.g. for locating css\-files).
+
+This option can also be configured permanently using the configuration file
+option
+.IR genhtml_html_epilog .
+
+.RE
+.BI "\-\-html\-extension " extension
+.RS
+Use customized filename extension for generated HTML pages.
+
+This option is useful in situations where different filename extensions
+are required to render the resulting pages correctly (e.g. php). Note that
+a '.' will be inserted between the filename and the extension specified by
+this option.
+
+This option can also be configured permanently using the configuration file
+option
+.IR genhtml_html_extension .
+.RE
+
+.B \-\-html\-gzip
+.RS
+Compress all generated html files with gzip and add a .htaccess file specifying
+gzip\-encoding in the root output directory.
+
+Use this option if you want to save space on your webserver. Requires a
+webserver with .htaccess support and a browser with support for gzip
+compressed html.
+
+This option can also be configured permanently using the configuration file
+option
+.IR genhtml_html_gzip .
+
+.RE
+.B \-\-sort
+.br
+.B \-\-no\-sort
+.RS
+Specify whether to include sorted views of file and directory overviews.
+
+Use \-\-sort to include sorted views or \-\-no\-sort to not include them.
+Sorted views are
+.B enabled
+by default.
+
+When sorted views are enabled, each overview page will contain links to
+views of that page sorted by coverage rate.
+
+This option can also be configured permanently using the configuration file
+option
+.IR genhtml_sort .
+
+.RE
+.B \-\-function\-coverage
+.br
+.B \-\-no\-function\-coverage
+.RS
+Specify whether to display function coverage summaries in HTML output.
+
+Use \-\-function\-coverage to enable function coverage summaries or
+\-\-no\-function\-coverage to disable it. Function coverage summaries are
+.B enabled
+by default.
+
+When function coverage summaries are enabled, each overview page will contain
+the number of functions found and hit per file or directory, together with
+the resulting coverage rate. In addition, each source code view will contain
+a link to a page which lists all functions found in that file plus the
+respective call count for those functions.
+
+This option can also be configured permanently using the configuration file
+option
+.IR genhtml_function_coverage .
+
+.RE
+.B \-\-branch\-coverage
+.br
+.B \-\-no\-branch\-coverage
+.RS
+Specify whether to display branch coverage data in HTML output.
+
+Use \-\-branch\-coverage to enable branch coverage display or
+\-\-no\-branch\-coverage to disable it. Branch coverage data display is
+.B enabled
+by default.
+
+When branch coverage display is enabled, each overview page will contain
+the number of branches found and hit per file or directory, together with
+the resulting coverage rate. In addition, each source code view will contain
+an extra column which lists all branches of a line with indications of
+whether the branch was taken or not. Branches are shown in the following format:
+
+ ' + ': Branch was taken at least once
+.br
+ ' - ': Branch was not taken
+.br
+ ' # ': The basic block containing the branch was never executed
+.br
+
+Note that it might not always be possible to relate branches to the
+corresponding source code statements: during compilation, GCC might shuffle
+branches around or eliminate some of them to generate better code.
+
+This option can also be configured permanently using the configuration file
+option
+.IR genhtml_branch_coverage .
+
+.RE
+.B \-\-demangle\-cpp
+.RS
+Specify whether to demangle C++ function names.
+
+Use this option if you want to convert C++ internal function names to
+human readable format for display on the HTML function overview page.
+This option requires that the c++filt tool is installed (see
+.BR c++filt (1)).
+
+.RE
+.B \-\-ignore\-errors
+.I errors
+.br
+.RS
+Specify a list of errors after which to continue processing.
+
+Use this option to specify a list of one or more classes of errors after which
+genhtml should continue processing instead of aborting.
+
+.I errors
+can be a comma\-separated list of the following keywords:
+
+.B source:
+the source code file for a data set could not be found.
+.RE
+
+.B \-\-config\-file
+.I config\-file
+.br
+.RS
+Specify a configuration file to use.
+
+When this option is specified, neither the system\-wide configuration file
+/etc/lcovrc, nor the per\-user configuration file ~/.lcovrc is read.
+
+This option may be useful when there is a need to run several
+instances of
+.B genhtml
+with different configuration file options in parallel.
+.RE
+
+.B \-\-rc
+.IR keyword = value
+.br
+.RS
+Override a configuration directive.
+
+Use this option to specify a
+.IR keyword = value
+statement which overrides the corresponding configuration statement in
+the lcovrc configuration file. You can specify this option more than once
+to override multiple configuration statements.
+See
+.BR lcovrc (5)
+for a list of available keywords and their meaning.
+.RE
+
+.BI "\-\-precision " num
+.RS
+Show coverage rates with
+.I num
+number of digits after the decimal-point.
+
+Default value is 1.
+
+This option can also be configured permanently using the configuration file
+option
+.IR genhtml_precision .
+.RE
+
+.B \-\-missed
+.RS
+Show counts of missed lines, functions, or branches
+
+Use this option to change overview pages to show the count of lines, functions,
+or branches that were not hit. These counts are represented by negative numbers.
+
+When specified together with \-\-sort, file and directory views will be sorted
+by missed counts.
+
+This option can also be configured permanently using the configuration file
+option
+.IR genhtml_missed .
+.RE
+
+.SH FILES
+
+.I /etc/lcovrc
+.RS
+The system\-wide configuration file.
+.RE
+
+.I ~/.lcovrc
+.RS
+The per\-user configuration file.
+.RE
+
+.SH AUTHOR
+Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
+
+.SH SEE ALSO
+.BR lcov (1),
+.BR lcovrc (5),
+.BR geninfo (1),
+.BR genpng (1),
+.BR gendesc (1),
+.BR gcov (1)
diff --git a/ThirdParty/lcov/man/geninfo.1 b/ThirdParty/lcov/man/geninfo.1
new file mode 100644
index 0000000000000000000000000000000000000000..2ce917126c413c54919907147202b18141062b2f
--- /dev/null
+++ b/ThirdParty/lcov/man/geninfo.1
@@ -0,0 +1,578 @@
+.TH geninfo 1 "LCOV 1.14" 2019\-02\-28 "User Manuals"
+.SH NAME
+geninfo \- Generate tracefiles from .da files
+.SH SYNOPSIS
+.B geninfo
+.RB [ \-h | \-\-help ]
+.RB [ \-v | \-\-version ]
+.RB [ \-q | \-\-quiet ]
+.br
+.RS 8
+.RB [ \-i | \-\-initial ]
+.RB [ \-t | \-\-test\-name
+.IR test\-name ]
+.br
+.RB [ \-o | \-\-output\-filename
+.IR filename ]
+.RB [ \-f | \-\-follow ]
+.br
+.RB [ \-b | \-\-base\-directory
+.IR directory ]
+.br
+.RB [ \-\-checksum ]
+.RB [ \-\-no\-checksum ]
+.br
+.RB [ \-\-compat\-libtool ]
+.RB [ \-\-no\-compat\-libtool ]
+.br
+.RB [ \-\-gcov\-tool
+.IR tool  ]
+.RB [ \-\-ignore\-errors
+.IR errors  ]
+.br
+.RB [ \-\-no\-recursion ]
+.I directory
+.RB [ \-\-external ]
+.RB [ \-\-no\-external ]
+.br
+.RB [ \-\-config\-file
+.IR config\-file ]
+.RB [ \-\-no\-markers ]
+.br
+.RB [ \-\-derive\-func\-data ]
+.RB [ \-\-compat
+.IR  mode =on|off|auto]
+.br
+.RB [ \-\-rc
+.IR keyword = value ]
+.br
+.RB [ \-\-include
+.IR pattern ]
+.RB [ \-\-exclude
+.IR pattern ]
+.RE
+.SH DESCRIPTION
+.B geninfo 
+converts all GCOV coverage data files found in 
+.I directory
+into tracefiles, which the
+.B genhtml
+tool can convert to HTML output.
+
+Unless the \-\-output\-filename option is specified,
+.B geninfo
+writes its
+output to one file per .da file, the name of which is generated by simply
+appending ".info" to the respective .da file name.
+
+Note that the current user needs write access to both
+.I directory
+as well as to the original source code location. This is necessary because
+some temporary files have to be created there during the conversion process.
+
+Note also that
+.B geninfo
+is called from within
+.BR lcov ,
+so that there is usually no need to call it directly.
+
+.B Exclusion markers
+
+To exclude specific lines of code from a tracefile, you can add exclusion
+markers to the source code. Additionally you can exclude specific branches from
+branch coverage without excluding the involved lines from line and function
+coverage. Exclusion markers are keywords which can for example be added in the
+form of a comment.
+See
+.BR lcovrc (5)
+how to override some of them.
+
+The following markers are recognized by geninfo:
+
+LCOV_EXCL_LINE
+.RS
+Lines containing this marker will be excluded.
+.br
+.RE
+LCOV_EXCL_START
+.RS
+Marks the beginning of an excluded section. The current line is part of this
+section.
+.br
+.RE
+LCOV_EXCL_STOP
+.RS
+Marks the end of an excluded section. The current line is not part of this
+section.
+.RE
+.br
+LCOV_EXCL_BR_LINE
+.RS
+Lines containing this marker will be excluded from branch coverage.
+.br
+.RE
+LCOV_EXCL_BR_START
+.RS
+Marks the beginning of a section which is excluded from branch coverage. The
+current line is part of this section.
+.br
+.RE
+LCOV_EXCL_BR_STOP
+.RS
+Marks the end of a section which is excluded from branch coverage. The current
+line is not part of this section.
+.RE
+.br
+
+.SH OPTIONS
+
+.B \-b
+.I directory
+.br
+.B \-\-base\-directory
+.I directory
+.br
+.RS
+.RI "Use " directory
+as base directory for relative paths.
+
+Use this option to specify the base directory of a build\-environment
+when geninfo produces error messages like:
+
+.RS
+ERROR: could not read source file /home/user/project/subdir1/subdir2/subdir1/subdir2/file.c
+.RE
+
+In this example, use /home/user/project as base directory.
+
+This option is required when using geninfo on projects built with libtool or
+similar build environments that work with a base directory, i.e. environments,
+where the current working directory when invoking the compiler is not the same
+directory in which the source code file is located.
+
+Note that this option will not work in environments where multiple base
+directories are used. In that case use configuration file setting
+.B geninfo_auto_base=1
+(see
+.BR lcovrc (5)).
+.RE
+
+.B \-\-checksum
+.br
+.B \-\-no\-checksum
+.br
+.RS
+Specify whether to generate checksum data when writing tracefiles.
+
+Use \-\-checksum to enable checksum generation or \-\-no\-checksum to
+disable it. Checksum generation is
+.B disabled
+by default.
+
+When checksum generation is enabled, a checksum will be generated for each
+source code line and stored along with the coverage data. This checksum will
+be used to prevent attempts to combine coverage data from different source
+code versions.
+
+If you don't work with different source code versions, disable this option
+to speed up coverage data processing and to reduce the size of tracefiles.
+.RE
+
+.B \-\-compat
+.IR mode = value [, mode = value ,...]
+.br
+.RS
+Set compatibility mode.
+
+Use \-\-compat to specify that geninfo should enable one or more compatibility
+modes when capturing coverage data. You can provide a comma-separated list
+of mode=value pairs to specify the values for multiple modes.
+
+Valid
+.I values
+are:
+
+.B on
+.RS
+Enable compatibility mode.
+.RE
+.B off
+.RS
+Disable compatibility mode.
+.RE
+.B auto
+.RS
+Apply auto-detection to determine if compatibility mode is required. Note that
+auto-detection is not available for all compatibility modes.
+.RE
+
+If no value is specified, 'on' is assumed as default value.
+
+Valid
+.I modes
+are:
+
+.B libtool
+.RS
+Enable this mode if you are capturing coverage data for a project that
+was built using the libtool mechanism. See also
+\-\-compat\-libtool.
+
+The default value for this setting is 'on'.
+
+.RE
+.B hammer
+.RS
+Enable this mode if you are capturing coverage data for a project that
+was built using a version of GCC 3.3 that contains a modification
+(hammer patch) of later GCC versions. You can identify a modified GCC 3.3
+by checking the build directory of your project for files ending in the
+extension '.bbg'. Unmodified versions of GCC 3.3 name these files '.bb'.
+
+The default value for this setting is 'auto'.
+
+.RE
+.B split_crc
+.RS
+Enable this mode if you are capturing coverage data for a project that
+was built using a version of GCC 4.6 that contains a modification
+(split function checksums) of later GCC versions. Typical error messages
+when running geninfo on coverage data produced by such GCC versions are
+\'out of memory' and 'reached unexpected end of file'.
+
+The default value for this setting is 'auto'
+.RE
+
+.RE
+
+.B \-\-compat\-libtool
+.br
+.B \-\-no\-compat\-libtool
+.br
+.RS
+Specify whether to enable libtool compatibility mode.
+
+Use \-\-compat\-libtool to enable libtool compatibility mode or \-\-no\-compat\-libtool
+to disable it. The libtool compatibility mode is
+.B enabled
+by default.
+
+When libtool compatibility mode is enabled, geninfo will assume that the source
+code relating to a .da file located in a directory named ".libs" can be
+found in its parent directory.
+
+If you have directories named ".libs" in your build environment but don't use
+libtool, disable this option to prevent problems when capturing coverage data.
+.RE
+
+.B \-\-config\-file
+.I config\-file
+.br
+.RS
+Specify a configuration file to use.
+
+When this option is specified, neither the system\-wide configuration file
+/etc/lcovrc, nor the per\-user configuration file ~/.lcovrc is read.
+
+This option may be useful when there is a need to run several
+instances of
+.B geninfo
+with different configuration file options in parallel.
+.RE
+
+.B \-\-derive\-func\-data
+.br
+.RS
+Calculate function coverage data from line coverage data.
+
+Use this option to collect function coverage data, even if the version of the
+gcov tool installed on the test system does not provide this data. lcov will
+instead derive function coverage data from line coverage data and
+information about which lines belong to a function.
+.RE
+
+.B \-\-exclude
+.I pattern
+.br
+.RS
+Exclude source files matching
+.IR pattern .
+
+Use this switch if you want to exclude coverage data for a particular set
+of source files matching any of the given patterns. Multiple patterns can be
+specified by using multiple
+.B --exclude
+command line switches. The
+.I patterns
+will be interpreted as shell wildcard patterns (note that they may need to be
+escaped accordingly to prevent the shell from expanding them first).
+
+Can be combined with the
+.B --include
+command line switch. If a given file matches both the include pattern and the
+exclude pattern, the exclude pattern will take precedence.
+.RE
+
+.B \-\-external
+.br
+.B \-\-no\-external
+.br
+.RS
+Specify whether to capture coverage data for external source files.
+
+External source files are files which are not located in one of the directories
+specified by \-\-directory or \-\-base\-directory. Use \-\-external to include
+external source files while capturing coverage data or \-\-no\-external to
+ignore this data.
+
+Data for external source files is
+.B included
+by default.
+.RE
+
+.B \-f
+.br
+.B \-\-follow
+.RS
+Follow links when searching .da files.
+.RE
+
+.B \-\-gcov\-tool
+.I tool
+.br
+.RS
+Specify the location of the gcov tool.
+.RE
+
+.B \-h
+.br
+.B \-\-help
+.RS
+Print a short help text, then exit.
+.RE
+
+.B \-\-include
+.I pattern
+.br
+.RS
+Include source files matching
+.IR pattern .
+
+Use this switch if you want to include coverage data for only a particular set
+of source files matching any of the given patterns. Multiple patterns can be
+specified by using multiple
+.B --include
+command line switches. The
+.I patterns
+will be interpreted as shell wildcard patterns (note that they may need to be
+escaped accordingly to prevent the shell from expanding them first).
+.RE
+
+.B \-\-ignore\-errors
+.I errors
+.br
+.RS
+Specify a list of errors after which to continue processing.
+
+Use this option to specify a list of one or more classes of errors after which
+geninfo should continue processing instead of aborting.
+
+.I errors
+can be a comma\-separated list of the following keywords:
+
+.B gcov:
+the gcov tool returned with a non\-zero return code.
+
+.B source:
+the source code file for a data set could not be found.
+.RE
+
+.B \-i
+.br
+.B \-\-initial
+.RS
+Capture initial zero coverage data.
+
+Run geninfo with this option on the directories containing .bb, .bbg or .gcno
+files before running any test case. The result is a "baseline" coverage data
+file that contains zero coverage for every instrumented line and function.
+Combine this data file (using lcov \-a) with coverage data files captured
+after a test run to ensure that the percentage of total lines covered is
+correct even when not all object code files were loaded during the test.
+
+Note: currently, the \-\-initial option does not generate branch coverage
+information.
+.RE
+
+.B \-\-no\-markers
+.br
+.RS
+Use this option if you want to get coverage data without regard to exclusion
+markers in the source code file.
+.RE
+
+.B \-\-no\-recursion
+.br
+.RS
+Use this option if you want to get coverage data for the specified directory
+only without processing subdirectories.
+.RE
+
+.BI "\-o " output\-filename
+.br
+.BI "\-\-output\-filename " output\-filename
+.RS
+Write all data to
+.IR output\-filename .
+
+If you want to have all data written to a single file (for easier
+handling), use this option to specify the respective filename. By default,
+one tracefile will be created for each processed .da file.
+.RE
+
+.B \-q
+.br
+.B \-\-quiet
+.RS
+Do not print progress messages.
+
+Suppresses all informational progress output. When this switch is enabled,
+only error or warning messages are printed.
+.RE
+
+.B \-\-rc
+.IR keyword = value
+.br
+.RS
+Override a configuration directive.
+
+Use this option to specify a
+.IR keyword = value
+statement which overrides the corresponding configuration statement in
+the lcovrc configuration file. You can specify this option more than once
+to override multiple configuration statements.
+See
+.BR lcovrc (5)
+for a list of available keywords and their meaning.
+.RE
+
+.BI "\-t " testname
+.br
+.BI "\-\-test\-name " testname
+.RS
+Use test case name 
+.I testname
+for resulting data. Valid test case names can consist of letters, decimal
+digits and the underscore character ('_').
+
+This proves useful when data from several test cases is merged (i.e. by
+simply concatenating the respective tracefiles) in which case a test
+name can be used to differentiate between data from each test case.
+.RE
+
+.B \-v
+.br
+.B \-\-version
+.RS
+Print version number, then exit.
+.RE
+
+
+.SH FILES
+
+.I /etc/lcovrc
+.RS
+The system\-wide configuration file.
+.RE
+
+.I ~/.lcovrc
+.RS
+The per\-user configuration file.
+.RE
+
+Following is a quick description of the tracefile format as used by
+.BR genhtml ", " geninfo " and " lcov .
+
+A tracefile is made up of several human\-readable lines of text,
+divided into sections. If available, a tracefile begins with the
+.I testname
+which is stored in the following format:
+
+  TN:<test name>
+
+For each source file referenced in the .da file, there is a section containing
+filename and coverage data:
+
+  SF:<absolute path to the source file>
+
+Following is a list of line numbers for each function name found in the
+source file:
+
+  FN:<line number of function start>,<function name>
+
+Next, there is a list of execution counts for each instrumented function:
+
+  FNDA:<execution count>,<function name>
+
+This list is followed by two lines containing the number of functions found
+and hit:
+
+  FNF:<number of functions found>
+  FNH:<number of functions hit>
+
+Branch coverage information is stored with one line per branch:
+
+  BRDA:<line number>,<block number>,<branch number>,<taken>
+
+Block number and branch number are gcc internal IDs for the branch. Taken is
+either '-' if the basic block containing the branch was never executed or
+a number indicating how often that branch was taken.
+
+Branch coverage summaries are stored in two lines:
+
+  BRF:<number of branches found>
+  BRH:<number of branches hit>
+
+Then there is a list of execution counts for each instrumented line
+(i.e. a line which resulted in executable code):
+
+  DA:<line number>,<execution count>[,<checksum>]
+
+Note that there may be an optional checksum present for each instrumented
+line. The current
+.B geninfo
+implementation uses an MD5 hash as checksumming algorithm.
+
+At the end of a section, there is a summary about how many lines
+were instrumented and how many of those were actually hit:
+
+  LH:<number of lines with a non\-zero execution count>
+  LF:<number of instrumented lines>
+
+Each section ends with:
+
+  end_of_record
+
+In addition to the main source code file there are sections for all
+#included files which also contain executable code.
+
+Note that the absolute path of a source file is generated by interpreting
+the contents of the respective .bb file (see
+.BR "gcov " (1)
+for more information on this file type). Relative filenames are prefixed
+with the directory in which the .bb file is found.
+
+Note also that symbolic links to the .bb file will be resolved so that the
+actual file path is used instead of the path to a link. This approach is
+necessary for the mechanism to work with the /proc/gcov files.
+
+.SH AUTHOR
+Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
+
+.SH SEE ALSO
+.BR lcov (1),
+.BR lcovrc (5),
+.BR genhtml (1),
+.BR genpng (1),
+.BR gendesc (1),
+.BR gcov (1)
diff --git a/ThirdParty/lcov/man/genpng.1 b/ThirdParty/lcov/man/genpng.1
new file mode 100644
index 0000000000000000000000000000000000000000..f6a49b8a5d48af9f9b0c10b151592fd703ca01d4
--- /dev/null
+++ b/ThirdParty/lcov/man/genpng.1
@@ -0,0 +1,101 @@
+.TH genpng 1 "LCOV 1.14" 2019\-02\-28 "User Manuals"
+.SH NAME
+genpng \- Generate an overview image from a source file
+.SH SYNOPSIS
+.B genpng
+.RB [ \-h | \-\-help ]
+.RB [ \-v | \-\-version ]
+.RS 7
+.br
+.RB [ \-t | \-\-tab\-size
+.IR tabsize ]
+.RB [ \-w | \-\-width
+.IR width ]
+.br
+.RB [ \-o | \-\-output\-filename
+.IR output\-filename ]
+.br
+.IR source\-file
+.SH DESCRIPTION
+.B genpng
+creates an overview image for a given source code file of either
+plain text or .gcov file format.
+
+Note that the
+.I GD.pm
+Perl module has to be installed for this script to work
+(it may be obtained from
+.IR http://www.cpan.org ).
+
+Note also that
+.B genpng
+is called from within
+.B genhtml
+so that there is usually no need to call it directly.
+
+.SH OPTIONS
+.B \-h
+.br
+.B \-\-help
+.RS
+Print a short help text, then exit.
+.RE
+
+.B \-v
+.br
+.B \-\-version
+.RS
+Print version number, then exit.
+.RE
+
+.BI "\-t " tab\-size
+.br
+.BI "\-\-tab\-size " tab\-size
+.RS
+Use 
+.I tab\-size
+spaces in place of tab.
+
+All occurrences of tabulator signs in the source code file will be replaced
+by the number of spaces defined by
+.I tab\-size
+(default is 4).
+.RE
+
+.BI "\-w " width
+.br
+.BI "\-\-width " width
+.RS
+Set width of output image to 
+.I width
+pixel.
+
+The resulting image will be exactly
+.I width
+pixel wide (default is 80).
+
+Note that source code lines which are longer than
+.I width
+will be truncated.
+.RE
+
+
+.BI "\-o " filename
+.br
+.BI "\-\-output\-filename " filename
+.RS
+Write image to
+.IR filename .
+
+Specify a name for the resulting image file (default is 
+.IR source\-file .png).
+.RE
+.SH AUTHOR
+Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
+
+.SH SEE ALSO
+.BR lcov (1),
+.BR genhtml (1),
+.BR geninfo (1),
+.BR gendesc (1),
+.BR gcov (1)
diff --git a/ThirdParty/lcov/man/lcov.1 b/ThirdParty/lcov/man/lcov.1
new file mode 100644
index 0000000000000000000000000000000000000000..e86eb3aa7624a76d122d791eaca2666dd41362de
--- /dev/null
+++ b/ThirdParty/lcov/man/lcov.1
@@ -0,0 +1,930 @@
+.TH lcov 1 "LCOV 1.14" 2019\-02\-28 "User Manuals"
+.SH NAME
+lcov \- a graphical GCOV front\-end
+.SH SYNOPSIS
+.B lcov
+.BR \-c | \-\-capture
+.RS 5
+.br
+.RB [ \-d | \-\-directory
+.IR directory ]
+.RB [ \-k | \-\-kernel\-directory
+.IR directory ]
+.br
+.RB [ \-o | \-\-output\-file
+.IR tracefile ]
+.RB [ \-t | \-\-test\-name
+.IR testname ]
+.br
+.RB [ \-b | \-\-base\-directory
+.IR directory ]
+.RB [ \-i | \-\-initial ]
+.RB [ \-\-gcov\-tool
+.IR tool ]
+.br
+.RB [ \-\-checksum ]
+.RB [ \-\-no\-checksum ]
+.RB [ \-\-no\-recursion ]
+.RB [ \-f | \-\-follow ]
+.br
+.RB [ \-\-compat\-libtool ]
+.RB [ \-\-no\-compat\-libtool ]
+.RB [ \-\-ignore\-errors
+.IR errors ]
+.br
+.RB [ \-\-to\-package
+.IR package ]
+.RB [ \-\-from\-package
+.IR package ]
+.RB [ \-q | \-\-quiet ]
+.br
+.RB [ \-\-no\-markers ]
+.RB [ \-\-external ]
+.RB [ \-\-no\-external ]
+.br
+.RB [ \-\-config\-file
+.IR config\-file ]
+.RB [ \-\-rc
+.IR keyword = value ]
+.br
+.RB [ \-\-compat
+.IR  mode =on|off|auto]
+.br
+.RB [ \-\-include
+.IR pattern ]
+.RB [ \-\-exclude
+.IR pattern ]
+.br
+.RE
+
+.B lcov
+.BR \-z | \-\-zerocounters
+.RS 5
+.br
+.RB [ \-d | \-\-directory
+.IR directory ]
+.RB [ \-\-no\-recursion ]
+.RB [ \-f | \-\-follow ]
+.br
+.RB [ \-q | \-\-quiet ]
+.br
+.RE
+
+.B lcov
+.BR \-l | \-\-list
+.I tracefile
+.RS 5
+.br
+.RB [ \-q | \-\-quiet ]
+.RB [ \-\-list\-full\-path ]
+.RB [ \-\-no\-list\-full\-path ]
+.br
+.RB [ \-\-config\-file
+.IR config\-file ]
+.RB [ \-\-rc
+.IR keyword = value ]
+.br
+.RE
+
+.B lcov
+.BR \-a | \-\-add\-tracefile
+.I tracefile
+.RS 5
+.br
+.RB [ \-o | \-\-output\-file
+.IR tracefile ]
+.RB [ \-\-checksum ]
+.RB [ \-\-no\-checksum ]
+.br
+.RB [ \-q | \-\-quiet ]
+.RB [ \-\-config\-file
+.IR config\-file ]
+.RB [ \-\-rc
+.IR keyword = value ]
+.br
+.RE
+
+.B lcov
+.BR \-e | \-\-extract
+.I tracefile pattern
+.RS 5
+.br
+.RB [ \-o | \-\-output\-file
+.IR tracefile ]
+.RB [ \-\-checksum ]
+.RB [ \-\-no\-checksum ]
+.br
+.RB [ \-q | \-\-quiet ]
+.RB [ \-\-config\-file
+.IR config\-file ]
+.RB [ \-\-rc
+.IR keyword = value ]
+.br
+.RE
+
+.B lcov
+.BR \-r | \-\-remove
+.I tracefile pattern
+.RS 5
+.br
+.RB [ \-o | \-\-output\-file
+.IR tracefile ]
+.RB [ \-\-checksum ]
+.RB [ \-\-no\-checksum ]
+.br
+.RB [ \-q | \-\-quiet ]
+.RB [ \-\-config\-file
+.IR config\-file ]
+.RB [ \-\-rc
+.IR keyword = value ]
+.br
+.RE
+
+.B lcov
+.BR \-\-diff
+.IR "tracefile diff"
+.RS 5
+.br
+.RB [ \-o | \-\-output\-file
+.IR tracefile ]
+.RB [ \-\-checksum ]
+.RB [ \-\-no\-checksum ]
+.br
+.RB [ \-\-convert\-filenames ]
+.RB [ \-\-strip
+.IR depth ]
+.RB [ \-\-path
+.IR path ]
+.RB [ \-q | \-\-quiet ]
+.br
+.RB [ \-\-config\-file
+.IR config\-file ]
+.RB [ \-\-rc
+.IR keyword = value ]
+.br
+.RE
+
+.B lcov
+.BR \-\-summary
+.I tracefile
+.RS 5
+.br
+.RB [ \-q | \-\-quiet ]
+.br
+.RE
+
+.B lcov
+.RB [ \-h | \-\-help ]
+.RB [ \-v | \-\-version ]
+.RS 5
+.br
+.RE
+
+.SH DESCRIPTION
+.B lcov
+is a graphical front\-end for GCC's coverage testing tool gcov. It collects
+line, function and branch coverage data for multiple source files and creates
+HTML pages containing the source code annotated with coverage information.
+It also adds overview pages for easy navigation within the file structure.
+
+Use
+.B lcov
+to collect coverage data and
+.B genhtml
+to create HTML pages. Coverage data can either be collected from the
+currently running Linux kernel or from a user space application. To do this,
+you have to complete the following preparation steps:
+
+For Linux kernel coverage:
+.RS
+Follow the setup instructions for the gcov\-kernel infrastructure:
+.I http://ltp.sourceforge.net/coverage/gcov.php
+.br
+
+
+.RE
+For user space application coverage:
+.RS
+Compile the application with GCC using the options
+"\-fprofile\-arcs" and "\-ftest\-coverage".
+.RE
+
+Please note that this man page refers to the output format of
+.B lcov
+as ".info file" or "tracefile" and that the output of GCOV
+is called ".da file".
+
+Also note that when printing percentages, 0% and 100% are only printed when
+the values are exactly 0% and 100% respectively. Other values which would
+conventionally be rounded to 0% or 100% are instead printed as nearest
+non-boundary value. This behavior is in accordance with that of the
+.BR gcov (1)
+tool.
+
+.SH OPTIONS
+
+
+.B \-a
+.I tracefile
+.br
+.B \-\-add\-tracefile
+.I tracefile
+.br
+.RS
+Add contents of
+.IR tracefile .
+
+Specify several tracefiles using the \-a switch to combine the coverage data
+contained in these files by adding up execution counts for matching test and
+filename combinations.
+
+The result of the add operation will be written to stdout or the tracefile
+specified with \-o.
+
+Only one of  \-z, \-c, \-a, \-e, \-r, \-l, \-\-diff or \-\-summary may be
+specified at a time.
+
+.RE
+
+.B \-b
+.I directory
+.br
+.B \-\-base\-directory
+.I directory
+.br
+.RS
+.RI "Use " directory
+as base directory for relative paths.
+
+Use this option to specify the base directory of a build\-environment
+when lcov produces error messages like:
+
+.RS
+ERROR: could not read source file /home/user/project/subdir1/subdir2/subdir1/subdir2/file.c
+.RE
+
+In this example, use /home/user/project as base directory.
+
+This option is required when using lcov on projects built with libtool or
+similar build environments that work with a base directory, i.e. environments,
+where the current working directory when invoking the compiler is not the same
+directory in which the source code file is located.
+
+Note that this option will not work in environments where multiple base
+directories are used. In that case use configuration file setting
+.B geninfo_auto_base=1
+(see
+.BR lcovrc (5)).
+.RE
+
+.B \-c
+.br
+.B \-\-capture
+.br
+.RS
+Capture coverage data.
+
+By default captures the current kernel execution counts and writes the
+resulting coverage data to the standard output. Use the \-\-directory
+option to capture counts for a user space program.
+
+The result of the capture operation will be written to stdout or the tracefile
+specified with \-o.
+
+Only one of  \-z, \-c, \-a, \-e, \-r, \-l, \-\-diff or \-\-summary may be
+specified at a time.
+.RE
+
+.B \-\-checksum
+.br
+.B \-\-no\-checksum
+.br
+.RS
+Specify whether to generate checksum data when writing tracefiles.
+
+Use \-\-checksum to enable checksum generation or \-\-no\-checksum to
+disable it. Checksum generation is
+.B disabled
+by default.
+
+When checksum generation is enabled, a checksum will be generated for each
+source code line and stored along with the coverage data. This checksum will
+be used to prevent attempts to combine coverage data from different source
+code versions.
+
+If you don't work with different source code versions, disable this option
+to speed up coverage data processing and to reduce the size of tracefiles.
+.RE
+
+.B \-\-compat
+.IR mode = value [, mode = value ,...]
+.br
+.RS
+Set compatibility mode.
+
+Use \-\-compat to specify that lcov should enable one or more compatibility
+modes when capturing coverage data. You can provide a comma-separated list
+of mode=value pairs to specify the values for multiple modes.
+
+Valid
+.I values
+are:
+
+.B on
+.RS
+Enable compatibility mode.
+.RE
+.B off
+.RS
+Disable compatibility mode.
+.RE
+.B auto
+.RS
+Apply auto-detection to determine if compatibility mode is required. Note that
+auto-detection is not available for all compatibility modes.
+.RE
+
+If no value is specified, 'on' is assumed as default value.
+
+Valid
+.I modes
+are:
+
+.B libtool
+.RS
+Enable this mode if you are capturing coverage data for a project that
+was built using the libtool mechanism. See also
+\-\-compat\-libtool.
+
+The default value for this setting is 'on'.
+
+.RE
+.B hammer
+.RS
+Enable this mode if you are capturing coverage data for a project that
+was built using a version of GCC 3.3 that contains a modification
+(hammer patch) of later GCC versions. You can identify a modified GCC 3.3
+by checking the build directory of your project for files ending in the
+extension '.bbg'. Unmodified versions of GCC 3.3 name these files '.bb'.
+
+The default value for this setting is 'auto'.
+
+.RE
+.B split_crc
+.RS
+Enable this mode if you are capturing coverage data for a project that
+was built using a version of GCC 4.6 that contains a modification
+(split function checksums) of later GCC versions. Typical error messages
+when running lcov on coverage data produced by such GCC versions are
+\'out of memory' and 'reached unexpected end of file'.
+
+The default value for this setting is 'auto'
+.RE
+
+.RE
+
+.B \-\-compat\-libtool
+.br
+.B \-\-no\-compat\-libtool
+.br
+.RS
+Specify whether to enable libtool compatibility mode.
+
+Use \-\-compat\-libtool to enable libtool compatibility mode or \-\-no\-compat\-libtool
+to disable it. The libtool compatibility mode is
+.B enabled
+by default.
+
+When libtool compatibility mode is enabled, lcov will assume that the source
+code relating to a .da file located in a directory named ".libs" can be
+found in its parent directory.
+
+If you have directories named ".libs" in your build environment but don't use
+libtool, disable this option to prevent problems when capturing coverage data.
+.RE
+
+.B \-\-config\-file
+.I config\-file
+.br
+.RS
+Specify a configuration file to use.
+
+When this option is specified, neither the system\-wide configuration file
+/etc/lcovrc, nor the per\-user configuration file ~/.lcovrc is read.
+
+This option may be useful when there is a need to run several
+instances of
+.B lcov
+with different configuration file options in parallel.
+.RE
+
+.B \-\-convert\-filenames
+.br
+.RS
+Convert filenames when applying diff.
+
+Use this option together with \-\-diff to rename the file names of processed
+data sets according to the data provided by the diff.
+.RE
+
+.B \-\-diff
+.I tracefile
+.I difffile
+.br
+.RS
+Convert coverage data in
+.I tracefile
+using source code diff file
+.IR difffile .
+
+Use this option if you want to merge coverage data from different source code
+levels of a program, e.g. when you have data taken from an older version
+and want to combine it with data from a more current version.
+.B lcov
+will try to map source code lines between those versions and adjust the coverage
+data respectively.
+.I difffile
+needs to be in unified format, i.e. it has to be created using the "\-u" option
+of the
+.B diff
+tool.
+
+Note that lines which are not present in the old version will not be counted
+as instrumented, therefore tracefiles resulting from this operation should
+not be interpreted individually but together with other tracefiles taken
+from the newer version. Also keep in mind that converted coverage data should
+only be used for overview purposes as the process itself introduces a loss
+of accuracy.
+
+The result of the diff operation will be written to stdout or the tracefile
+specified with \-o.
+
+Only one of  \-z, \-c, \-a, \-e, \-r, \-l, \-\-diff or \-\-summary may be
+specified at a time.
+.RE
+
+.B \-d
+.I directory
+.br
+.B \-\-directory
+.I  directory
+.br
+.RS
+Use .da files in
+.I directory
+instead of kernel.
+
+If you want to work on coverage data for a user space program, use this
+option to specify the location where the program was compiled (that's
+where the counter files ending with .da will be stored).
+
+Note that you may specify this option more than once.
+.RE
+
+.B \-\-exclude
+.I pattern
+.br
+.RS
+Exclude source files matching
+.IR pattern .
+
+Use this switch if you want to exclude coverage data for a particular set
+of source files matching any of the given patterns. Multiple patterns can be
+specified by using multiple
+.B --exclude
+command line switches. The
+.I patterns
+will be interpreted as shell wildcard patterns (note that they may need to be
+escaped accordingly to prevent the shell from expanding them first).
+
+Can be combined with the
+.B --include
+command line switch. If a given file matches both the include pattern and the
+exclude pattern, the exclude pattern will take precedence.
+.RE
+
+.B \-\-external
+.br
+.B \-\-no\-external
+.br
+.RS
+Specify whether to capture coverage data for external source files.
+
+External source files are files which are not located in one of the directories
+specified by \-\-directory or \-\-base\-directory. Use \-\-external to include
+external source files while capturing coverage data or \-\-no\-external to
+ignore this data.
+
+Data for external source files is
+.B included
+by default.
+.RE
+
+.B \-e
+.I tracefile
+.I pattern
+.br
+.B \-\-extract
+.I tracefile
+.I pattern
+.br
+.RS
+Extract data from
+.IR tracefile .
+
+Use this switch if you want to extract coverage data for only a particular
+set of files from a tracefile. Additional command line parameters will be
+interpreted as shell wildcard patterns (note that they may need to be
+escaped accordingly to prevent the shell from expanding them first).
+Every file entry in
+.I tracefile
+which matches at least one of those patterns will be extracted.
+
+The result of the extract operation will be written to stdout or the tracefile
+specified with \-o.
+
+Only one of  \-z, \-c, \-a, \-e, \-r, \-l, \-\-diff or \-\-summary may be
+specified at a time.
+.RE
+
+.B \-f
+.br
+.B \-\-follow
+.br
+.RS
+Follow links when searching for .da files.
+.RE
+
+.B \-\-from\-package
+.I package
+.br
+.RS
+Use .da files in
+.I package
+instead of kernel or directory.
+
+Use this option if you have separate machines for build and test and
+want to perform the .info file creation on the build machine. See
+\-\-to\-package for more information.
+.RE
+
+.B \-\-gcov\-tool
+.I tool
+.br
+.RS
+Specify the location of the gcov tool.
+.RE
+
+.B \-h
+.br
+.B \-\-help
+.br
+.RS
+Print a short help text, then exit.
+.RE
+
+.B \-\-include
+.I pattern
+.br
+.RS
+Include source files matching
+.IR pattern .
+
+Use this switch if you want to include coverage data for only a particular set
+of source files matching any of the given patterns. Multiple patterns can be
+specified by using multiple
+.B --include
+command line switches. The
+.I patterns
+will be interpreted as shell wildcard patterns (note that they may need to be
+escaped accordingly to prevent the shell from expanding them first).
+.RE
+
+.B \-\-ignore\-errors
+.I errors
+.br
+.RS
+Specify a list of errors after which to continue processing.
+
+Use this option to specify a list of one or more classes of errors after which
+lcov should continue processing instead of aborting.
+
+.I errors
+can be a comma\-separated list of the following keywords:
+
+.B gcov:
+the gcov tool returned with a non\-zero return code.
+
+.B source:
+the source code file for a data set could not be found.
+
+.B graph:
+the graph file could not be found or is corrupted.
+.RE
+
+.B \-i
+.br
+.B \-\-initial
+.RS
+Capture initial zero coverage data.
+
+Run lcov with \-c and this option on the directories containing .bb, .bbg
+or .gcno files before running any test case. The result is a "baseline"
+coverage data file that contains zero coverage for every instrumented line.
+Combine this data file (using lcov \-a) with coverage data files captured
+after a test run to ensure that the percentage of total lines covered is
+correct even when not all source code files were loaded during the test.
+
+Recommended procedure when capturing data for a test case:
+
+1. create baseline coverage data file
+.RS
+# lcov \-c \-i \-d appdir \-o app_base.info
+.br
+
+.RE
+2. perform test
+.RS
+# appdir/test
+.br
+
+.RE
+3. create test coverage data file
+.RS
+# lcov \-c \-d appdir \-o app_test.info
+.br
+
+.RE
+4. combine baseline and test coverage data
+.RS
+# lcov \-a app_base.info \-a app_test.info \-o app_total.info
+.br
+
+.RE
+.RE
+
+.B \-k
+.I subdirectory
+.br
+.B \-\-kernel\-directory
+.I subdirectory
+.br
+.RS
+Capture kernel coverage data only from
+.IR subdirectory .
+
+Use this option if you don't want to get coverage data for all of the
+kernel, but only for specific subdirectories. This option may be specified
+more than once.
+
+Note that you may need to specify the full path to the kernel subdirectory
+depending on the version of the kernel gcov support.
+.RE
+
+.B \-l
+.I tracefile
+.br
+.B \-\-list
+.I tracefile
+.br
+.RS
+List the contents of the
+.IR tracefile .
+
+Only one of  \-z, \-c, \-a, \-e, \-r, \-l, \-\-diff or \-\-summary may be
+specified at a time.
+.RE
+
+.B \-\-list\-full\-path
+.br
+.B \-\-no\-list\-full\-path
+.br
+.RS
+Specify whether to show full paths during list operation.
+
+Use \-\-list\-full\-path to show full paths during list operation
+or \-\-no\-list\-full\-path to show shortened paths. Paths are
+.B shortened
+by default.
+.RE
+
+.B \-\-no\-markers
+.br
+.RS
+Use this option if you want to get coverage data without regard to exclusion
+markers in the source code file. See
+.BR "geninfo " (1)
+for details on exclusion markers.
+.RE
+
+.B \-\-no\-recursion
+.br
+.RS
+Use this option if you want to get coverage data for the specified directory
+only without processing subdirectories.
+.RE
+
+.B \-o
+.I tracefile
+.br
+.B \-\-output\-file
+.I tracefile
+.br
+.RS
+Write data to
+.I tracefile
+instead of stdout.
+
+Specify "\-" as a filename to use the standard output.
+
+By convention, lcov\-generated coverage data files are called "tracefiles" and
+should have the filename extension ".info".
+.RE
+
+.B \-\-path
+.I path
+.br
+.RS
+Strip path from filenames when applying diff.
+
+Use this option together with \-\-diff to tell lcov to disregard the specified
+initial path component when matching between tracefile and diff filenames.
+.RE
+
+.B \-q
+.br
+.B \-\-quiet
+.br
+.RS
+Do not print progress messages.
+
+This option is implied when no output filename is specified to prevent
+progress messages to mess with coverage data which is also printed to
+the standard output.
+.RE
+
+.B \-\-rc
+.IR keyword = value
+.br
+.RS
+Override a configuration directive.
+
+Use this option to specify a
+.IR keyword = value
+statement which overrides the corresponding configuration statement in
+the lcovrc configuration file. You can specify this option more than once
+to override multiple configuration statements.
+See
+.BR lcovrc (5)
+for a list of available keywords and their meaning.
+.RE
+
+.B \-r
+.I tracefile
+.I pattern
+.br
+.B \-\-remove
+.I tracefile
+.I pattern
+.br
+.RS
+Remove data from
+.IR tracefile .
+
+Use this switch if you want to remove coverage data for a particular
+set of files from a tracefile. Additional command line parameters will be
+interpreted as shell wildcard patterns (note that they may need to be
+escaped accordingly to prevent the shell from expanding them first).
+Every file entry in
+.I tracefile
+which matches at least one of those patterns will be removed.
+
+The result of the remove operation will be written to stdout or the tracefile
+specified with \-o.
+
+Only one of  \-z, \-c, \-a, \-e, \-r, \-l, \-\-diff or \-\-summary may be
+specified at a time.
+.RE
+
+.B \-\-strip
+.I depth
+.br
+.RS
+Strip path components when applying diff.
+
+Use this option together with \-\-diff to tell lcov to disregard the specified
+number of initial directories when matching tracefile and diff filenames.
+.RE
+
+.B \-\-summary
+.I tracefile
+.br
+.RS
+Show summary coverage information for the specified tracefile.
+
+Note that you may specify this option more than once.
+
+Only one of  \-z, \-c, \-a, \-e, \-r, \-l, \-\-diff or \-\-summary may be
+specified at a time.
+.RE
+
+.B \-t
+.I testname
+.br
+.B \-\-test\-name
+.I testname
+.br
+.RS
+Specify test name to be stored in the tracefile.
+
+This name identifies a coverage data set when more than one data set is merged
+into a combined tracefile (see option \-a).
+
+Valid test names can consist of letters, decimal digits and the underscore
+character ("_").
+.RE
+
+.B \-\-to\-package
+.I package
+.br
+.RS
+Store .da files for later processing.
+
+Use this option if you have separate machines for build and test and
+want to perform the .info file creation on the build machine. To do this,
+follow these steps:
+
+On the test machine:
+.RS
+.br
+\- run the test
+.br
+\- run lcov \-c [\-d directory] \-\-to-package
+.I file
+.br
+\- copy
+.I file
+to the build machine
+.RE
+.br
+
+On the build machine:
+.RS
+.br
+\- run lcov \-c \-\-from-package
+.I file
+[\-o and other options]
+.RE
+.br
+
+This works for both kernel and user space coverage data. Note that you might
+have to specify the path to the build directory using \-b with
+either \-\-to\-package or \-\-from-package. Note also that the package data
+must be converted to a .info file before recompiling the program or it will
+become invalid.
+.RE
+
+.B \-v
+.br
+.B \-\-version
+.br
+.RS
+Print version number, then exit.
+.RE
+
+.B \-z
+.br
+.B \-\-zerocounters
+.br
+.RS
+Reset all execution counts to zero.
+
+By default tries to reset kernel execution counts. Use the \-\-directory
+option to reset all counters of a user space program.
+
+Only one of  \-z, \-c, \-a, \-e, \-r, \-l, \-\-diff or \-\-summary may be
+specified at a time.
+.RE
+
+.SH FILES
+
+.I /etc/lcovrc
+.RS
+The system\-wide configuration file.
+.RE
+
+.I ~/.lcovrc
+.RS
+The per\-user configuration file.
+.RE
+
+.SH AUTHOR
+Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
+
+.SH SEE ALSO
+.BR lcovrc (5),
+.BR genhtml (1),
+.BR geninfo (1),
+.BR genpng (1),
+.BR gendesc (1),
+.BR gcov (1)
diff --git a/ThirdParty/lcov/man/lcovrc.5 b/ThirdParty/lcov/man/lcovrc.5
new file mode 100644
index 0000000000000000000000000000000000000000..f20d273a92c8ebd48ac8a9f68498024124ef0a8f
--- /dev/null
+++ b/ThirdParty/lcov/man/lcovrc.5
@@ -0,0 +1,913 @@
+.TH lcovrc 5 "LCOV 1.14" 2019\-02\-28 "User Manuals"
+
+.SH NAME
+lcovrc \- lcov configuration file
+
+.SH DESCRIPTION
+The
+.I lcovrc
+file contains configuration information for the
+.B lcov
+code coverage tool (see
+.BR lcov (1)).
+.br
+
+The system\-wide configuration file is located at
+.IR /etc/lcovrc .
+To change settings for a single user, place a customized copy of this file at
+location
+.IR ~/.lcovrc .
+Where available, command\-line options override configuration file settings.
+
+Lines in a configuration file can either be:
+.IP "     *"
+empty lines or lines consisting only of white space characters. These lines are
+ignored.
+.IP "     *"
+comment lines which start with a hash sign ('#'). These are treated like empty
+lines and will be ignored.
+.IP "     *"
+statements in the form
+.RI ' key " = " value '.
+A list of valid statements and their description can be found in
+section 'OPTIONS' below.
+.PP
+
+.B Example configuration:
+.IP
+#
+.br
+# Example LCOV configuration file
+.br
+#
+.br
+
+# External style sheet file
+.br
+#genhtml_css_file = gcov.css
+.br
+
+# Coverage rate limits
+.br
+genhtml_hi_limit = 90
+.br
+genhtml_med_limit = 75
+.br
+
+# Width of line coverage field in source code view
+.br
+genhtml_line_field_width = 12
+.br
+
+# Width of branch coverage field in source code view
+.br
+genhtml_branch_field_width = 16
+.br
+
+# Width of overview image
+.br
+genhtml_overview_width = 80
+.br
+
+# Resolution of overview navigation
+.br
+genhtml_nav_resolution = 4
+.br
+
+# Offset for source code navigation
+.br
+genhtml_nav_offset = 10
+.br
+
+# Do not remove unused test descriptions if non\-zero
+.br
+genhtml_keep_descriptions = 0
+.br
+
+# Do not remove prefix from directory names if non\-zero
+.br
+genhtml_no_prefix = 0
+.br
+
+# Do not create source code view if non\-zero
+.br
+genhtml_no_source = 0
+.br
+
+# Specify size of tabs
+.br
+genhtml_num_spaces = 8
+.br
+
+# Highlight lines with converted\-only data if non\-zero
+.br
+genhtml_highlight = 0
+.br
+
+# Include color legend in HTML output if non\-zero
+.br
+genhtml_legend = 0
+.br
+
+# Include HTML file at start of HTML output
+.br
+#genhtml_html_prolog = prolog.html
+.br
+
+# Include HTML file at end of HTML output
+.br
+#genhtml_html_epilog = epilog.html
+.br
+
+# Use custom HTML file extension
+.br
+#genhtml_html_extension = html
+.br
+
+# Compress all generated html files with gzip.
+.br
+#genhtml_html_gzip = 1
+.br
+
+# Include sorted overview pages
+.br
+genhtml_sort = 1
+.br
+
+# Include function coverage data display
+.br
+#genhtml_function_coverage = 1
+.br
+
+# Include branch coverage data display
+.br
+#genhtml_branch_coverage = 1
+.br
+
+# Specify the character set of all generated HTML pages
+.br
+genhtml_charset=UTF\-8
+.br
+
+# Allow HTML markup in test case description text if non\-zero
+.br
+genhtml_desc_html=0
+.br
+
+# Specify the precision for coverage rates
+.br
+#genhtml_precision=1
+.br
+
+# Show missed counts instead of hit counts
+.br
+#genhtml_missed=1
+.br
+
+# Demangle C++ symbols
+.br
+#genhtml_demangle_cpp=1
+.br
+
+# Location of the gcov tool
+.br
+#geninfo_gcov_tool = gcov
+.br
+
+# Adjust test names if non\-zero
+.br
+#geninfo_adjust_testname = 0
+.br
+
+# Calculate a checksum for each line if non\-zero
+.br
+geninfo_checksum = 0
+.br
+
+# Enable libtool compatibility mode if non\-zero
+.br
+geninfo_compat_libtool = 0
+.br
+
+# Specify whether to capture coverage data for external source
+.br
+# files
+.br
+#geninfo_external = 1
+.br
+
+# Use gcov's --all-blocks option if non-zero
+.br
+#geninfo_gcov_all_blocks = 1
+.br
+
+# Specify compatibility modes (same as \-\-compat option
+.br
+# of geninfo)
+.br
+#geninfo_compat = libtool=on, hammer=auto, split_crc=auto
+.br
+
+# Adjust path to source files by removing or changing path
+.br
+# components that match the specified pattern (Perl regular
+.br
+# expression format)
+.br
+#geninfo_adjust_src_path = /tmp/build => /usr/src
+
+# Specify if geninfo should try to automatically determine
+.br
+# the base-directory when collecting coverage data.
+.br
+geninfo_auto_base = 1
+.br
+
+# Directory containing gcov kernel files
+.br
+lcov_gcov_dir = /proc/gcov
+.br
+
+# Location for temporary directories
+.br
+lcov_tmp_dir = /tmp
+.br
+
+# Show full paths during list operation if non\-zero
+.br
+lcov_list_full_path = 0
+.br
+
+# Specify the maximum width for list output. This value is
+.br
+# ignored when lcov_list_full_path is non\-zero.
+.br
+lcov_list_width = 80
+.br
+
+# Specify the maximum percentage of file names which may be
+.br
+# truncated when choosing a directory prefix in list output.
+.br
+# This value is ignored when lcov_list_full_path is non\-zero.
+.br
+lcov_list_truncate_max = 20
+
+# Specify if function coverage data should be collected and
+.br
+# processed.
+.br
+lcov_function_coverage = 1
+.br
+
+# Specify if branch coverage data should be collected and
+.br
+# processed.
+.br
+lcov_branch_coverage = 0
+.br
+.PP
+
+.SH OPTIONS
+
+.BR genhtml_css_file " ="
+.I filename
+.IP
+Specify an external style sheet file. Use this option to modify the appearance of the HTML output as generated by
+.BR genhtml .
+During output generation, a copy of this file will be placed in the output
+directory.
+.br
+
+This option corresponds to the \-\-css\-file command line option of
+.BR genhtml .
+.br
+
+By default, a standard CSS file is generated.
+.PP
+
+.BR genhtml_hi_limit "  ="
+.I hi_limit
+.br
+.BR genhtml_med_limit " ="
+.I med_limit
+.br
+.IP
+Specify coverage rate limits for classifying file entries. Use this option to
+modify the coverage rates (in percent) for line, function and branch coverage at
+which a result is classified as high, medium or low coverage. This
+classification affects the color of the corresponding entries on the overview
+pages of the HTML output:
+.br
+
+High:   hi_limit  <= rate <= 100        default color: green
+.br
+Medium: med_limit <= rate < hi_limit    default color: orange
+.br
+Low:    0         <= rate < med_limit   default color: red
+.br
+
+Defaults are 90 and 75 percent.
+.PP
+
+.BR genhtml_line_field_width " ="
+.I number_of_characters
+.IP
+Specify the width (in characters) of the source code view column containing
+line coverage information.
+.br
+
+Default is 12.
+.PP
+
+.BR genhtml_branch_field_width " ="
+.I number_of_characters
+.IP
+Specify the width (in characters) of the source code view column containing
+branch coverage information.
+.br
+
+Default is 16.
+.PP
+
+.BR genhtml_overview_width " ="
+.I pixel_size
+.IP
+Specify the width (in pixel) of the overview image created when generating HTML
+output using the \-\-frames option of
+.BR genhtml .
+.br
+
+Default is 80.
+.PP
+
+.BR genhtml_nav_resolution " ="
+.I lines
+.IP
+Specify the resolution of overview navigation when generating HTML output using
+the \-\-frames option of
+.BR genhtml .
+This number specifies the maximum difference in lines between the position a
+user selected from the overview and the position the source code window is
+scrolled to.
+.br
+
+Default is 4.
+.PP
+
+
+.BR genhtml_nav_offset " ="
+.I lines
+.IP
+Specify the overview navigation line offset as applied when generating HTML
+output using the \-\-frames option of
+.BR genhtml .
+.br
+
+Clicking a line in the overview image should show the source code view at
+a position a bit further up, so that the requested line is not the first
+line in the window.  This number specifies that offset.
+.br
+
+Default is 10.
+.PP
+
+
+.BR genhtml_keep_descriptions " ="
+.IR 0 | 1
+.IP
+If non\-zero, keep unused test descriptions when generating HTML output using
+.BR genhtml .
+.br
+
+This option corresponds to the \-\-keep\-descriptions option of
+.BR genhtml .
+.br
+
+Default is 0.
+.PP
+
+.BR genhtml_no_prefix " ="
+.IR 0 | 1
+.IP
+If non\-zero, do not try to find and remove a common prefix from directory names.
+.br
+
+This option corresponds to the \-\-no\-prefix option of
+.BR genhtml .
+.br
+
+Default is 0.
+.PP
+
+.BR genhtml_no_source " ="
+.IR 0 | 1
+.IP
+If non\-zero, do not create a source code view when generating HTML output using
+.BR genhtml .
+.br
+
+This option corresponds to the \-\-no\-source option of
+.BR genhtml .
+.br
+
+Default is 0.
+.PP
+
+.BR genhtml_num_spaces " ="
+.I num
+.IP
+Specify the number of spaces to use as replacement for tab characters in the
+HTML source code view as generated by
+.BR genhtml .
+.br
+
+This option corresponds to the \-\-num\-spaces option of
+.BR genhtml .
+.br
+
+Default is 8.
+
+.PP
+
+.BR genhtml_highlight " ="
+.IR 0 | 1
+.IP
+If non\-zero, highlight lines with converted\-only data in
+HTML output as generated by
+.BR genhtml .
+.br
+
+This option corresponds to the \-\-highlight option of
+.BR genhtml .
+.br
+
+Default is 0.
+.PP
+
+.BR genhtml_legend " ="
+.IR 0 | 1
+.IP
+If non\-zero, include a legend explaining the meaning of color coding in the HTML
+output as generated by
+.BR genhtml .
+.br
+
+This option corresponds to the \-\-legend option of
+.BR genhtml .
+.br
+
+Default is 0.
+.PP
+
+.BR genhtml_html_prolog " ="
+.I filename
+.IP
+If set, include the contents of the specified file at the beginning of HTML
+output.
+
+This option corresponds to the \-\-html\-prolog option of
+.BR genhtml .
+.br
+
+Default is to use no extra prolog.
+.PP
+
+.BR genhtml_html_epilog " ="
+.I filename
+.IP
+If set, include the contents of the specified file at the end of HTML output.
+
+This option corresponds to the \-\-html\-epilog option of
+.BR genhtml .
+.br
+
+Default is to use no extra epilog.
+.PP
+
+.BR genhtml_html_extension " ="
+.I extension
+.IP
+If set, use the specified string as filename extension for generated HTML files.
+
+This option corresponds to the \-\-html\-extension option of
+.BR genhtml .
+.br
+
+Default extension is "html".
+.PP
+
+.BR genhtml_html_gzip " ="
+.IR 0 | 1
+.IP
+If set, compress all html files using gzip.
+
+This option corresponds to the \-\-html\-gzip option of
+.BR genhtml .
+.br
+
+Default is 0.
+.PP
+
+.BR genhtml_sort " ="
+.IR 0 | 1
+.IP
+If non\-zero, create overview pages sorted by coverage rates when generating
+HTML output using
+.BR genhtml .
+.br
+
+This option can be set to 0 by using the \-\-no\-sort option of
+.BR genhtml .
+.br
+
+Default is 1.
+.PP
+
+.BR genhtml_function_coverage " ="
+.IR 0 | 1
+.IP
+If non\-zero, include function coverage data when generating HTML output using
+.BR genhtml .
+.br
+
+This option can be set to 0 by using the \-\-no\-function\-coverage option of
+.BR genhtml .
+.br
+
+Default is 1.
+.PP
+
+.BR genhtml_branch_coverage " ="
+.IR 0 | 1
+.IP
+If non\-zero, include branch coverage data when generating HTML output using
+.BR genhtml .
+.br
+
+This option can be set to 0 by using the \-\-no\-branch\-coverage option of
+.BR genhtml .
+.br
+
+Default is 1.
+.PP
+
+.BR genhtml_charset " ="
+.I charset
+.IP
+Specify the character set of all generated HTML pages.
+.br
+
+Use this option if the source code contains characters which are not
+part of the default character set. Note that this option is ignored
+when a custom HTML prolog is specified (see also
+.BR genhtml_html_prolog ).
+.br
+
+Default is UTF-8.
+.PP
+
+.BR genhtml_demangle_cpp " ="
+.IR 0 | 1
+.IP
+If non-zero, demangle C++ function names in function overviews.
+
+Set this option to one if you want to convert C++ internal function
+names to human readable format for display on the HTML function overview
+page.  This option requires that the c++filt tool is installed (see
+.BR c++filt (1)
+).
+.br
+
+This option corresponds to the \-\-demangle\-cpp command line option of
+.BR genhtml .
+.br
+
+Default is 0.
+.PP
+
+.BR genhtml_desc_html " ="
+.IR 0 | 1
+.IP
+If non-zero, test case descriptions may contain HTML markup.
+
+Set this option to one if you want to embed HTML markup (for example to
+include links) in test case descriptions. When set to zero, HTML markup
+characters will be escaped to show up as plain text on the test case
+description page.
+.br
+
+Default is 0.
+.PP
+
+.BR genhtml_precision " ="
+.IR  1 | 2 | 3 | 4
+.IP
+Specify how many digits after the decimal-point should be used for
+displaying coverage rates.
+.br
+
+Default is 1.
+.PP
+.BR genhtml_missed " ="
+.IR  0 | 1
+.IP
+If non-zero, the count of missed lines, functions, or branches is shown
+as negative numbers in overview pages.
+.br
+
+Default is 0.
+.PP
+
+.
+.BR geninfo_gcov_tool " ="
+.I path_to_gcov
+.IP
+Specify the location of the gcov tool (see
+.BR gcov (1))
+which is used to generate coverage information from data files.
+.br
+
+Default is 'gcov'.
+.PP
+
+.BR geninfo_adjust_testname " ="
+.IR 0 | 1
+.IP
+If non\-zero,  adjust test names to include operating system information
+when capturing coverage data.
+.br
+
+Default is 0.
+.PP
+
+.BR geninfo_checksum " ="
+.IR 0 | 1
+.IP
+If non\-zero, generate source code checksums when capturing coverage data.
+Checksums are useful to prevent merging coverage data from incompatible
+source code versions but checksum generation increases the size of coverage
+files and the time used to generate those files.
+.br
+
+This option corresponds to the \-\-checksum and \-\-no\-checksum command line
+option of
+.BR geninfo .
+.br
+
+Default is 0.
+.PP
+
+.BR geninfo_compat_libtool " ="
+.IR 0 | 1
+.IP
+If non\-zero, enable libtool compatibility mode. When libtool compatibility
+mode is enabled, lcov will assume that the source code relating to a .da file
+located in a directory named ".libs" can be found in its parent directory.
+.br
+
+This option corresponds to the \-\-compat\-libtool and \-\-no\-compat\-libtool
+command line option of
+.BR geninfo .
+.br
+
+Default is 1.
+.PP
+
+.BR geninfo_external " ="
+.IR 0 | 1
+.IP
+If non\-zero, capture coverage data for external source files.
+
+External source files are files which are not located in one of the directories
+(including sub-directories)
+specified by the \-\-directory or \-\-base\-directory options of
+.BR lcov / geninfo .
+
+Default is 1.
+.PP
+
+.BR geninfo_gcov_all_blocks " ="
+.IR 0 | 1
+.IP
+If non\-zero, call the gcov tool with option --all-blocks.
+
+Using --all-blocks will produce more detailed branch coverage information for
+each line. Set this option to zero if you do not need detailed branch coverage
+information to speed up the process of capturing code coverage or to work
+around a bug in some versions of gcov which will cause it to endlessly loop
+when analysing some files.
+
+Default is 1.
+.PP
+
+.BR geninfo_compat " ="
+.IR mode = value [, mode = value ,...]
+.IP
+Specify that geninfo should enable one or more compatibility modes
+when capturing coverage data.
+
+This option corresponds to the \-\-compat command line option of
+.BR geninfo .
+
+Default is 'libtool=on, hammer=auto, split_crc=auto'.
+.PP
+
+.BR geninfo_adjust_src_path " ="
+.IR pattern " => " replacement
+.br
+.BR geninfo_adjust_src_path " ="
+.I pattern
+.IP
+Adjust source paths when capturing coverage data.
+
+Use this option in situations where geninfo cannot find the correct
+path to source code files of a project. By providing a
+.I pattern
+in Perl regular expression format (see
+.BR perlre (1))
+and an optional replacement string, you can instruct geninfo to
+remove or change parts of the incorrect source path.
+
+.B Example:
+.br
+
+1. When geninfo reports that it cannot find source file
+.br
+
+    /path/to/src/.libs/file.c
+.br
+
+while the file is actually located in
+.br
+
+    /path/to/src/file.c
+.br
+
+use the following parameter:
+.br
+
+    geninfo_adjust_src_path = /.libs
+
+This will remove all "/.libs" strings from the path.
+
+2. When geninfo reports that it cannot find source file
+.br
+
+    /tmp/build/file.c
+.br
+
+while the file is actually located in
+.br
+
+    /usr/src/file.c
+.br
+
+use the following parameter:
+.br
+
+    geninfo_adjust_src_path = /tmp/build => /usr/src
+.br
+
+This will change all "/tmp/build" strings in the path to "/usr/src".
+.PP
+
+.BR geninfo_auto_base " ="
+.IR 0 | 1
+.IP
+If non\-zero, apply a heuristic to determine the base directory when
+collecting coverage data.
+.br
+
+Use this option when using geninfo on projects built with libtool or
+similar build environments that work with multiple base directories,
+i.e. environments, where the current working directory when invoking the
+compiler is not the same directory in which the source code file is
+located, and in addition, is different between files of the same project.
+.br
+
+Default is 1.
+.PP
+
+.BR lcov_gcov_dir " ="
+.I path_to_kernel_coverage_data
+.IP
+Specify the path to the directory where kernel coverage data can be found
+or leave undefined for auto-detection.
+.br
+
+Default is auto-detection.
+.PP
+
+.BR lcov_tmp_dir " ="
+.I temp
+.IP
+Specify the location of a directory used for temporary files.
+.br
+
+Default is '/tmp'.
+.PP
+
+.BR lcov_list_full_path " ="
+.IR 0 | 1
+.IP
+If non-zero, print the full path to source code files during a list operation.
+.br
+
+This option corresponds to the \-\-list\-full\-path option of
+.BR lcov .
+.br
+
+Default is 0.
+.PP
+
+.BR lcov_list_width " ="
+.IR width
+.IP
+Specify the maximum width for list output. This value is ignored when
+lcov_list_full_path is non\-zero.
+.br
+
+Default is 80.
+.PP
+
+.BR lcov_list_truncate_max
+.B " ="
+.IR percentage
+.IP
+Specify the maximum percentage of file names which may be truncated when
+choosing a directory prefix in list output. This value is ignored when
+lcov_list_full_path is non\-zero.
+.br
+
+Default is 20.
+.PP
+
+.BR lcov_function_coverage " ="
+.IR 0 | 1
+.IP
+Specify whether lcov should handle function coverage data.
+.br
+
+Setting this option to 0 can reduce memory and CPU time consumption
+when lcov is collecting and processing coverage data, as well as
+reduce the size of the resulting data files. Note that setting
+.B genhtml_function_coverage
+will override this option for HTML generation.
+.br
+
+Default is 1.
+.PP
+
+.BR lcov_branch_coverage " ="
+.IR 0 | 1
+.IP
+Specify whether lcov should handle branch coverage data.
+.br
+
+Setting this option to 0 can reduce memory and CPU time consumption
+when lcov is collecting and processing coverage data, as well as
+reduce the size of the resulting data files. Note that setting
+.B genhtml_branch_coverage
+will override this option for HTML generation.
+.br
+
+Default is 0.
+.PP
+
+.BR lcov_excl_line " ="
+.I expression
+.IP
+Specify the regular expression of lines to exclude.
+.br
+
+Default is 'LCOV_EXCL_LINE'.
+.PP
+
+.BR lcov_excl_br_line " ="
+.I expression
+.IP
+Specify the regular expression of lines to exclude from branch coverage.
+.br
+
+Default is 'LCOV_EXCL_BR_LINE'.
+.PP
+
+.SH FILES
+
+.TP
+.I /etc/lcovrc
+The system\-wide
+.B lcov
+configuration file.
+
+.TP
+.I ~/.lcovrc
+The individual per\-user configuration file.
+.PP
+
+.SH SEE ALSO
+.BR lcov (1),
+.BR genhtml (1),
+.BR geninfo (1),
+.BR gcov (1)
diff --git a/ThirdParty/lcov/rpm/lcov.spec b/ThirdParty/lcov/rpm/lcov.spec
new file mode 100644
index 0000000000000000000000000000000000000000..e96c8d47bd08ce9c2c185c0ac417f88b1e25e77f
--- /dev/null
+++ b/ThirdParty/lcov/rpm/lcov.spec
@@ -0,0 +1,59 @@
+Summary: A graphical GCOV front-end
+Name: lcov
+Version: 1.14
+Release: 1
+License: GPLv2+
+Group: Development/Tools
+URL: http://ltp.sourceforge.net/coverage/lcov.php
+Source0: http://downloads.sourceforge.net/ltp/%{name}-%{version}.tar.gz
+BuildRoot: %{_tmppath}/%{name}-%{version}-root
+BuildArch: noarch
+Requires: perl >= 5.8.8
+
+%description
+LCOV is a graphical front-end for GCC's coverage testing tool gcov. It collects
+gcov data for multiple source files and creates HTML pages containing the
+source code annotated with coverage information. It also adds overview pages
+for easy navigation within the file structure.
+
+%prep
+%setup -q -n %{name}-%{version}
+
+%build
+exit 0
+
+%install
+rm -rf $RPM_BUILD_ROOT
+make install DESTDIR=$RPM_BUILD_ROOT PREFIX=/usr CFG_DIR=/etc
+
+%clean
+rm -rf $RPM_BUILD_ROOT
+
+%files
+%defattr(-,root,root)
+/usr/bin/*
+/usr/share/man/man*/*
+%config /etc/*
+
+%changelog
+* Mon Aug 22 2016 Peter Oberparleiter (Peter.Oberparleiter@de.ibm.com)
+- updated "make install" call to work with PREFIX Makefile changes
+
+* Mon May 07 2012 Peter Oberparleiter (Peter.Oberparleiter@de.ibm.com)
+- added dependency on perl 5.8.8 for >>& open mode support
+
+* Wed Aug 13 2008 Peter Oberparleiter (Peter.Oberparleiter@de.ibm.com)
+- changed description + summary text
+
+* Mon Aug 20 2007 Peter Oberparleiter (Peter.Oberparleiter@de.ibm.com)
+- fixed "Copyright" tag
+
+* Mon Jul 14 2003 Peter Oberparleiter (Peter.Oberparleiter@de.ibm.com)
+- removed variables for version/release to support source rpm building
+- added initial rm command in install section
+
+* Mon Apr 7 2003 Peter Oberparleiter (Peter.Oberparleiter@de.ibm.com)
+- implemented variables for version/release
+
+* Fri Oct 18 2002 Peter Oberparleiter (Peter.Oberparleiter@de.ibm.com)
+- created initial spec file
diff --git a/ThirdParty/lcov/test/Makefile b/ThirdParty/lcov/test/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..ecb96042aab0bb2aa802cb9316323e2f384e7929
--- /dev/null
+++ b/ThirdParty/lcov/test/Makefile
@@ -0,0 +1,27 @@
+include common.mak
+
+TESTDIRS   := $(sort $(patsubst %/,%,$(dir $(wildcard */Makefile))))
+
+help: info
+
+info:
+	echo "Available make targets:"
+	echo "  test   : perform self-tests"
+	echo "  clean  : remove all temporary files"
+	echo ""
+	echo "Available make variables:"
+	echo "  SIZE   : specify size of test data (small, medium, large)"
+	echo "  V      : specify level of verbosity (0, 1, 2)"
+
+test:
+	for TEST in $(TESTDIRS) ; do \
+		make -C $$TEST test ; \
+	done
+
+clean:
+	rm -rf *.info *.counts test.log src/
+	for TEST in $(TESTDIRS) ; do \
+		make -C $$TEST clean ; \
+	done
+
+.PHONY: help info test clean
diff --git a/ThirdParty/lcov/test/bin/common b/ThirdParty/lcov/test/bin/common
new file mode 100644
index 0000000000000000000000000000000000000000..a8b527deda0567c093e3bceebe286ecbb4c8c6c6
--- /dev/null
+++ b/ThirdParty/lcov/test/bin/common
@@ -0,0 +1,103 @@
+function elapsed_to_ms()
+{
+	local ELAPSED=$1
+	local IFS=:.
+	local MS
+
+	set -- $ELAPSED
+	if [ $# -eq 3 ] ; then
+		let MS=${3#0}*10+${2#0}*1000+$1*60000
+	else
+		let MS=${4#0}*10+${3#0}*1000+${2#0}*60000+$1*3600000
+	fi
+
+	echo $MS
+}
+
+function t_timestamp()
+{
+	date +"%Y-%m-%d %H:%M:%S %z"
+}
+
+function t_marker()
+{
+	echo
+	echo "======================================================================"
+}
+
+function t_detail()
+{
+	local KEY=$1
+	local VALUE=$2
+	local DOTS=" ............"
+
+	printf "%-.12s: %s\n" "$KEY$DOTS" "$VALUE"
+}
+
+function t_announce()
+{
+	local TESTNAME="$1"
+
+	printf "$BOLD%-.30s$RESET " "$TESTNAME .............................."
+	t_marker >> "$LOGFILE"
+	t_detail "DATE" "$(t_timestamp)" >> "$LOGFILE"
+	t_detail "TESTNAME" "$TESTNAME" >> "$LOGFILE"
+}
+
+function t_result()
+{
+	local COLOR="$1"
+	local TEXT="$2"
+
+	printf "[$COLOR$TEXT$RESET]"
+}
+
+function t_pass()
+{
+	local TESTNAME="$1"
+
+	t_result "$GREEN" "pass"
+	echo "pass $TESTNAME" >> "$COUNTFILE"
+}
+
+function t_fail()
+{
+	local TESTNAME="$1"
+
+	t_result "$RED" "fail"
+	echo "fail $TESTNAME" >> "$COUNTFILE"
+}
+
+function t_kill()
+{
+	local TESTNAME="$1"
+
+	t_result "$RED" "kill"
+	echo "fail $TESTNAME" >> "$COUNTFILE"
+}
+
+function t_skip()
+{
+	local TESTNAME="$1"
+
+	t_result "$BLUE" "skip"
+	echo "skip $TESTNAME" >> "$COUNTFILE"
+}
+
+function t_indent()
+{
+	sed -e 's/^/  /'
+}
+
+LOGFILE="$TOPDIR/test.log"
+COUNTFILE="$TOPDIR/test.counts"
+TIMEFILE="$TOPDIR/test.time"
+
+if [ -t 1 ] ; then
+	RED="\e[31m"
+	GREEN="\e[32m"
+	BLUE="\e[34m"
+	BOLD="\e[1m"
+	DEFAULT="\e[39m"
+	RESET="\e[0m"
+fi
diff --git a/ThirdParty/lcov/test/bin/mkinfo b/ThirdParty/lcov/test/bin/mkinfo
new file mode 100755
index 0000000000000000000000000000000000000000..5231aeac5d12ce32f2230581a9741f354a19bdd1
--- /dev/null
+++ b/ThirdParty/lcov/test/bin/mkinfo
@@ -0,0 +1,952 @@
+#!/usr/bin/env perl
+#
+# Copyright IBM Corp. 2017
+#
+# Usage: mkinfo <config_file> [-o <output_dir>] [--seed <seed>]
+#               [<key>=<value>...]
+# 
+# Create a fake lcov code coverage data file and optionally the corresponding
+# source tree. DATA_FILE contains all specifications for creating the data
+# file. Directives can be overridden using KEY=VALUE specifications with KEY
+# being in the form SECTION.KEY. SEED specifies the number used to initialize
+# the pseudo random number generator.
+# 
+# Example:
+# mkinfo profiles/small -o src files.numfiles=12
+#
+
+use strict;
+use warnings;
+
+use Getopt::Long;
+use Cwd qw(abs_path getcwd);
+use File::Path qw(make_path);
+use File::Basename;
+use Data::Dumper;
+
+my $MAX_TAKEN	= 1000;
+my $use_colors	= -t STDOUT;
+my $BOLD	= $use_colors ? "\033[1m" : "";
+my $RESET	= $use_colors ? "\033[0m" : "";
+
+sub usage()
+{
+	print(<<EOF)
+Usage: $0 <config_file> [-o <output_dir>] [--seed <seed>] [<key>=<value>...]
+
+Create a fake lcov code coverage data file and optionally the corresponding
+source tree. DATA_FILE contains all specifications for creating the data
+file. Directives can be overridden using KEY=VALUE specifications with KEY
+being in the form SECTION.KEY. SEED specifies the number used to initialize
+the pseudo random number generator.
+ 
+Example:
+$0 profiles/small -o src files.numfiles=12
+EOF
+}
+
+sub read_config($)
+{
+	my ($filename) = @_;
+	my $fd;
+	my %config;
+	my $section;
+
+	open($fd, "<", $filename) or die("Could not open $filename: $!\n");
+	while (my $line = <$fd>) {
+		my ($key, $value);
+
+		$line =~ s/(^\s*|\s*$)//g;
+		next if ($line eq "" || $line =~ /^#/);
+		if ($line =~ /^\[\s*(\S+)\s*]$/) {
+			$section = $1;
+			next;
+		}
+		if ($line !~ /^(\S+)\s*=\s*(.*)$/) {
+			die("$filename:$.: Unknown line format: $line\n");
+		}
+		($key, $value) = ($1, $2);
+		if (!defined($section)) {
+			die("$filename:$.: Directive outside of section\n");
+		}
+		$config{$section}->{$1} = $2;
+	}
+	close($fd);
+
+	return \%config;
+}
+
+sub apply_config($$)
+{
+	my ($config, $directive) = @_;
+
+	for my $dir (@$directive) {
+		if ($dir !~ /^([^\.]+)\.([^=]+)=(.*)$/) {
+			die("Unknown directive format: $dir\n");
+		}
+		$config->{$1}->{$2} = $3;
+	}
+}
+
+sub get_value($$;$)
+{
+	my ($config, $dir, $default) = @_;
+	my ($section, $key, $value);
+
+	if ($dir !~ /^([^\.]+)\.([^=]+)$/) {
+		die("$0: Internal error: Unknown key format: $dir\n");
+	}
+	($section, $key) = ($1, $2);
+
+	$value = $config->{$section}->{$key};
+
+	if (!defined($value)) {
+		if (!defined($default)) {
+			die("$0: Missing config value for $dir\n");
+		}
+		$value = $default;
+	}
+
+	return $value;
+}
+
+sub get_int($$;$$$)
+{
+	my ($config, $dir, $default, $min, $max) = @_;
+	my $value = get_value($config, $dir, $default);
+
+	if ($value !~ /^\d+$/) {
+		die("$0: Config value $dir must be an integer: $value\n");
+	}
+	$value = int($value);
+	if (defined($min) && $value < $min) {
+		die("$0: Config value $dir is too low (min $min): $value\n");
+	}
+	if (defined($max) && $value > $max) {
+		die("$0: Config value $dir is too high (max $max): $value\n");
+	}
+
+	return int($value);
+}
+
+sub get_list($$;$)
+{
+	my ($config, $dir, $default) = @_;
+	my $value = get_value($config, $dir, $default);
+	my @list = split(/\s+/, $value);
+
+	return \@list;
+}
+
+sub randlist($)
+{
+	my ($list) = @_;
+
+	return "" if (!@$list);
+	return $list->[int(rand(scalar(@$list)))];
+}
+
+sub randbool()
+{
+	return int(rand(2));
+}
+
+# Reduce LIST to PERCENTAGE of its former size.
+sub reduce_list_per($$)
+{
+	my ($list, $percentage) = @_;
+	my $remove;
+
+	$remove = int((100 - $percentage) * scalar(@$list) / 100);
+
+	for (my $i = 0; $i < $remove; $i++) {
+		splice(@$list, int(rand(scalar(@$list))), 1);
+	}
+}
+
+# Reduce LIST to NUM items.
+sub reduce_list_num($$)
+{
+	my ($list, $num) = @_;
+	my $remove;
+
+	$remove = scalar(@$list) - $num;
+
+	for (my $i = 0; $i < $remove; $i++) {
+		splice(@$list, int(rand(scalar(@$list))), 1);
+	}
+}
+
+sub _gen_filename($$)
+{
+	my ($c, $root) = @_;
+	my $ltop = get_list($c, "files.top", "");
+	my $lsub = get_list($c, "files.sub", "");
+	my $lsubsub = get_list($c, "files.subsub", "");
+	my $lprefix = get_list($c, "files.prefix");
+	my $lsuffix = get_list($c, "files.suffix", "");
+	my $lext = get_list($c, "files.ext");
+	my ($top, $sub, $subsub, $prefix, $suffix, $ext) =
+		("", "", "", "", "", "");
+	my $filename = "";
+
+	$top = randlist($ltop) if (randbool());
+	$sub = randlist($lsub) if (randbool());
+	$subsub = randlist($lsubsub) if (randbool());
+	$prefix = randlist($lprefix);
+	$suffix = randlist($lsuffix) if (randbool());
+	$ext = randlist($lext);
+
+	$filename = $root;
+	$filename .= "/".$top if ($top ne "");
+	$filename .= "/".$sub if ($sub ne "");
+	$filename .= "/".$subsub if ($subsub ne "");
+	$filename .= "/".$prefix;
+	$filename .= "_".$suffix if ($suffix ne "");
+	$filename .= $ext;
+	$filename =~ s#^//#/#;
+
+	return $filename;
+}
+
+sub gen_filename($$$)
+{
+	my ($c, $root, $filenames) = @_;
+	my $filename;
+
+	do {
+		$filename = _gen_filename($c, $root);
+	} while ($filenames->{$filename});
+	$filenames->{$filename} = 1;
+
+	return $filename;
+}
+
+sub gen_lines($$)
+{
+	my ($c, $length) = @_;
+	my @lines = 1 .. $length;
+	my $percent = get_int($c, "lines.instrumented", undef, 0, 100);
+
+	reduce_list_per(\@lines, $percent);
+
+	return \@lines;
+}
+
+sub gen_fnname($$)
+{
+	my ($c, $hash) = @_;
+	my $lverb = get_list($c, "functions.verb");
+	my $ladj = get_list($c, "functions.adj", "");
+	my $lnoun = get_list($c, "functions.noun", "");
+	my ($verb, $adj, $noun) = ("", "", "");
+	my $fnname;
+
+	$verb = randlist($lverb);
+	$adj = randlist($ladj) if (randbool());
+	$noun = randlist($lnoun) if (randbool());
+
+	$fnname = $verb;
+	$fnname .= "_".$adj if ($adj ne "");
+	$fnname .= "_".$noun if ($noun ne "");
+
+	if (exists($hash->{$fnname})) {
+		my $i = 2;
+
+		while (exists($hash->{$fnname.$i})) {
+			$i++;
+		}
+		$fnname .= $i;
+	}
+	$hash->{$fnname} = 1;
+
+	return $fnname;
+}
+
+sub gen_functions($$)
+{
+	my ($c, $lines) = @_;
+	my @fnlines;
+	my @functions;
+	my %names;
+	my $percent = get_int($c, "functions.perinstrumented", undef, 0, 100);
+
+	@fnlines = @$lines;
+	reduce_list_per(\@fnlines, $percent);
+
+	foreach my $fnline (@fnlines) {
+		push(@functions, [ $fnline, gen_fnname($c, \%names) ]);
+	}
+
+	return \@functions;
+}
+
+
+# Returns a value distribution object. This object can be used to randomly
+# choose one element from a list of elements with a given relative distribution.
+#
+# dist: [ sumprob, probs]
+# sumprob: Sum of all probabilities
+# probs: [ prob1, prob2, ... ]
+# prob: [ num, x ]
+# num: Value
+sub get_dist($$;$)
+{
+	my ($c, $dir, $default) = @_;
+	my $list = get_list($c, $dir, $default);
+	my $sumprob = 0;
+	my @probs;
+
+	foreach my $spec (@$list) {
+		my ($n, $p);
+
+		if ($spec =~ /^(\d+):(\d+)$/) {
+			($n, $p) = ($1, $2);
+		} elsif ($spec =~ /^(\d+)$/) {
+			$n = $1;
+			$p = 1;
+		} else {
+			die("$0: Config value $dir must be a distribution ".
+			    "list (a:p1 b:p2 ...)\n");
+		}
+		$sumprob += $p;
+		push(@probs, [ $n, $sumprob ]);
+	}
+
+	return [ $sumprob, \@probs ];
+}
+
+sub rand_dist($)
+{
+	my ($dist) = @_;
+	my ($sumprob, $probs) = @$dist;
+	my $r = int(rand($sumprob));
+
+	foreach my $prob (@$probs) {
+		my ($num, $x) = @$prob;
+		return $num if ($r < $x);
+	}
+
+	die("Internal error: Incomplete distribution list\n");
+}
+
+sub gen_branches($$)
+{
+	my ($c, $lines) = @_;
+	my $percent = get_int($c, "branches.perinstrumented", undef, 0, 100);
+	my @allblocks = @{get_list($c, "branches.blocks", "0")};
+	my $branchdist = get_dist($c, "branches.branchdist", "2");
+	my @brlines;
+	my @branches;
+
+	@brlines = @$lines;
+	reduce_list_per(\@brlines, $percent);
+
+	foreach my $brline (@brlines) {
+		my @blocks = @allblocks;
+		my $numblocks = int(rand(scalar(@blocks))) + 1;
+
+		reduce_list_num(\@blocks, $numblocks);
+
+		foreach my $block (@blocks) {
+			my $numbranch = rand_dist($branchdist);
+
+			for (my $branch = 0; $branch < $numbranch; $branch++) {
+				push(@branches, [ $brline, $block, $branch]);
+			}
+		}
+	}
+
+	return \@branches;
+}
+
+sub gen_filesrc($)
+{
+	my ($c) = @_;
+	my ($length, $lines, $functions, $branches);
+	my $do_ln = get_int($c, "lines.enabled");
+	my $do_fn = get_int($c, "functions.enabled");
+	my $do_br = get_int($c, "branches.enabled");
+
+	$length		= 1 + int(rand(get_int($c, "lines.maxlines")));
+	$lines		= gen_lines($c, $length);
+	$functions	= gen_functions($c, $lines) if ($do_fn);
+	$branches	= gen_branches($c, $lines) if ($do_br);
+
+	return [ $length, $lines, $functions, $branches ];
+}
+
+# Generate fake source tree.
+#
+# returns:	[ files, numlns, numfns, numbrs ]
+# files:	filename -> filesrc
+# filesrc:	[ length, lines, functions, branches ]
+# length:	Total number of lines in file
+#
+# lines:	[ line1, line2, ... ]
+#
+# functions:	[ fn1, fn2, ... ]
+# fn:		[ fnline, fnname ]
+# fnline:	Starting line of function
+# fnname:	Function name
+#
+# branches:	[ brdata1, brdata2, ...]
+# brdata:	[ brline, block, branch ]
+# brline:	Line number containing branches
+# block:	Block ID
+# branch:	Branch ID
+#
+sub gen_src($$)
+{
+	my ($c, $root) = @_;
+	my %files;
+	my $numfiles = get_int($c, "files.numfiles");
+	my %filenames;
+	my ($numlns, $numfns, $numbrs) = (0, 0, 0);
+
+	for (my $i = 0; $i < $numfiles; $i++) {
+		my $filename = gen_filename($c, $root, \%filenames);
+		my $filesrc = gen_filesrc($c);
+
+		$files{$filename} = $filesrc;
+		$numlns += scalar(@{$filesrc->[1]}) if (defined($filesrc->[1]));
+		$numfns += scalar(@{$filesrc->[2]}) if (defined($filesrc->[2]));
+		$numbrs += scalar(@{$filesrc->[3]}) if (defined($filesrc->[3]));
+	}
+
+	return [ \%files, $numlns, $numfns, $numbrs ];
+}
+
+sub write_src($)
+{
+	my ($src) = @_;
+	my ($files, $numlns, $numfns, $numbrs) = @$src;
+
+	foreach my $filename (sort(keys(%{$files}))) {
+		my $filesrc = $files->{$filename};
+		my $length = $filesrc->[0];
+		my $dir = dirname($filename);
+		my $fd;
+
+		if (!-d $dir) {
+			make_path($dir) or
+				die("Could not create directory $dir\n");
+		}
+
+		open($fd, ">", $filename) or
+			die("Could not create file $filename: $!\n");
+		for (my $i = 0; $i < $length; $i++) {
+			print($fd "\n");
+		}
+		close($fd);
+	}
+}
+
+sub write_branches($$$$)
+{
+	my ($fd, $branches, $brhits, $iref) = @_;
+	my ($found, $hit) = (0, 0);
+
+	# Line coverage data
+	foreach my $brdata (@$branches) {
+		my $brhit = $brhits->[$$iref++];
+		my ($brline, $block, $branch) = @$brdata;
+
+		$found++;
+		$hit++ if ($brhit ne "-" && $brhit > 0);
+		print($fd "BRDA:$brline,$block,$branch,$brhit\n");
+	}
+	if ($found > 0) {
+		print($fd "BRF:$found\n");
+		print($fd "BRH:$hit\n");
+	}
+}
+
+sub write_lines($$$$)
+{
+	my ($fd, $lines, $lnhist, $iref) = @_;
+	my ($found, $hit) = (0, 0);
+
+	# Line coverage data
+	foreach my $line (@$lines) {
+		my $lnhit = $lnhist->[$$iref++];
+
+		$found++;
+		$hit++ if ($lnhit > 0);
+		print($fd "DA:$line,$lnhit\n");
+	}
+	print($fd "LF:$found\n");
+	print($fd "LH:$hit\n");
+}
+
+sub write_functions($$$$)
+{
+	my ($fd, $functions, $fnhits, $iref) = @_;
+	my ($found, $hit) = (0, 0);
+
+	# Function coverage data
+	foreach my $fn (@$functions) {
+		my ($fnline, $fnname) = @$fn;
+
+		print($fd "FN:$fnline,$fnname\n");
+	}
+	foreach my $fn (@$functions) {
+		my ($fnline, $fnname) = @$fn;
+		my $fnhit = $fnhits->[$$iref++];
+
+		$found++;
+		$hit++ if ($fnhit > 0);
+		print($fd "FNDA:$fnhit,$fnname\n");
+	}
+	print($fd "FNF:$found\n");
+	print($fd "FNH:$hit\n");
+}
+
+sub write_filesrc($$$$$)
+{
+	my ($c, $fd, $filesrc, $hits, $iter) = @_;
+	my ($length, $lines, $functions, $branches) = @$filesrc;
+	my $do_ln = get_int($c, "lines.enabled");
+	my $do_fn = get_int($c, "functions.enabled");
+	my $do_br = get_int($c, "branches.enabled");
+
+	write_functions($fd, $functions, $hits->[1], \$iter->[1]) if ($do_fn);
+	write_branches($fd, $branches, $hits->[2], \$iter->[2]) if ($do_br);
+	write_lines($fd, $lines, $hits->[0], \$iter->[0]) if ($do_ln);
+}
+
+sub write_info($$$$)
+{
+	my ($c, $filename, $src, $hits) = @_;
+	my $files = $src->[0];
+	my $fd;
+	my %iters;
+
+	foreach my $testname (keys(%{$hits})) {
+		$iters{$testname} = [ 0, 0, 0 ];
+	}
+
+	open($fd, ">", $filename) or die("Could not create $filename: $!\n");
+
+	foreach my $filename (sort(keys(%{$files}))) {
+		my $filesrc = $files->{$filename};
+
+		foreach my $testname (sort(keys(%{$hits}))) {
+			my $testhits = $hits->{$testname};
+			my $iter = $iters{$testname};
+
+			print($fd "TN:$testname\n");
+			print($fd "SF:$filename\n");
+
+			write_filesrc($c, $fd, $filesrc, $testhits, $iter);
+
+			print($fd "end_of_record\n");
+		}
+	}
+
+	close($fd);
+}
+
+sub get_hit_found($)
+{
+	my ($list) = @_;
+	my ($hit, $found) = (0, 0);
+
+	foreach my $e (@$list) {
+		$hit++ if ($e ne "-" && $e > 0);
+		$found++;
+	}
+	return ($hit, $found);
+}
+
+sub write_counts($$)
+{
+	my ($filename, $hits) = @_;
+	my $fd;
+	my (@tlnhits, @tfnhits, @tbrhits);
+
+	foreach my $testname (keys(%{$hits})) {
+		my $testhits = $hits->{$testname};
+		my ($lnhits, $fnhits, $brhits) = @$testhits;
+
+		for (my $i = 0; $i < scalar(@$lnhits); $i++) {
+			$tlnhits[$i] += $lnhits->[$i];
+		}
+		for (my $i = 0; $i < scalar(@$fnhits); $i++) {
+			$tfnhits[$i] += $fnhits->[$i];
+		}
+		for (my $i = 0; $i < scalar(@$brhits); $i++) {
+			my $h = $brhits->[$i];
+
+			$h = 0 if ($h eq "-");
+			$tbrhits[$i] += $h;
+		}
+	}
+
+	open($fd, ">", $filename) or die("Could not create $filename: $!\n");
+	print($fd join(" ", get_hit_found(\@tlnhits), get_hit_found(\@tfnhits),
+			    get_hit_found(\@tbrhits))."\n");
+	close($fd);
+}
+
+# A branch hit value for a block that was not hit must be "-". A branch hit
+# value for a block that was hit cannot be "-", but must be "0" if not hit.
+sub sanitize_brhits($)
+{
+	my ($brhits) = @_;
+	my $block_hit = 0;
+
+	foreach my $brhit_ref (@$brhits) {
+		if ($$brhit_ref ne "-" && $$brhit_ref > 0) {
+			$block_hit = 1;
+			last;
+		}
+	}
+	foreach my $brhit_ref (@$brhits) {
+		if (!$block_hit) {
+			$$brhit_ref = "-";
+		} elsif ($$brhit_ref eq "-") {
+			$$brhit_ref = 0;
+		}
+	}
+}
+
+# Ensure coverage rate interdependencies are met
+sub sanitize_hits($$)
+{
+	my ($src, $hits) = @_;
+	my $files = $src->[0];
+
+	foreach my $hits (values(%{$hits})) {
+		my $brhits = $hits->[2];
+		my $i = 0;
+
+		foreach my $filename (sort(keys(%{$files}))) {
+			my $filesrc = $files->{$filename};
+			my $branches = $filesrc->[3];
+			my $lastblock;
+			my $lastline;
+			my @blist;
+
+			foreach my $brdata (@$branches) {
+				my ($brline, $block, $branch) = @$brdata;
+
+				if (!defined($lastblock) ||
+				    $block != $lastblock ||
+				    $brline != $lastline) {
+					sanitize_brhits(\@blist);
+					@blist = ();
+					$lastblock = $block;
+					$lastline = $brline;
+				}
+				push(@blist, \$brhits->[$i++]);
+			}
+			sanitize_brhits(\@blist);
+		}
+	}
+}
+
+# Generate random coverage data
+#
+# returns:	testname -> testhits
+# testhits:	[ lnhits, fnhits, brhits ]
+# lnhits:	[ ln1hit, ln2hit, ... ]
+# lnhit:	Number of times a line was hit by a specific test
+# fnhits:	[ fn1hit, fn2hit, ... ]
+# fnhit:	Number of times a function was hit by a specific test
+# brhits:	[ br1hit, br2hit, ... ]
+# brhit:	Number of times a branch was hit by a specific test
+sub gen_hits($$)
+{
+	my ($c, $src) = @_;
+	my (@lnhits, @fnhits, @brhits);
+	my ($files, $numlns, $numfns, $numbrs) = @$src;
+	my $testnames = get_list($c, "tests.names", "");
+	my %hits;
+
+	$testnames = [ "" ] if (!@$testnames);
+
+	foreach my $testname (@$testnames) {
+		my (@lnhits, @fnhits, @brhits);
+
+		for (my $i = 0; $i < $numlns; $i++) {
+			push(@lnhits, 1 + int(rand($MAX_TAKEN)));
+		}
+
+		for (my $i = 0; $i < $numfns; $i++) {
+			push(@fnhits, 1 + int(rand($MAX_TAKEN)));
+		}
+
+		for (my $i = 0; $i < $numbrs; $i++) {
+			push(@brhits, 1 + int(rand($MAX_TAKEN)));
+		}
+
+		$hits{$testname} = [ \@lnhits, \@fnhits, \@brhits ];
+	}
+
+	sanitize_hits($src, \%hits);
+
+	return \%hits;
+}
+
+# Return a hash containing RATE percent of indices [0..NUM-1].
+sub gen_filter($$)
+{
+	my ($num, $rate) = @_;
+	my @list = (0 .. ($num - 1));
+	my %hash;
+
+	reduce_list_per(\@list, $rate);
+	foreach my $i (@list) {
+		$hash{$i} = 1;
+	}
+
+	return \%hash;
+}
+
+# Zero all entries in LIST identified by the indices in FILTER.
+sub zero_by_filter($$)
+{
+	my ($list, $filter) = @_;
+
+	foreach my $i (keys(%{$filter})) {
+		$list->[$i] = 0;
+	}
+}
+
+# Add a random number of indices between [0..NUM-1] to FILTER.
+sub widen_filter($$)
+{
+	my ($filter, $num) = @_;
+	my @list;
+
+	for (my $i = 0; $i < $num; $i++) {
+		push(@list, $i) if (!exists($filter->{$i}));
+	}
+	reduce_list_per(\@list, int(rand(101)));
+
+	foreach my $i (@list) {
+		$filter->{$i} = 1;
+	}
+}
+
+# Zero coverage data in HITS until the combined coverage rates reach the
+# specified RATEs.
+sub reduce_hits($$$$$)
+{
+	my ($src, $hits, $lnrate, $fnrate, $brrate) = @_;
+	my ($files, $numlns, $numfns, $numbrs) = @$src;
+	my ($lnfilter, $fnfilter, $brfilter);
+
+	$lnfilter = gen_filter($numlns, 100 - $lnrate);
+	$fnfilter = gen_filter($numfns, 100 - $fnrate);
+	$brfilter = gen_filter($numbrs, 100 - $brrate);
+
+	foreach my $testhits (values(%{$hits})) {
+		my ($lnhits, $fnhits, $brhits) = @$testhits;
+
+		zero_by_filter($lnhits, $lnfilter);
+		zero_by_filter($fnhits, $fnfilter);
+		zero_by_filter($brhits, $brfilter);
+
+		# Provide some variation between tests
+		widen_filter($lnfilter, $numlns);
+		widen_filter($fnfilter, $numfns);
+		widen_filter($brfilter, $numbrs);
+	}
+
+	sanitize_hits($src, $hits);
+}
+
+sub zero_list($)
+{
+	my ($list) = @_;
+
+	foreach my $i (@$list) {
+		$i = 0;
+	}
+}
+
+# Zero all coverage in HITS.
+sub zero_hits($$)
+{
+	my ($src, $hits) = @_;
+
+	foreach my $testhits (values(%{$hits})) {
+		my ($lnhits, $fnhits, $brhits) = @$testhits;
+
+		zero_list($lnhits);
+		zero_list($fnhits);
+		zero_list($brhits);
+	}
+
+	sanitize_hits($src, $hits);
+}
+
+# Distribute items from LIST to A and B depending on whether the index for
+# an item is found in FILTER.
+sub split_by_filter($$$$)
+{
+	my ($list, $filter, $a, $b) = @_;
+
+	for (my $i = 0; $i < scalar(@$list); $i++) {
+		if (exists($filter->{$i})) {
+			push(@$a, $list->[$i]);
+			push(@$b, 0);
+		} else {
+			push(@$a, 0);
+			push(@$b, $list->[$i]);
+		}
+	}
+}
+
+sub split_hits($$$)
+{
+	my ($c, $src, $hits) = @_;
+	my ($files, $numlns, $numfns, $numbrs) = @$src;
+	my ($lnsplit, $fnsplit, $brsplit);
+	my (%a, %b);
+
+	$lnsplit = gen_filter($numlns, int(rand(101)));
+	$fnsplit = gen_filter($numfns, int(rand(101)));
+	$brsplit = gen_filter($numbrs, int(rand(101)));
+
+	foreach my $testname (keys(%{$hits})) {
+		my $testhits = $hits->{$testname};
+		my ($lnhits, $fnhits, $brhits) = @$testhits;
+		my (@lnhitsa, @fnhitsa, @brhitsa);
+		my (@lnhitsb, @fnhitsb, @brhitsb);
+
+		split_by_filter($lnhits, $lnsplit, \@lnhitsa, \@lnhitsb);
+		split_by_filter($fnhits, $fnsplit, \@fnhitsa, \@fnhitsb);
+		split_by_filter($brhits, $brsplit, \@brhitsa, \@brhitsb);
+
+		$a{$testname} = [ \@lnhitsa, \@fnhitsa, \@brhitsa ];
+		$b{$testname} = [ \@lnhitsb, \@fnhitsb, \@brhitsb ];
+	}
+
+	sanitize_hits($src, \%a);
+	sanitize_hits($src, \%b);
+
+	return (\%a, \%b);
+}
+
+sub plural($$$)
+{
+	my ($num, $sing, $plur) = @_;
+
+	return $num <= 1 ? $sing : $plur;
+}
+
+sub print_intro($)
+{
+	my ($c) = @_;
+	my $numtests = scalar(@{get_list($c, "tests.names")});
+	my $numfiles = get_int($c, "files.numfiles");
+
+	$numtests = 1 if ($numtests < 1);
+
+	print($BOLD."Creating coverage files ($numtests ".
+	      plural($numtests, "test", "tests").", $numfiles ".
+	      plural($numfiles, "source file", "source files").")\n".$RESET);
+}
+
+sub main()
+{
+	my $opt_help;
+	my $opt_output;
+	my $opt_configfile;
+	my $opt_seed = 0;
+	my $c;
+	my $src;
+	my $hits;
+	my $root;
+	my $enum;
+	my ($a, $b);
+
+	# Parse options
+	if (!GetOptions("output|o=s" => \$opt_output,
+			"seed=s" => \$opt_seed,
+			"help|h" => \$opt_help,
+	)) {
+		print(STDERR "Use $0 --help to get usage information\n");
+		exit(2);
+	}
+
+	if ($opt_help) {
+		usage();
+		exit(0);
+	}
+
+	$opt_configfile = shift(@ARGV);
+	if (!defined($opt_configfile)) {
+		print(STDERR "Please specify a config file\n");
+		exit(2);
+	}
+
+	if (defined($opt_output)) {
+		if (! -d $opt_output) {
+			mkdir($opt_output) or
+				die("$0: Could not create directory ".
+				    "$opt_output: $!\n");
+		}
+		$root = abs_path($opt_output)
+	} else {
+		$root = "/";
+	}
+
+	srand($opt_seed);
+
+	# Get config
+	$c = read_config($opt_configfile);
+	apply_config($c, \@ARGV) if (@ARGV);
+
+	print_intro($c);
+	# Show lines on STDOUT without newline
+	$| = 1;
+
+	# Create source tree
+	print("  Source tree ......... ");
+	$src = gen_src($c, $root);
+	# Write out source code if requested
+	write_src($src) if (defined($opt_output));
+	print("done (");
+	print($src->[1]." lines, ");
+	print($src->[2]." functions, ");
+	print($src->[3]." branches)\n");
+
+	# Write out full-coverage data files
+	print("  Full coverage ....... ");
+	$hits = gen_hits($c, $src);
+	write_info($c, "full.info", $src, $hits);
+	write_counts("full.counts", $hits);
+	print("done\n");
+
+	# Write out data files with target coverage rates
+	print("  Target coverage ..... ");
+	reduce_hits($src, $hits, get_int($c, "lines.covered"),
+				 get_int($c, "functions.covered"),
+				 get_int($c, "branches.covered"));
+	write_info($c, "target.info", $src, $hits);
+	write_counts("target.counts", $hits);
+	print("done\n");
+
+	# Write out partial data files
+	print("  Partial coverage .... ");
+	($a, $b) = split_hits($c, $src, $hits);
+	write_info($c, "part1.info", $src, $a);
+	write_counts("part1.counts", $a);
+	write_info($c, "part2.info", $src, $b);
+	write_counts("part2.counts", $b);
+	print("done\n");
+
+	# Write out zero-coverage data files
+	print("  Zero coverage ....... ");
+	zero_hits($src, $hits);
+	write_info($c, "zero.info", $src, $hits);
+	write_counts("zero.counts", $hits);
+	print("done\n");
+}
+
+main();
+exit(0);
diff --git a/ThirdParty/lcov/test/bin/norminfo b/ThirdParty/lcov/test/bin/norminfo
new file mode 100755
index 0000000000000000000000000000000000000000..9fe0ef2f00dfc7771e9eb636b931f3f41f1ffec4
--- /dev/null
+++ b/ThirdParty/lcov/test/bin/norminfo
@@ -0,0 +1,243 @@
+#!/usr/bin/env perl
+#
+# Copyright IBM Corp. 2017
+#
+# Usage: norminfo <coverage-data-file> [<multiplier>]
+#
+# Normalize coverage data file (ensure stable order), perform some sanity
+# checks, and apply optional multiplier to execution counts.
+#
+
+use strict;
+use warnings;
+
+sub ferr($$$)
+{
+	my ($pos, $filename, $msg) = @_;
+
+	if (defined($pos)) {
+		$pos .= ":";
+	} else {
+		$pos = "";
+	}
+
+	die("$0:$filename:$pos $msg");
+}
+
+sub print_sorted($$$)
+{
+	my ($fd, $info, $multi) = @_;
+	my (%fn, %fns, %fnda, %brda, %da);
+	my ($fnf, $fnh, $brf, $brh, $lf, $lh);
+
+	while (my $line = <$fd>) {
+		$line =~ s/(^\s*|\s*$)//g;
+
+		if ($line =~ /^end_of_record$/) {
+			last;
+		} elsif ($line =~ /^FN:(\d+),(.*)$/) {
+			my ($lineno, $fnname) = ($1, $2);
+
+			if (exists($fn{$lineno})) {
+				ferr($., $info, "Duplicate FN: entry\n");
+			}
+			$fn{$lineno} = $fnname;
+			if (exists($fns{$fnname})) {
+				ferr($., $info, "Duplicate function name\n");
+			}
+			$fns{$fnname} = $lineno;
+		} elsif ($line =~ /^FNDA:(\d+),(.*)$/) {
+			my ($count, $fnname) = ($1, $2);
+
+			if (exists($fnda{$fnname})) {
+				ferr($., $info, "Duplicate FNDA: entry\n");
+			}
+			$fnda{$fnname} = int($count * $multi);
+		} elsif ($line =~ /^FNF:(\d+)$/) {
+			if (defined($fnf)) {
+				ferr($., $info, "Duplicate FNF: entry\n");
+			}
+			$fnf = $1;
+		} elsif ($line =~ /^FNH:(\d+)$/) {
+			if (defined($fnh)) {
+				ferr($., $info, "Duplicate FNH: entry\n");
+			}
+			$fnh = $1;
+		} elsif ($line =~ /^BRDA:(\d+),(\d+),(\d+),(\d+|-)$/) {
+			my ($lineno, $block, $branch, $count) = ($1, $2, $3, $4);
+
+			if (exists($brda{$lineno}->{$block}->{$branch})) {
+				ferr($., $info, "Duplicate BRDA: entry\n");
+			}
+			$count = int($count * $multi) if ($count ne "-");
+			$brda{$lineno}->{$block}->{$branch} = $count;
+
+		} elsif ($line =~ /^BRF:(\d+)$/) {
+			if (defined($brf)) {
+				ferr($., $info, "Duplicate BRF: entry\n");
+			}
+			$brf = $1;
+		} elsif ($line =~ /^BRH:(\d+)$/) {
+			if (defined($brh)) {
+				ferr($., $info, "Duplicate BRH: entry\n");
+			}
+			$brh = $1;
+		} elsif ($line =~ /^DA:(\d+),(\d+)$/) {
+			my ($lineno, $count) = ($1, $2);
+
+			if (exists($da{$lineno})) {
+				ferr($., $info, "Duplicate DA: entry\n");
+			}
+			$da{$lineno} = int($count * $multi);
+		} elsif ($line =~ /^LF:(\d+)$/) {
+			if (defined($lf)) {
+				ferr($., $info, "Duplicate LF: entry\n");
+			}
+			$lf = $1;
+		} elsif ($line =~ /^LH:(\d+)$/) {
+			if (defined($lh)) {
+				ferr($., $info, "Duplicate LH: entry\n");
+			}
+			$lh = $1;
+		} else {
+			ferr($., $info, "Unknown line: $line\n");
+		}
+	}
+
+	# FN:<line>,<fnname>
+	foreach my $lineno (sort({ $a <=> $b } keys(%fn))) {
+		my $fnname = $fn{$lineno};
+		print("FN:$lineno,$fnname\n");
+	}
+
+	# FNDA:<counts>,<fnname>
+	foreach my $fnname (keys(%fnda)) {
+		if (!exists($fns{$fnname})) {
+			ferr(undef, $info, "FNDA entry without FN: $fnname\n");
+		}
+	}
+	foreach my $fnname (sort({ $fns{$a} <=> $fns{$b} } keys(%fnda))) {
+		my $count = $fnda{$fnname};
+		print("FNDA:$count,$fnname\n");
+	}
+	# FNF:<counts>
+	print("FNF:$fnf\n") if (defined($fnf));
+	# FNH:<counts>
+	if (defined($fnh)) {
+		$fnh = 0 if ($multi == 0);
+		print("FNH:$fnh\n");
+	}
+	# BRDA:<line>,<block>,<branch>,<count>
+	foreach my $lineno (sort({ $a <=> $b } keys(%brda))) {
+		my $blocks = $brda{$lineno};
+
+		foreach my $block (sort({ $a <=> $b } keys(%{$blocks}))) {
+			my $branches = $blocks->{$block};
+
+			foreach my $branch (sort({ $a <=> $b }
+					    keys(%{$branches}))) {
+				my $count = $branches->{$branch};
+
+				$count = "-" if ($multi == 0);
+				print("BRDA:$lineno,$block,$branch,$count\n");
+			}
+		}
+
+	}
+	# BRF:<counts>
+	print("BRF:$brf\n") if (defined($brf));
+	# BRH:<counts>
+	if (defined($brh)) {
+		$brh = 0 if ($multi == 0);
+		print("BRH:$brh\n");
+	}
+	# DA:<line>,<counts>
+	foreach my $lineno (sort({ $a <=> $b } keys(%da))) {
+		my $count = $da{$lineno};
+
+		print("DA:$lineno,$count\n");
+	}
+	# LF:<counts>
+	print("LF:$lf\n") if (defined($lf));
+	# LH:<count>
+	if (defined($lh)) {
+		$lh = 0 if ($multi == 0);
+		print("LH:$lh\n");
+	}
+}
+
+sub main()
+{
+	my $infofile = $ARGV[0];
+	my $multi = $ARGV[1];
+	# info: testname -> files
+	# files: infofile -> data
+	# data: [ starting offset, starting line ]
+	my %info;
+	my $fd;
+	my $tn = "";
+	my %allfiles;
+
+	$multi = 1 if (!defined($multi));
+	if (!defined($infofile)) {
+		$infofile = "standard input";
+		warn("$0: Reading data from standard input\n");
+		open($fd, "<&STDIN") or
+			die("$0: Could not duplicated stdin: $!\n");
+	} else {
+		open($fd, "<", $infofile) or
+			die("$0: Could not open $infofile: $!\n");
+	}
+
+	# Register starting positions of data sets
+	while (my $line = <$fd>) {
+		if ($line =~ /^TN:(.*)$/) {
+			$tn = $1;
+		} elsif ($line =~ /^SF:(.*)$/) {
+			my $sf = $1;
+			my $pos = tell($fd);
+
+			die("$0: Could not get file position: $!\n")
+				if ($pos == -1);
+			if (exists($info{$tn}->{$sf})) {
+				ferr($., $infofile,
+				     "Duplicate entry for $tn:$sf\n");
+			}
+			$info{$tn}->{$sf} = [ $pos, $. ];
+			$allfiles{$sf} = 1;
+		}
+	}
+
+	# Print data sets in normalized order
+	foreach my $filename (sort(keys(%allfiles))) {
+		foreach my $testname (sort(keys(%info))) {
+			my $pos = $info{$testname}->{$filename};
+
+			next if (!defined($pos));
+			my ($cpos, $lpos) = @$pos;
+
+			if (seek($fd, $cpos, 0) != 1) {
+				die("$0: Could not seek in $infofile: $!\n");
+			}
+			printf("TN:$testname\n");
+			printf("SF:$filename\n");
+
+			$. = $lpos;
+			print_sorted($fd, $infofile, $multi);
+
+			printf("end_of_record\n");
+
+		}
+	}
+	foreach my $testname (sort(keys(%info))) {
+		my $files = $info{$testname};
+
+		foreach my $filename (sort(keys(%{$files}))) {
+		}
+	}
+
+	close($fd);
+}
+
+main();
+exit(0);
diff --git a/ThirdParty/lcov/test/bin/test_run b/ThirdParty/lcov/test/bin/test_run
new file mode 100755
index 0000000000000000000000000000000000000000..23e69d0f4e9534b424f9a2ecdf1ae712d1ff38df
--- /dev/null
+++ b/ThirdParty/lcov/test/bin/test_run
@@ -0,0 +1,99 @@
+#!/usr/bin/env bash
+#
+# Copyright IBM Corp. 2017
+#
+# Usage: test_run <testname> <cmdline>
+#
+# Announce a test case, run it, and record the resulting output in the
+# test log file. Must be run after testsuite_init.
+#
+
+TOPDIR=$(realpath $(dirname $0)/..) && source "$TOPDIR/bin/common"
+EXCERPTLEN=10
+TESTNAME="$1"
+shift
+
+TIME=$(which time 2>/dev/null)
+if [ ! -z "$TIME" ] ; then
+	TIME="$TIME -v -o $TIMEFILE"
+	if ! $TIME true 2>/dev/null ; then
+		TIME=""
+	fi
+fi
+
+t_announce "$TESTNAME"
+
+let POS=$(stat -c %s "$LOGFILE")+1
+
+t_detail "COMMAND" "\"$*\"" >>"$LOGFILE"
+t_detail "OUTPUT" "" >>"$LOGFILE"
+
+# Run command
+$TIME bash -c "$*" 2>&1 | t_indent >>"$LOGFILE"
+RC=${PIPESTATUS[0]}
+
+# Evaluate output of time command
+ELAPSED=
+RESIDENT=
+SIGNAL=
+if [ ! -z "$TIME" ] ; then
+	while read LINE ; do
+		case "$LINE" in
+		"Command terminated by signal"*) SIGNAL=${LINE##* } ;;
+		"Elapsed"*) ELAPSED=$(elapsed_to_ms ${LINE##* }) ;;
+		"Maximum resident"*) RESIDENT=${LINE##* } ;;
+		"Exit status"*) RC=${LINE##* } ;;
+		esac
+	done < "$TIMEFILE"
+	rm -f "$TIMEFILE"
+fi
+
+t_detail "EXITCODE" "$RC" >>"$LOGFILE"
+
+# Show result
+if [ $RC -eq 0 -a -z "$SIGNAL" ] ; then
+	RESULT="pass"
+	t_pass "$TESTNAME"
+else
+	if [ -z "$SIGNAL" ] ; then
+		RESULT="fail"
+		t_fail "$TESTNAME"
+	else
+		RESULT="kill"
+		t_kill "$TESTNAME"
+	fi
+fi
+
+if [ ! -z "$SIGNAL" ] ; then
+	t_detail "SIGNAL" "$SIGNAL" >>"$LOGFILE"
+fi
+
+if [ ! -z "$ELAPSED" ] ; then
+	echo -n " (time $(($ELAPSED/1000)).$(($ELAPSED%1000/100))s, "
+	echo "elapsed $TESTNAME $ELAPSED" >> "$COUNTFILE"
+fi
+
+if [ ! -z "$RESIDENT" ] ; then
+	echo -n "mem $(($RESIDENT/1024)).$((($RESIDENT%1024)/100))MB)"
+	echo "resident $TESTNAME $RESIDENT" >> "$COUNTFILE"
+fi
+
+echo
+
+# Show log excerpt on failure or if requested
+if [ $RC -ne 0 -o "$V" == "1" ] ; then
+	LEN=$(tail -c "+$POS" "$LOGFILE" | wc -l)
+	if [ "$LEN" -gt "$EXCERPTLEN" -a "$V" != "1" ] ; then
+		tail -c "+$POS" "$LOGFILE" | head -n $EXCERPTLEN | t_indent
+		let LEN=$LEN-$EXCERPTLEN
+		echo "    ..."
+		echo "    Skipping $LEN more lines (see $LOGFILE)"
+	else
+		tail -c "+$POS" "$LOGFILE" | t_indent
+	fi
+fi
+
+# Log more details
+[ ! -z "$ELAPSED" ] && t_detail "TIME" "${ELAPSED}ms" >>"$LOGFILE"
+[ ! -z "$RESIDENT" ] && t_detail "MEM" "${RESIDENT}kB" >>"$LOGFILE"
+t_detail "RESULT" "$RESULT" >> "$LOGFILE"
diff --git a/ThirdParty/lcov/test/bin/test_skip b/ThirdParty/lcov/test/bin/test_skip
new file mode 100755
index 0000000000000000000000000000000000000000..202606f4f9acb5db95410ae06814d81952f14ad3
--- /dev/null
+++ b/ThirdParty/lcov/test/bin/test_skip
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+#
+# Copyright IBM Corp. 2017
+#
+# Usage: test_skip <testname> <reason>
+#
+# Announce and record that a single test case was skipped, including an
+# optional reason text. Must be run after testsuite_init.
+#
+
+TOPDIR=$(realpath $(dirname $0)/..) && source "$TOPDIR/bin/common"
+TESTNAME="$1"
+REASON="${*:2}" ; [ -z "$REASON" ] && REASON="<no reason given>"
+
+t_announce "$TESTNAME"
+t_skip "$TESTNAME"
+echo
+t_detail "REASON" "$REASON" >>"$LOGFILE"
+t_detail "REASON" "$REASON" | t_indent
diff --git a/ThirdParty/lcov/test/bin/testsuite_exit b/ThirdParty/lcov/test/bin/testsuite_exit
new file mode 100755
index 0000000000000000000000000000000000000000..6720df99f20bd924700a127cd903f2de1117ea36
--- /dev/null
+++ b/ThirdParty/lcov/test/bin/testsuite_exit
@@ -0,0 +1,71 @@
+#!/usr/bin/env bash
+#
+# Copyright IBM Corp. 2017
+#
+# Usage: testsuite_exit
+#
+# Announce end of test suite and show aggregate results.
+#
+
+TOPDIR=$(realpath $(dirname $0)/..) && source "$TOPDIR/bin/common"
+
+echo "end_time $(date +%s.%N)" >>"$COUNTFILE"
+
+SUCCESS=0
+FAILED=0
+SKIPPED=0
+TOTAL_TIME=0
+TOTAL_MEM=0
+HAVE_EXT=0
+
+# Get results
+while read LINE ; do
+	set -- $LINE
+	case "$1" in
+	start_time)	START_TIME=$2 ;;
+	end_time)	END_TIME=$2 ;;
+	pass)		let SUCCESS=$SUCCESS+1 ;;
+	fail)		let FAILED=$FAILED+1 ;;
+	skip)		let SKIPPED=$SKIPPED+1 ;;
+	elapsed)	let TOTAL_TIME=$TOTAL_TIME+$3 ; HAVE_EXT=1 ;;
+	resident)	let TOTAL_MEM=$TOTAL_MEM+$3 ; HAVE_EXT=1 ;;
+	esac
+done < "$COUNTFILE"
+
+exec 3>&1
+exec >>"$LOGFILE" 2>&1
+
+t_marker
+t_detail "DATE" "$(t_timestamp)"
+
+let TOTAL=$SUCCESS+$SKIPPED+$FAILED
+t_detail "EXECUTED" "$TOTAL"
+t_detail "PASSED" "$SUCCESS"
+t_detail "FAILED" "$FAILED"
+t_detail "SKIPPED" "$SKIPPED"
+[ $HAVE_EXT -eq 1 ] && t_detail "TIME" "${TOTAL_TIME}ms"
+[ $HAVE_EXT -eq 1 ] && t_detail "MEM" "${TOTAL_MEM}kB"
+
+TOTAL_TIME=$(($TOTAL_TIME/1000)).$(($TOTAL_TIME%1000/100))
+TOTAL_MEM=$(($TOTAL_MEM/1024)).$((($TOTAL_MEM%1024)/100))
+TOTAL="$BOLD$TOTAL tests executed$RESET"
+PASS="$SUCCESS passed"
+FAIL="$FAILED failed"
+SKIP="$SKIPPED skipped"
+TIME="time ${TOTAL_TIME}s"
+MEM="mem ${TOTAL_MEM}MB"
+
+[ "$SUCCESS" -gt 0 ] && PASS="$GREEN$PASS$DEFAULT"
+[ "$FAILED"  -gt 0 ] && FAIL="$RED$FAIL$DEFAULT"
+[ "$SKIPPED" -gt 0 ] && SKIP="$BLUE$SKIP$DEFAULT"
+
+echo -en "$TOTAL, $PASS, $FAIL, $SKIP$RESET" >&3
+[ $HAVE_EXT -eq 1 ] && echo -n " ($TIME, $MEM)" >&3
+echo >&3
+echo "Result log stored in $LOGFILE" >&3
+
+if [ "$FAILED" -gt 0 ] ; then
+	exit 1
+fi
+
+exit 0
diff --git a/ThirdParty/lcov/test/bin/testsuite_init b/ThirdParty/lcov/test/bin/testsuite_init
new file mode 100755
index 0000000000000000000000000000000000000000..f901e35f13998caf371717a4946c53e1c49aafa2
--- /dev/null
+++ b/ThirdParty/lcov/test/bin/testsuite_init
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+#
+# Copyright IBM Corp. 2017
+#
+# Usage: testsuite_init
+#
+# Announce start of test suite and prepare log files.
+#
+
+TOPDIR=$(realpath $(dirname $0)/..) && source "$TOPDIR/bin/common"
+
+echo -e $BOLD"Starting tests"$RESET
+echo "start_time $(date +%s.%N)" >"$COUNTFILE"
+exec >"$LOGFILE" 2>&1
+
+t_detail "DATE" "$(t_timestamp)"
+
+t_detail "LCOV" ""
+lcov --version 2>&1 | t_indent
+
+t_detail "GCOV" ""
+gcov --version 2>&1 | t_indent
+
+t_detail "CPUINFO" ""
+t_indent < /proc/cpuinfo
+
+t_detail "MEMINFO" ""
+t_indent < /proc/meminfo
diff --git a/ThirdParty/lcov/test/common.mak b/ThirdParty/lcov/test/common.mak
new file mode 100644
index 0000000000000000000000000000000000000000..55f31eb99c32453e3a4325c3d7434bce4b0d3c1c
--- /dev/null
+++ b/ThirdParty/lcov/test/common.mak
@@ -0,0 +1,50 @@
+TOPDIR       := $(dir $(realpath $(lastword $(MAKEFILE_LIST))))
+TESTDIR      := $(dir $(realpath $(firstword $(MAKEFILE_LIST))))
+PARENTDIR    := $(dir $(patsubst %/,%,$(TOPDIR)))
+RELDIR       := $(TESTDIR:$(PARENTDIR)%=%)
+ZEROINFO     := $(TOPDIR)zero.info
+ZEROCOUNTS   := $(TOPDIR)zero.counts
+FULLINFO     := $(TOPDIR)full.info
+FULLCOUNTS   := $(TOPDIR)full.counts
+TARGETINFO   := $(TOPDIR)target.info
+TARGETCOUNTS := $(TOPDIR)target.counts
+PART1INFO    := $(TOPDIR)part1.info
+PART1COUNTS  := $(TOPDIR)part1.counts
+PART2INFO    := $(TOPDIR)part2.info
+PART2COUNTS  := $(TOPDIR)part2.counts
+INFOFILES    := $(ZEROINFO) $(FULLINFO) $(TARGETINFO) $(PART1INFO) $(PART2INFO)
+COUNTFILES   := $(ZEROCOUNTS) $(FULLCOUNTS) $(TARGETCOUNTS) $(PART1COUNTS) \
+		$(PART2COUNTS)
+LCOVRC       := $(TOPDIR)lcovrc
+LCOVFLAGS    := --config-file $(LCOVRC)
+SIZE         := small
+CC           := gcc
+
+export LCOV := lcov $(LCOVFLAGS)
+export GENHTML := genhtml $(LCOVFLAGS)
+export PATH := $(TOPDIR)/../bin:$(TOPDIR)/bin:$(PATH)
+export LANG := C
+
+all: prepare init test exit
+
+init:
+	testsuite_init
+
+exit:
+	testsuite_exit
+
+prepare: $(INFOFILES) $(COUNTFILES)
+
+clean: clean_common
+
+clean_common:
+	echo "  CLEAN   $(patsubst %/,%,$(RELDIR))"
+
+$(INFOFILES) $(COUNTFILES):
+	cd $(TOPDIR) && mkinfo profiles/$(SIZE) -o src/
+
+ifneq ($(V),2)
+.SILENT:
+endif
+
+.PHONY: all init exit prepare clean clean_common
diff --git a/ThirdParty/lcov/test/genhtml_output/Makefile b/ThirdParty/lcov/test/genhtml_output/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..0fbd88267074b651e8bb94146136e8ed162dce13
--- /dev/null
+++ b/ThirdParty/lcov/test/genhtml_output/Makefile
@@ -0,0 +1,31 @@
+include ../common.mak
+
+GENHTML_TEST := ./genhtml_test
+
+TESTS := genhtml_output_zero genhtml_output_full genhtml_output_target \
+	 genhtml_output_part1 genhtml_output_part2 genhtml_output_combined
+
+test: $(TESTS)
+
+genhtml_output_zero:
+	@test_run genhtml_output_zero $(GENHTML) $(ZEROINFO) -o out_zero/
+
+genhtml_output_full:
+	@test_run genhtml_output_full $(GENHTML) $(FULLINFO) -o out_full/
+
+genhtml_output_target:
+	@test_run genhtml_output_target $(GENHTML) $(TARGETINFO) -o out_target/
+
+genhtml_output_part1:
+	@test_run genhtml_output_part1 $(GENHTML) $(PART1INFO) -o out_part1/
+
+genhtml_output_part2:
+	@test_run genhtml_output_part2 $(GENHTML) $(PART2INFO) -o out_part2/
+
+genhtml_output_combined: genhtml_output_target
+	@test_run genhtml_output_combined $(GENHTML_TEST) $(TARGETINFO) $(PART1INFO) $(PART2INFO)
+
+clean:
+	rm -rf out_*/
+
+.PHONY: test $(TESTS) clean
diff --git a/ThirdParty/lcov/test/genhtml_output/genhtml_test b/ThirdParty/lcov/test/genhtml_output/genhtml_test
new file mode 100755
index 0000000000000000000000000000000000000000..0b0f834918e5eb92076d98a592aaea028aa0ab86
--- /dev/null
+++ b/ThirdParty/lcov/test/genhtml_output/genhtml_test
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+#
+# Copyright IBM Corp. 2017
+#
+# Usage: genhtml_test <ref-file> <file1> [<file2>...]
+#
+# Compare genhtml output of a reference coverage data file with that of
+# a combination of multiple files.
+#
+
+function die()
+{
+	echo "Error: $@" >&2
+	exit 1
+}
+
+GENHTMLFLAGS="-t title"
+REFFILE=$1
+shift
+
+if [ -z "$REFFILE" -o -z "$*" ] ; then
+	echo "Usage: $0 <ref-file> <file1> [<file2>...]" >&2
+	exit 2
+fi
+
+OUTREF="out_$(basename $REFFILE .info)"
+OUTCOMBINED="out_combined"
+
+$GENHTML $GENHTMLFLAGS "$REFFILE" -o "$OUTREF" || \
+	die "Could not generate HTML for reference file"
+
+$GENHTML $GENHTMLFLAGS "$@" -o "$OUTCOMBINED" || \
+	die "Could not generate HTML for combined files"
+
+diff -ur "$OUTREF" "$OUTCOMBINED" -I "headerValue" || \
+	die "Mismatch in generated output"
diff --git a/ThirdParty/lcov/test/lcov_add_files/Makefile b/ThirdParty/lcov/test/lcov_add_files/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..87937a155845f676e6352fe5c161c5f4cb0d31ae
--- /dev/null
+++ b/ThirdParty/lcov/test/lcov_add_files/Makefile
@@ -0,0 +1,47 @@
+include ../common.mak
+
+ADDTEST := ./add_test
+
+TESTS := lcov_add_zero lcov_add_zero2 lcov_add_full lcov_add_full2 \
+	 lcov_add_part lcov_add_part2 lcov_add_concatenated4
+
+
+test: $(TESTS)
+
+lcov_add_zero:
+	# Add single zero coverage file - output should be same as input
+	test_run lcov_add_zero $(ADDTEST) 1 "$(ZEROINFO)" "$(ZEROINFO)"
+
+lcov_add_zero2:
+	# Add two zero coverage files - output should be same as input
+	test_run lcov_add_zero2 $(ADDTEST) 1 "$(ZEROINFO)" "$(ZEROINFO)" "$(ZEROINFO)"
+
+lcov_add_full:
+	# Add single 100% coverage file - output should be same as input
+	test_run lcov_add_full $(ADDTEST) 1 "$(FULLINFO)" "$(FULLINFO)"
+
+lcov_add_full2:
+	# Add two 100% coverage files and reduce counts to 1/2 - output should
+	# be same as input
+	test_run lcov_add_full2 $(ADDTEST) 0.5 "$(FULLINFO)" "$(FULLINFO)" "$(FULLINFO)"
+
+lcov_add_part:
+	# Add single coverage file with random coverage rate - output should
+	# be same as input
+	test_run lcov_add_part $(ADDTEST) 1 "$(PART1INFO)" "$(PART1INFO)"
+
+lcov_add_part2:
+	# Add two coverage files that were split from target file - output
+	# should be same as target file
+	test_run lcov_add_part2 $(ADDTEST) 1 "$(TARGETINFO)" "$(PART1INFO)" "$(PART2INFO)"
+
+lcov_add_concatenated4:
+	# Add coverage file that consists of 4 concatenations of target files
+	# and reduce counts to 1/4 - output should be the same as input
+	cat $(TARGETINFO) $(TARGETINFO) $(TARGETINFO) $(TARGETINFO) >concatenated.info
+	test_run lcov_add_concatenated4 $(ADDTEST) 0.25 $(TARGETINFO) concatenated.info
+
+clean:
+	rm -f *.info
+
+.PHONY: test $(TESTS) clean
diff --git a/ThirdParty/lcov/test/lcov_add_files/add_test b/ThirdParty/lcov/test/lcov_add_files/add_test
new file mode 100755
index 0000000000000000000000000000000000000000..4ff5ffeb6c74270e2acdceceb93556334281a014
--- /dev/null
+++ b/ThirdParty/lcov/test/lcov_add_files/add_test
@@ -0,0 +1,46 @@
+#!/usr/bin/env bash
+#
+# Copyright IBM Corp. 2017
+#
+# Usage: add_test <multiplier> <reference_file> <add_file> [<add_file>...]
+#
+# Add multiple coverage data files, normalize the output and multiply counts
+# with multiplier. Compare against reference file. Report deviations.
+#
+
+MULTI=$1
+REFFILE=$2
+shift 2
+
+ADD=
+for INFO in $* ; do
+	ADD="$ADD -a $INFO"
+done
+
+if [ -z "$MULTI" -o -z "$REFFILE" -o -z "$ADD" ] ; then
+	echo "Usage: $0 <multiplier> <reference_file> <add_file> [<add_file>...]" >&2
+	exit 1
+fi
+
+OUTFILE="add_"$(basename "$REFFILE")
+SORTFILE="norm_$OUTFILE"
+
+set -x
+
+echo "Adding files..."
+if ! $LCOV $ADD -o "$OUTFILE" ; then
+	echo "Error: lcov returned with non-zero exit code $?" >&2
+	exit 1
+fi
+
+echo "Normalizing result..."
+if ! norminfo "$OUTFILE" "$MULTI" > "$SORTFILE" ; then
+	echo "Error: Normalization of lcov result file failed" >&2
+	exit 1
+fi
+
+echo "Comparing with reference..."
+if ! diff -u "$REFFILE" "$SORTFILE" ; then
+	echo "Error: Result of combination differs from reference file" >&2
+	exit 1
+fi
diff --git a/ThirdParty/lcov/test/lcov_diff/Makefile b/ThirdParty/lcov/test/lcov_diff/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..d2d4dd6621bd5b0c1b858ce811450385625fa8b5
--- /dev/null
+++ b/ThirdParty/lcov/test/lcov_diff/Makefile
@@ -0,0 +1,9 @@
+include ../common.mak
+
+test:
+	test_run lcov_diff_apply ./diff_test
+
+clean:
+	make -C old clean
+	make -C new clean
+	rm -f *.info diff
diff --git a/ThirdParty/lcov/test/lcov_diff/diff_test b/ThirdParty/lcov/test/lcov_diff/diff_test
new file mode 100755
index 0000000000000000000000000000000000000000..e0f8c0b3081845a795960514b5f2b33a443bc4b9
--- /dev/null
+++ b/ThirdParty/lcov/test/lcov_diff/diff_test
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+#
+# Copyright IBM Corp. 2017
+#
+# Usage: diff_test
+#
+# Check lcov's diff function:
+# - Compile two slightly different test programs
+# - Run the programs and collect coverage data
+# - Generate a patch containing the difference between the source code
+# - Apply the patch to the coverage data
+# - Compare the resulting patched coverage data file with the data from the
+#   patched source file
+#
+
+function die()
+{
+	echo "Error: $@" >&2
+	exit 1
+}
+
+make -C old || die "Failed to compile old source"
+make -C new || die "Failed to compile new source"
+diff -u $PWD/old/prog.c $PWD/new/prog.c > diff
+
+$LCOV --diff old/prog.info diff --convert-filenames -o patched.info -t bla || \
+	die "Failed to apply patch to coverage data file"
+norminfo new/prog.info > new_normalized.info
+norminfo patched.info > patched_normalized.info
+sed -i -e 's/^TN:.*$/TN:/' patched_normalized.info
+
+diff -u patched_normalized.info new_normalized.info || \
+	die "Mismatch in patched coverage data file"
+
+echo "Patched coverage data file matches expected file"
diff --git a/ThirdParty/lcov/test/lcov_diff/new/Makefile b/ThirdParty/lcov/test/lcov_diff/new/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..51005c71dd74b21133d48f5c64ee1998e2473d67
--- /dev/null
+++ b/ThirdParty/lcov/test/lcov_diff/new/Makefile
@@ -0,0 +1,17 @@
+prog.info:
+
+include ../../common.mak
+
+prog.info: prog.gcda
+	$(LCOV) -c -d . -o prog.info
+
+prog.gcda: prog
+	./prog || true
+
+prog: prog.c
+	$(CC) prog.c -o prog --coverage
+
+clean:
+	rm -f prog prog.gcda prog.gcno prog.info
+
+.PHONY: all clean
diff --git a/ThirdParty/lcov/test/lcov_diff/new/prog.c b/ThirdParty/lcov/test/lcov_diff/new/prog.c
new file mode 100644
index 0000000000000000000000000000000000000000..6f4607cc4fd8d6b537f19eac54080e3a09f506de
--- /dev/null
+++ b/ThirdParty/lcov/test/lcov_diff/new/prog.c
@@ -0,0 +1,41 @@
+
+
+
+int fn(int x)
+{
+	switch (x) {
+	case -1: return 0;
+
+
+	case 0:  return 2;
+	case 2:  return 3;
+
+
+	case 12: return 7;
+	default: return 255;
+	}
+
+
+
+}
+
+int fn2()
+{
+
+
+	return 7;
+}
+
+
+
+int main(int argc, char *argv[])
+{
+
+
+	if (argc > 1)
+		return fn(argc);
+
+	return fn2();
+
+
+}
diff --git a/ThirdParty/lcov/test/lcov_diff/old/Makefile b/ThirdParty/lcov/test/lcov_diff/old/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..51005c71dd74b21133d48f5c64ee1998e2473d67
--- /dev/null
+++ b/ThirdParty/lcov/test/lcov_diff/old/Makefile
@@ -0,0 +1,17 @@
+prog.info:
+
+include ../../common.mak
+
+prog.info: prog.gcda
+	$(LCOV) -c -d . -o prog.info
+
+prog.gcda: prog
+	./prog || true
+
+prog: prog.c
+	$(CC) prog.c -o prog --coverage
+
+clean:
+	rm -f prog prog.gcda prog.gcno prog.info
+
+.PHONY: all clean
diff --git a/ThirdParty/lcov/test/lcov_diff/old/prog.c b/ThirdParty/lcov/test/lcov_diff/old/prog.c
new file mode 100644
index 0000000000000000000000000000000000000000..a4eda2555769efe16e0d7bba542fc3dfb31d28b5
--- /dev/null
+++ b/ThirdParty/lcov/test/lcov_diff/old/prog.c
@@ -0,0 +1,22 @@
+int fn(int x)
+{
+	switch (x) {
+	case -1: return 0;
+	case 0:  return 2;
+	case 2:  return 3;
+	case 12: return 7;
+	default: return 255;
+	}
+}
+
+int fn2()
+{
+	return 7;
+}
+
+int main(int argc, char *argv[])
+{
+	if (argc > 1)
+		return fn(argc);
+	return fn2();
+}
diff --git a/ThirdParty/lcov/test/lcov_misc/Makefile b/ThirdParty/lcov/test/lcov_misc/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..d3bcc4ab28582cd7a34a0fdc1a54aceaecf08ca0
--- /dev/null
+++ b/ThirdParty/lcov/test/lcov_misc/Makefile
@@ -0,0 +1,5 @@
+include ../common.mak
+
+test:
+	@test_run lcov_version lcov --version
+	@test_run lcov_help lcov --help
diff --git a/ThirdParty/lcov/test/lcov_summary/Makefile b/ThirdParty/lcov/test/lcov_summary/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..f48d0bc7667d759e2cf04597095ad012626ff647
--- /dev/null
+++ b/ThirdParty/lcov/test/lcov_summary/Makefile
@@ -0,0 +1,41 @@
+include ../common.mak
+
+CHECK := ./check_counts
+TESTS := lcov_summary_zero lcov_summary_full lcov_summary_target \
+	 lcov_summary_part1 lcov_summary_part2 lcov_summary_concatenated \
+	 lcov_summary_concatenated2
+
+test: $(TESTS)
+
+lcov_summary_zero:
+	# Compare output of lcov --summary with generated counts
+	test_run lcov_summary_zero $(CHECK) $(ZEROCOUNTS) $(ZEROINFO)
+lcov_summary_full:
+	# Compare output of lcov --summary with generated counts
+	test_run lcov_summary_full $(CHECK) $(FULLCOUNTS) $(FULLINFO)
+lcov_summary_target:
+	# Compare output of lcov --summary with generated counts
+	test_run lcov_summary_target $(CHECK) $(TARGETCOUNTS) $(TARGETINFO)
+lcov_summary_part1:
+	# Compare output of lcov --summary with generated counts
+	test_run lcov_summary_part1 $(CHECK) $(PART1COUNTS) $(PART1INFO)
+lcov_summary_part2:
+	# Compare output of lcov --summary with generated counts
+	test_run lcov_summary_part2 $(CHECK) $(PART2COUNTS) $(PART2INFO)
+lcov_summary_concatenated:
+	# Compare output of lcov --summary with generated counts for a
+	# concatenated coverage data file
+	cat $(TARGETINFO) $(TARGETINFO) > concatenated.info
+	test_run lcov_summary_concatenated $(CHECK) $(TARGETCOUNTS) concatenated.info
+lcov_summary_concatenated2:
+	# Compare output of lcov --summary with generated counts for a
+	# concatenated coverage data file (part1+part2=target)
+	cat $(PART1INFO) $(PART2INFO) > concatenated2.info
+	test_run lcov_summary_concatenated2 $(CHECK) $(TARGETCOUNTS) concatenated2.info
+
+
+
+clean:
+	rm -f *.info
+
+.PHONY: test $(TESTS) clean
diff --git a/ThirdParty/lcov/test/lcov_summary/check_counts b/ThirdParty/lcov/test/lcov_summary/check_counts
new file mode 100755
index 0000000000000000000000000000000000000000..32d454230ccc4682f532524bce4638e9dfe0be49
--- /dev/null
+++ b/ThirdParty/lcov/test/lcov_summary/check_counts
@@ -0,0 +1,70 @@
+#!/usr/bin/env perl
+#
+# Copyright IBM Corp. 2017
+#
+# Usage: check_counts <counts_file> <coverage_data_file>
+#
+# Compare the output of "lcov --summary" for <coverage_data_file> with the
+# coverage data counts specified in <counts_file>. This file has the following
+# format (all in a single line):
+#
+#   lnhit lnfound fnhit fnfound brhit brfound
+#
+
+use strict;
+use warnings;
+
+sub do_cmp($$$)
+{
+	my ($title, $a, $b) = @_;
+
+	if ($a == $b) {
+		print("$title: $a == $b\n");
+		return 0;
+	} else {
+		print("$title: $a != $b => mismatch!\n");
+		return 1;
+	}
+}
+
+my $lcov = $ENV{"LCOV"};
+my ($counts, $info) = @ARGV;
+my $fd;
+my $cmdline;
+my ($lnhit, $lnfound, $fnhit, $fnfound, $brhit, $brfound) = (0, 0, 0, 0, 0, 0);
+my ($lnhit2, $lnfound2, $fnhit2, $fnfound2, $brhit2, $brfound2);
+my $rc = 0;
+
+die("$0: LCOV environment variable not defined\n") if (!defined($lcov));
+if (!defined($counts) || !defined($info)) {
+	die("Usage: $0 <counts_file> <coverage_data_file>\n");
+}
+
+$cmdline = "$lcov --summary $info";
+open($fd, "-|", $cmdline) or die("$0: Could not run $cmdline: $!\n");
+while (<$fd>) {
+	($lnhit, $lnfound) = ($1, $2) if (/(\d+) of (\d+) lines/);
+	($fnhit, $fnfound) = ($1, $2) if (/(\d+) of (\d+) functions/);
+	($brhit, $brfound) = ($1, $2) if (/(\d+) of (\d+) branches/);
+}
+close($fd);
+
+die("$0: Non-zero result code ($?) of command: $cmdline\n") if ($? != 0);
+
+open($fd, "<", $counts) or die("$0: Could not open $counts: $!\n");
+if (<$fd> !~ /^(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)/) {
+	die("$0: Invalid count file: $counts\n");
+}
+($lnhit2, $lnfound2, $fnhit2, $fnfound2, $brhit2, $brfound2) =
+	($1, $2, $3, $4, $5, $6);
+close($fd);
+
+print("Comparing --summary output for $info and $counts:\n");
+$rc |= do_cmp("line hit", $lnhit, $lnhit2);
+$rc |= do_cmp("line found", $lnfound, $lnfound2);
+$rc |= do_cmp("functions hit", $fnhit, $fnhit2);
+$rc |= do_cmp("functions found", $fnfound, $fnfound2);
+$rc |= do_cmp("branches hit", $brhit, $brhit2);
+$rc |= do_cmp("branches found", $brfound, $brfound2);
+
+exit($rc);
diff --git a/ThirdParty/lcov/test/lcovrc b/ThirdParty/lcov/test/lcovrc
new file mode 100644
index 0000000000000000000000000000000000000000..5005f637d5f7f2d434b54ab88067152fb12f208f
--- /dev/null
+++ b/ThirdParty/lcov/test/lcovrc
@@ -0,0 +1,4 @@
+# lcovrc file used during tests
+
+lcov_function_coverage = 1
+lcov_branch_coverage = 1
diff --git a/ThirdParty/lcov/test/profiles/large b/ThirdParty/lcov/test/profiles/large
new file mode 100644
index 0000000000000000000000000000000000000000..31788b040933f33b044a6f7591a83fa310d910bb
--- /dev/null
+++ b/ThirdParty/lcov/test/profiles/large
@@ -0,0 +1,51 @@
+# Profile of a large source code project. Use with mkinfo to generate fake test
+# source code and coverage data.
+
+[tests]
+# List of test names
+names	= test1 test2
+
+[files]
+# Create this many files
+numfiles = 500
+# Generate paths from these components (top/sub/subsub/prefix_suffix.ext)
+top	= lib tools test bin img scripts
+sub	= build debug release include target sys config
+subsub	= work www utils gui info log basic
+prefix	= main misc report tune mem list 
+suffix	= a b c top work proto final fast
+ext	= .c .h
+
+[lines]
+# Generate line coverage data
+enabled	= 1
+# Line coverage rate
+covered	= 80
+# Percentage of lines instrumented
+instrumented = 80
+# Maximum number of lines per file
+maxlines = 2000
+
+[functions]
+# Generate function coverage data
+enabled	= 1
+# Function coverage rate
+covered	= 60
+# Percent of instrumented lines containing function definitions
+perinstrumented = 10
+# Generate function names from these components (verb_adj_noun)
+verb	= get set find read write stat add sub combine
+adj	= first last best min max avg
+noun	= bit byte file str num obj data
+
+[branches]
+# Generate branch coverage data
+enabled	= 1
+# Branch coverage rate
+covered = 20
+# Percent of instrumented lines containing branches
+perinstrumented = 5
+# List of blocks to use
+blocks	= 0 4294967295
+# Distribution of number of branches per block (num:probability)
+branchdist = 2:50 3:25 5:20 100:5
diff --git a/ThirdParty/lcov/test/profiles/medium b/ThirdParty/lcov/test/profiles/medium
new file mode 100644
index 0000000000000000000000000000000000000000..56598e868271a98498b88f7361f9b5b5a109f4d4
--- /dev/null
+++ b/ThirdParty/lcov/test/profiles/medium
@@ -0,0 +1,51 @@
+# Profile of a medium-sized source code project. Use with mkinfo to generate
+# fake test source code and coverage data.
+
+[tests]
+# List of test names
+names	= test1 test2 test3
+
+[files]
+# Create this many files
+numfiles = 50
+# Generate paths from these components (top/sub/subsub/prefix_suffix.ext)
+top	= lib tools test bin img scripts
+sub	= build debug release include target sys config
+subsub	= work www utils gui info log basic
+prefix	= main misc report tune mem list 
+suffix	= a b c top work proto final fast
+ext	= .c .h
+
+[lines]
+# Generate line coverage data
+enabled	= 1
+# Line coverage rate
+covered	= 80
+# Percentage of lines instrumented
+instrumented = 50
+# Maximum number of lines per file
+maxlines = 1000
+
+[functions]
+# Generate function coverage data
+enabled	= 1
+# Function coverage rate
+covered	= 60
+# Percent of instrumented lines containing function definitions
+perinstrumented = 5
+# Generate function names from these components (verb_adj_noun)
+verb	= get set find read write stat add sub combine
+adj	= first last best min max avg
+noun	= bit byte file str num obj data
+
+[branches]
+# Generate branch coverage data
+enabled	= 1
+# Branch coverage rate
+covered = 20
+# Percent of instrumented lines containing branches
+perinstrumented = 50
+# List of blocks to use
+blocks	= 0 4294967295
+# Distribution of number of branches per block (num:probability)
+branchdist = 2:50 3:50
diff --git a/ThirdParty/lcov/test/profiles/small b/ThirdParty/lcov/test/profiles/small
new file mode 100644
index 0000000000000000000000000000000000000000..388d2a3bb55926031ed85164fa2250bd096191c3
--- /dev/null
+++ b/ThirdParty/lcov/test/profiles/small
@@ -0,0 +1,51 @@
+# Profile of a small source code project. Use with mkinfo to generate fake test
+# source code and coverage data.
+
+[tests]
+# List of test names
+names	= test1 test2
+
+[files]
+# Create this many files
+numfiles = 5
+# Generate paths from these components (top/sub/subsub/prefix_suffix.ext)
+top	= lib tools test bin img scripts
+sub	= build debug release include target sys config
+subsub	= work www utils gui info log basic
+prefix	= main misc report tune mem list 
+suffix	= a b c top work proto final fast
+ext	= .c .h
+
+[lines]
+# Generate line coverage data
+enabled	= 1
+# Line coverage rate
+covered	= 80
+# Percentage of lines instrumented
+instrumented = 50
+# Maximum number of lines per file
+maxlines = 500
+
+[functions]
+# Generate function coverage data
+enabled	= 1
+# Function coverage rate
+covered	= 60
+# Percent of instrumented lines containing function definitions
+perinstrumented = 5
+# Generate function names from these components (verb_adj_noun)
+verb	= get set find read write stat add sub combine
+adj	= first last best min max avg
+noun	= bit byte file str num obj data
+
+[branches]
+# Generate branch coverage data
+enabled	= 1
+# Branch coverage rate
+covered = 20
+# Percent of instrumented lines containing branches
+perinstrumented = 50
+# List of blocks to use
+blocks	= 0 4294967295
+# Distribution of number of branches per block (num:probability)
+branchdist = 2:50 3:45 50:5
diff --git a/Tools/coverage.sh b/Tools/coverage.sh
index 2e2223c4efa85f43625cb413830d934a276cc7dd..75feeffcaf95b59379b03723bccca9785497ad72 100755
--- a/Tools/coverage.sh
+++ b/Tools/coverage.sh
@@ -4,23 +4,21 @@ if [ -z $GCOV ]; then
   GCOV=gcov
 fi
 
-LCOV_VERSION="1.13"
-LCOV_DIR="lcov-${LCOV_VERSION}"
-
-if [ ! -e $LCOV_DIR ]; then
-  curl -L https://github.com/linux-test-project/lcov/releases/download/v${LCOV_VERSION}/lcov-${LCOV_VERSION}.tar.gz | tar zxf -
-fi
+PROJECT_DIR="$(dirname $0)/../"
+LCOV_DIR="${PROJECT_DIR}/ThirdParty/lcov"
 
 # LCOV="$LCOV_EXE --gcov-tool=${GCOV} --rc lcov_branch_coverage=1"
 LCOV="${LCOV_DIR}/bin/lcov --gcov-tool=${GCOV}" # no branch coverage
 
 # collect raw data
-$LCOV --base-directory `pwd` \
-  --directory `pwd` \
-  --capture --output-file coverage.info
+if [ ! -e coverage.info ]; then
+  $LCOV --directory `pwd` \
+    --capture --output-file coverage.info
+fi
 
 # remove uninteresting entries
-$LCOV --extract coverage.info "*/corsika/*" --output-file coverage.info
+$LCOV --remove coverage.info "*/usr/*" --output-file coverage2.info
+$LCOV --remove coverage2.info "*/ThirdParty/*" --output-file coverage3.info
 
 # if [ $CI ]; then
 #   # upload if on CI
@@ -30,4 +28,4 @@ $LCOV --extract coverage.info "*/corsika/*" --output-file coverage.info
 #   $LCOV_DIR/bin/genhtml coverage.info -o coverage-report
 # fi
 
-$LCOV_DIR/bin/genhtml coverage.info -o coverage-report
+$LCOV_DIR/bin/genhtml coverage3.info -o coverage-report