#!/usr/bin/perl -w # # Copyright (c) International Business Machines Corp., 2002 # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or (at # your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # # genhtml # # This script generates HTML output from .info files as created by the # geninfo script. Call it with --help and refer to the genhtml man page # to get information on usage and available options. # # # History: # 2002-08-23 created by Peter Oberparleiter # IBM Lab Boeblingen # based on code by Manoj Iyer and # Megan Bock # IBM Austin # 2002-08-27 / Peter Oberparleiter: implemented frame view # 2002-08-29 / Peter Oberparleiter: implemented test description filtering # so that by default only descriptions for test cases which # actually hit some source lines are kept # 2002-09-05 / Peter Oberparleiter: implemented --no-sourceview # 2002-09-05 / Mike Kobler: One of my source file paths includes a "+" in # the directory name. I found that genhtml.pl died when it # encountered it. I was able to fix the problem by modifying # the string with the escape character before parsing it. 
# 2002-10-26 / Peter Oberparleiter: implemented --num-spaces
# 2003-04-07 / Peter Oberparleiter: fixed bug which resulted in an error
#              when trying to combine .info files containing data without
#              a test name
# 2003-04-10 / Peter Oberparleiter: extended fix by Mike to also cover
#              other special characters
# 2003-04-30 / Peter Oberparleiter: made info write to STDERR, not STDOUT
# 2003-07-10 / Peter Oberparleiter: added line checksum support
# 2004-08-09 / Peter Oberparleiter: added configuration file support
#

use strict;
use File::Basename;
use Getopt::Long;
use Digest::MD5 qw(md5_base64);


# Global constants
our $title		= "LTP GCOV extension - code coverage report";
our $lcov_version	= "LTP GCOV extension version 1.4";
our $lcov_url		= "http://ltp.sourceforge.net/coverage/lcov.php";

# Specify coverage rate limits (in %) for classifying file entries
# HI:   $hi_limit  <= rate <= 100         graph color: green
# MED:  $med_limit <= rate <  $hi_limit   graph color: orange
# LO:   0          <= rate <  $med_limit  graph color: red
our $hi_limit	= 50;
our $med_limit	= 15;

# Width of overview image
our $overview_width = 80;

# Resolution of overview navigation: this number specifies the maximum
# difference in lines between the position a user selected from the overview
# and the position the source code window is scrolled to.
our $nav_resolution = 4;

# Clicking a line in the overview image should show the source code view at
# a position a bit further up so that the requested line is not the first
# line in the window. This number specifies that offset in lines.
our $nav_offset = 10;

our $overview_title = "directory";


# Data related prototypes
sub print_usage(*);
sub gen_html();
sub process_dir($);
sub process_file($$$);
sub info(@);
sub read_info_file($);
sub get_info_entry($);
sub set_info_entry($$$$$;$$);
sub get_prefix(@);
sub shorten_prefix($);
sub get_dir_list(@);
sub get_relative_base_path($);
sub read_testfile($);
sub get_date_string();
sub split_filename($);
sub create_sub_dir($);
sub subtract_counts($$);
sub add_counts($$);
sub apply_baseline($$);
sub remove_unused_descriptions();
sub get_found_and_hit($);
sub get_affecting_tests($);
sub combine_info_files($$);
sub merge_checksums($$$);
sub combine_info_entries($$$);
sub apply_prefix($$);
sub system_no_output($@);
sub read_config($);
sub apply_config($);


# HTML related prototypes
sub escape_html($);
sub get_bar_graph_code($$$);
sub write_png_files();
sub write_css_file();
sub write_description_file($$$);
sub write_html(*$);
sub write_html_prolog(*$$);
sub write_html_epilog(*$;$);
sub write_header(*$$$$$);
sub write_header_prolog(*$);
sub write_header_line(*$$;$$);
sub write_header_epilog(*$);
sub write_file_table(*$$$$);
sub write_file_table_prolog(*$$);
sub write_file_table_entry(*$$$$);
sub write_file_table_detail_heading(*$$);
sub write_file_table_detail_entry(*$$$);
sub write_file_table_epilog(*);
sub write_test_table_prolog(*$);
sub write_test_table_entry(*$$);
sub write_test_table_epilog(*);
sub write_source($$$$$);
sub write_source_prolog(*);
sub write_source_line(*$$$$);
sub write_source_epilog(*);
sub write_frameset(*$$$);
sub write_overview_line(*$$$);
sub write_overview(*$$$$);


# External prototype (defined in genpng)
sub gen_png($$$@);


# Global variables & initialization
our %info_data;		# Hash containing all data from .info file
our $dir_prefix;	# Prefix to remove from all sub directories
our %test_description;	# Hash containing test descriptions if available
our $date = get_date_string();
our @info_filenames;	# List of .info files to use as data
#   source
our $test_title;	# Title for output as written to each page header
our $output_directory;	# Name of directory in which to store output
our $base_filename;	# Optional name of file containing baseline data
our $desc_filename;	# Name of file containing test descriptions
our $css_filename;	# Optional name of external stylesheet file to use
our $quiet;		# If set, suppress information messages
our $help;		# Help option flag
our $version;		# Version option flag
our $show_details;	# If set, generate detailed directory view
our $no_prefix;		# If set, do not remove filename prefix
our $frames;		# If set, use frames for source code view
our $keep_descriptions;	# If set, do not remove unused test case descriptions
our $no_sourceview;	# If set, do not create a source code view for each file
our $highlight;		# If set, highlight lines covered by converted data only
our $tab_size = 8;	# Number of spaces to use in place of tab
our $config;		# Configuration file contents

our $cwd = `pwd`;	# Current working directory
chomp($cwd);
our $tool_dir = dirname($0);	# Directory where genhtml tool is installed


#
# Code entry point
#

# Add current working directory if $tool_dir is not already an absolute path
if (! ($tool_dir =~ /^\/(.*)$/))
{
	$tool_dir = "$cwd/$tool_dir";
}

# Read configuration file if available
if (-r $ENV{"HOME"}."/.lcovrc")
{
	$config = read_config($ENV{"HOME"}."/.lcovrc");
}
elsif (-r "/etc/lcovrc")
{
	$config = read_config("/etc/lcovrc");
}

if ($config)
{
	# Copy configuration file values to variables
	apply_config({
		"genhtml_css_file"		=> \$css_filename,
		"genhtml_hi_limit"		=> \$hi_limit,
		"genhtml_med_limit"		=> \$med_limit,
		"genhtml_overview_width"	=> \$overview_width,
		"genhtml_nav_resolution"	=> \$nav_resolution,
		"genhtml_nav_offset"		=> \$nav_offset,
		"genhtml_keep_descriptions"	=> \$keep_descriptions,
		"genhtml_no_prefix"		=> \$no_prefix,
		"genhtml_no_source"		=> \$no_sourceview,
		"genhtml_num_spaces"		=> \$tab_size,
		"genhtml_highlight"		=> \$highlight});
}

# Parse command line options
if (!GetOptions("output-directory=s"	=> \$output_directory,
		"title=s"		=> \$test_title,
		"description-file=s"	=> \$desc_filename,
		"keep-descriptions"	=> \$keep_descriptions,
		"css-file=s"		=> \$css_filename,
		"baseline-file=s"	=> \$base_filename,
		"prefix=s"		=> \$dir_prefix,
		"num-spaces=i"		=> \$tab_size,
		"no-prefix"		=> \$no_prefix,
		"no-sourceview"		=> \$no_sourceview,
		"show-details"		=> \$show_details,
		"frames"		=> \$frames,
		"highlight"		=> \$highlight,
		"quiet"			=> \$quiet,
		"help|h"		=> \$help,
		"version"		=> \$version
		))
{
	print_usage(*STDERR);
	exit(1);
}

@info_filenames = @ARGV;

# Check for help option
if ($help)
{
	print_usage(*STDOUT);
	exit(0);
}

# Check for version option
if ($version)
{
	print($lcov_version."\n");
	exit(0);
}

# Check for info filename
if (!@info_filenames)
{
	print(STDERR "No filename specified\n");
	print_usage(*STDERR);
	exit(1);
}

# Generate a title if none is specified
if (!$test_title)
{
	if (scalar(@info_filenames) == 1)
	{
		# Only one filename specified, use it as title
		$test_title = basename($info_filenames[0]);
	}
	else
	{
		# More than one filename specified, used default title
		$test_title = "unnamed";
	}
}

# Make sure css_filename is an absolute path (in case we're changing
directories) if ($css_filename) { if (!($css_filename =~ /^\/(.*)$/)) { $css_filename = $cwd."/".$css_filename; } } # Make sure tab_size is within valid range if ($tab_size < 1) { print(STDERR "ERROR: invalid number of spaces specified: ". "$tab_size!\n"); exit(1); } # Issue a warning if --no-sourceview is enabled together with --frames if ($no_sourceview && $frames) { warn("WARNING: option --frames disabled because --no-sourceview ". "was specified!\n"); $frames = undef; } if ($frames) { # Include genpng code needed for overview image generation do("$tool_dir/genpng"); } # Make sure output_directory exists, create it if necessary if ($output_directory) { stat($output_directory); if (! -e _) { system("mkdir", "-p", $output_directory) and die("ERROR: cannot create directory $_!\n"); } } # Do something gen_html(); exit(0); # # print_usage(handle) # # Print usage information. # sub print_usage(*) { local *HANDLE = $_[0]; my $executable_name = basename($0); print(HANDLE <index.html") or die("ERROR: cannot open index.html for writing!\n"); write_html_prolog(*HTML_HANDLE, "", "LCOV - $test_title"); write_header(*HTML_HANDLE, 0, "", "", $overall_found, $overall_hit); write_file_table(*HTML_HANDLE, "", \%overview, {}, 0); write_html_epilog(*HTML_HANDLE, ""); close(*HTML_HANDLE); # Check if there are any test case descriptions to write out if (%test_description) { info("Writing test case description file.\n"); write_description_file( \%test_description, $overall_found, $overall_hit); } if ($overall_found == 0) { info("Warning: No lines found!\n"); } else { info("Overall coverage rate: %d of %d lines (%.1f%%)\n", $overall_hit, $overall_found, $overall_hit*100/$overall_found); } chdir($cwd); } # # process_dir(dir_name) # sub process_dir($) { my $abs_dir = $_[0]; my $trunc_dir; my $rel_dir = $abs_dir; my $base_dir; my $filename; my %overview; my $lines_found; my $lines_hit; my $overall_found=0; my $overall_hit=0; my $base_name; my $extension; my $testdata; my %testhash; local 
*HTML_HANDLE; # Remove prefix if applicable if (!$no_prefix) { # Match directory name beginning with $dir_prefix $rel_dir = apply_prefix($rel_dir, $dir_prefix); } $trunc_dir = $rel_dir; # Remove leading / if ($rel_dir =~ /^\/(.*)$/) { $rel_dir = substr($rel_dir, 1); } $base_dir = get_relative_base_path($rel_dir); create_sub_dir($rel_dir); # Match filenames which specify files in this directory, not including # sub-directories foreach $filename (grep(/^\Q$abs_dir\E\/[^\/]*$/,keys(%info_data))) { ($lines_found, $lines_hit, $testdata) = process_file($trunc_dir, $rel_dir, $filename); $base_name = basename($filename); if ($no_sourceview) { # User asked as not to create source code view, do not # provide a page link $overview{$base_name} = "$lines_found,$lines_hit"; } elsif ($frames) { # Link to frameset page $overview{$base_name} = "$lines_found,$lines_hit,". "$base_name.gcov.frameset.html"; } else { # Link directory to source code view page $overview{$base_name} = "$lines_found,$lines_hit,". "$base_name.gcov.html"; } $testhash{$base_name} = $testdata; $overall_found += $lines_found; $overall_hit += $lines_hit; } # Generate directory overview page (without details) open(*HTML_HANDLE, ">$rel_dir/index.html") or die("ERROR: cannot open $rel_dir/index.html ". "for writing!\n"); write_html_prolog(*HTML_HANDLE, $base_dir, "LCOV - $test_title - $trunc_dir"); write_header(*HTML_HANDLE, 1, $trunc_dir, $rel_dir, $overall_found, $overall_hit); write_file_table(*HTML_HANDLE, $base_dir, \%overview, {}, 1); write_html_epilog(*HTML_HANDLE, $base_dir); close(*HTML_HANDLE); if ($show_details) { # Generate directory overview page including details open(*HTML_HANDLE, ">$rel_dir/index-detail.html") or die("ERROR: cannot open $rel_dir/". 
"index-detail.html for writing!\n"); write_html_prolog(*HTML_HANDLE, $base_dir, "LCOV - $test_title - $trunc_dir"); write_header(*HTML_HANDLE, 1, $trunc_dir, $rel_dir, $overall_found, $overall_hit); write_file_table(*HTML_HANDLE, $base_dir, \%overview, \%testhash, 1); write_html_epilog(*HTML_HANDLE, $base_dir); close(*HTML_HANDLE); } # Calculate resulting line counts return ($overall_found, $overall_hit); } # # get_converted_lines(testdata) # # Return hash of line numbers of those lines which were only covered in # converted data sets. # sub get_converted_lines($) { my $testdata = $_[0]; my $testcount; my %converted; my %nonconverted; my $hash; my $testcase; my $line; my %result; # Get a hash containing line numbers with positive counts both for # converted and original data sets foreach $testcase (keys(%{$testdata})) { # Check to see if this is a converted data set if ($testcase =~ /,diff$/) { $hash = \%converted; } else { $hash = \%nonconverted; } $testcount = $testdata->{$testcase}; # Add lines with a positive count to hash foreach $line (keys%{$testcount}) { if ($testcount->{$line} > 0) { $hash->{$line} = 1; } } } # Combine both hashes to resulting list foreach $line (keys(%converted)) { if (!defined($nonconverted{$line})) { $result{$line} = 1; } } return \%result; } # # process_file(trunc_dir, rel_dir, filename) # sub process_file($$$) { info("Processing file ".apply_prefix($_[2], $dir_prefix)."\n"); my $trunc_dir = $_[0]; my $rel_dir = $_[1]; my $filename = $_[2]; my $base_name = basename($filename); my $base_dir = get_relative_base_path($rel_dir); my $testdata; my $testcount; my $sumcount; my $funcdata; my $checkdata; my $lines_found; my $lines_hit; my $converted; my @source; my $pagetitle; local *HTML_HANDLE; ($testdata, $sumcount, $funcdata, $checkdata, $lines_found, $lines_hit) = get_info_entry($info_data{$filename}); # Return after this point in case user asked us not to generate # source code view if ($no_sourceview) { return ($lines_found, $lines_hit, 
$testdata); } $converted = get_converted_lines($testdata); # Generate source code view for this file open(*HTML_HANDLE, ">$rel_dir/$base_name.gcov.html") or die("ERROR: cannot open $rel_dir/$base_name.gcov.html ". "for writing!\n"); $pagetitle = "LCOV - $test_title - $trunc_dir/$base_name"; write_html_prolog(*HTML_HANDLE, $base_dir, $pagetitle); write_header(*HTML_HANDLE, 2, "$trunc_dir/$base_name", "$rel_dir/$base_name", $lines_found, $lines_hit); @source = write_source(*HTML_HANDLE, $filename, $sumcount, $checkdata, $converted); write_html_epilog(*HTML_HANDLE, $base_dir, 1); close(*HTML_HANDLE); # Additional files are needed in case of frame output if (!$frames) { return ($lines_found, $lines_hit, $testdata); } # Create overview png file gen_png("$rel_dir/$base_name.gcov.png", $overview_width, $tab_size, @source); # Create frameset page open(*HTML_HANDLE, ">$rel_dir/$base_name.gcov.frameset.html") or die("ERROR: cannot open ". "$rel_dir/$base_name.gcov.frameset.html". " for writing!\n"); write_frameset(*HTML_HANDLE, $base_dir, $base_name, $pagetitle); close(*HTML_HANDLE); # Write overview frame open(*HTML_HANDLE, ">$rel_dir/$base_name.gcov.overview.html") or die("ERROR: cannot open ". "$rel_dir/$base_name.gcov.overview.html". " for writing!\n"); write_overview(*HTML_HANDLE, $base_dir, $base_name, $pagetitle, scalar(@source)); close(*HTML_HANDLE); return ($lines_found, $lines_hit, $testdata); } # # read_info_file(info_filename) # # Read in the contents of the .info file specified by INFO_FILENAME. 
Data will # be returned as a reference to a hash containing the following mappings: # # %result: for each filename found in file -> \%data # # %data: "test" -> \%testdata # "sum" -> \%sumcount # "func" -> \%funcdata # "found" -> $lines_found (number of instrumented lines found in file) # "hit" -> $lines_hit (number of executed lines in file) # "check" -> \%checkdata # # %testdata: name of test affecting this file -> \%testcount # # %testcount: line number -> execution count for a single test # %sumcount : line number -> execution count for all tests # %funcdata : line number -> name of function beginning at that line # %checkdata: line number -> checksum of source code line # # Note that .info file sections referring to the same file and test name # will automatically be combined by adding all execution counts. # # Note that if INFO_FILENAME ends with ".gz", it is assumed that the file # is compressed using GZIP. If available, GUNZIP will be used to decompress # this file. # # Die on error. 
#
sub read_info_file($)
{
	my $tracefile = $_[0];		# Name of tracefile
	my %result;			# Resulting hash: file -> data
	my $data;			# Data handle for current entry
	my $testdata;			#       "             "
	my $testcount;			#       "             "
	my $sumcount;			#       "             "
	my $funcdata;			#       "             "
	my $checkdata;			#       "             "
	my $line;			# Current line read from .info file
	my $testname;			# Current test name
	my $filename;			# Current filename
	my $hitcount;			# Count for lines hit
	my $count;			# Execution count of current line
	my $negative;			# If set, warn about negative counts
	my $checksum;			# Checksum of current line
	local *INFO_HANDLE;		# Filehandle for .info file

	# Check if file exists and is readable
	stat($_[0]);
	if (!(-r _))
	{
		die("ERROR: cannot read file $_[0]!\n");
	}

	# Check if this is really a plain file
	if (!(-f _))
	{
		die("ERROR: not a plain file: $_[0]!\n");
	}

	# Check for .gz extension
	if ($_[0] =~ /\.gz$/)
	{
		# Check for availability of GZIP tool
		system_no_output(1, "gunzip", "-h")
			and die("ERROR: gunzip command not available!\n");

		# Check integrity of compressed file
		system_no_output(1, "gunzip", "-t", $_[0])
			and die("ERROR: integrity check failed for ".
				"compressed file $_[0]!\n");

		# Open compressed file; list-form pipe open avoids passing the
		# filename through the shell
		open(INFO_HANDLE, "-|", "gunzip", "-c", $_[0])
			or die("ERROR: cannot start gunzip to uncompress ".
			       "file $_[0]!\n");
	}
	else
	{
		# Open uncompressed file; three-arg open prevents mode
		# injection via special characters in the filename
		open(INFO_HANDLE, "<", $_[0])
			or die("ERROR: cannot read file $_[0]!\n");
	}

	$testname = "";
	while (<INFO_HANDLE>)
	{
		chomp($_);
		$line = $_;

		# Switch statement
		foreach ($line)
		{
			/^TN:(\w*(,\w+)?)/ && do
			{
				# Test name information found
				$testname = defined($1) ? $1 : "";
				last;
			};

			/^[SK]F:(.*)/ && do
			{
				# Filename information found
				# Retrieve data for new entry
				$filename = $1;

				$data = $result{$filename};
				($testdata, $sumcount, $funcdata, $checkdata) =
					get_info_entry($data);

				if (defined($testname))
				{
					$testcount = $testdata->{$testname};
				}
				else
				{
					my %new_hash;
					$testcount = \%new_hash;
				}
				last;
			};

			/^DA:(\d+),(-?\d+)(,[^,\s]+)?/ && do
			{
				# Fix negative counts
				$count = $2 < 0 ? 0 : $2;
				if ($2 < 0)
				{
					$negative = 1;
				}

				# Execution count found, add to structure
				# Add summary counts
				$sumcount->{$1} += $count;

				# Add test-specific counts
				if (defined($testname))
				{
					$testcount->{$1} += $count;
				}

				# Store line checksum if available
				if (defined($3))
				{
					$checksum = substr($3, 1);

					# Does it match a previous definition
					if (defined($checkdata->{$1}) &&
					    ($checkdata->{$1} ne $checksum))
					{
						die("ERROR: checksum mismatch ".
						    "at $filename:$1\n");
					}

					$checkdata->{$1} = $checksum;
				}
				last;
			};

			/^FN:(\d+),([^,]+)/ && do
			{
				# Function data found, add to structure
				$funcdata->{$1} = $2;
				last;
			};

			/^end_of_record/ && do
			{
				# Found end of section marker
				if ($filename)
				{
					# Store current section data
					if (defined($testname))
					{
						$testdata->{$testname} =
							$testcount;
					}

					set_info_entry($data, $testdata,
						       $sumcount, $funcdata,
						       $checkdata);
					$result{$filename} = $data;
				}
			};

			# default
			last;
		}
	}
	close(INFO_HANDLE);

	# Calculate lines_found and lines_hit for each file
	foreach $filename (keys(%result))
	{
		$data = $result{$filename};

		($testdata, $sumcount, $funcdata) = get_info_entry($data);

		$data->{"found"} = scalar(keys(%{$sumcount}));
		$hitcount = 0;

		foreach (keys(%{$sumcount}))
		{
			if ($sumcount->{$_} > 0) { $hitcount++; }
		}

		$data->{"hit"} = $hitcount;

		$result{$filename} = $data;
	}

	if (scalar(keys(%result)) == 0)
	{
		die("ERROR: no valid records found in tracefile $tracefile\n");
	}
	if ($negative)
	{
		warn("WARNING: negative counts found in tracefile ".
		     "$tracefile\n");
	}

	return(\%result);
}


#
# get_info_entry(hash_ref)
#
# Retrieve data from an entry of the structure generated by read_info_file().
# Return a list of references to hashes:
# (test data hash ref, sum count hash ref, funcdata hash ref, checkdata hash
# ref, lines found, lines hit)
#
sub get_info_entry($)
{
	# Unpack the six well-known fields of one entry of the structure
	# built by read_info_file().
	my $entry = $_[0];

	return ($entry->{"test"}, $entry->{"sum"}, $entry->{"func"},
		$entry->{"check"}, $entry->{"found"}, $entry->{"hit"});
}


#
# set_info_entry(hash_ref, testdata_ref, sumcount_ref, funcdata_ref,
#                checkdata_ref[,lines_found, lines_hit])
#
# Update the hash referenced by HASH_REF with the provided data references.
#
sub set_info_entry($$$$$;$$)
{
	my ($data_ref, $test, $sum, $func, $check, $found, $hit) = @_;

	$data_ref->{"test"}	= $test;
	$data_ref->{"sum"}	= $sum;
	$data_ref->{"func"}	= $func;
	$data_ref->{"check"}	= $check;

	# The line counters are optional; previously stored values are kept
	# when they are omitted.
	$data_ref->{"found"} = $found if (defined($found));
	$data_ref->{"hit"}   = $hit   if (defined($hit));
}


#
# get_prefix(filename_list)
#
# Search FILENAME_LIST for a directory prefix which is common to as many
# list entries as possible, so that removing this prefix will minimize the
# sum of the lengths of all resulting shortened filenames.
#
sub get_prefix(@)
{
	my @filename_list = @_;	# provided list of filenames
	my %weight_of;		# prefix -> sum of shortened filename lengths
	my $candidate;		# prefix currently being examined

	# Phase 1: collect candidate prefixes by repeatedly chopping the last
	# path component off every filename.
	foreach my $name (@filename_list)
	{
		# Explicit copy so that shortening does not modify the
		# list entry itself
		$candidate = shorten_prefix($name);
		while ($candidate = shorten_prefix($candidate))
		{
			# Stop as soon as this prefix was already recorded
			# with a non-zero weight
			last if ($weight_of{$candidate});

			# Initialize with 0
			$weight_of{$candidate} = "0";
		}
	}

	# Phase 2: for each candidate, sum up the lengths all filenames would
	# have after removing the candidate wherever it matches.
	foreach $candidate (keys(%weight_of))
	{
		foreach my $name (@filename_list)
		{
			# Start from the full filename length
			$weight_of{$candidate} += length($name);

			if (substr($name, 0, length($candidate)) eq $candidate)
			{
				# Prefix applies: credit its length back
				$weight_of{$candidate} -= length($candidate);
			}
		}
	}

	# Phase 3: pick the candidate with the smallest total weight
	my $best = (keys(%weight_of))[0];

	foreach my $prefix (keys(%weight_of))
	{
		$best = $prefix if ($weight_of{$prefix} < $weight_of{$best});
	}

	return($best);
}


#
# shorten_prefix(prefix)
#
# Return PREFIX shortened by last directory component.
#
sub shorten_prefix($)
{
	my @components = split("/", $_[0]);

	pop(@components);
	return join("/", @components);
}


#
# get_dir_list(filename_list)
#
# Return sorted list of directories for each entry in given FILENAME_LIST.
#
sub get_dir_list(@)
{
	my %seen;

	# Record the parent directory of each filename; the hash keys
	# eliminate duplicates.
	$seen{shorten_prefix($_)} = "" foreach (@_);

	return(sort(keys(%seen)));
}


#
# get_relative_base_path(subdirectory)
#
# Return a relative path string which references the base path when applied
# in SUBDIRECTORY.
#
# Example: get_relative_base_path("fs/mm") -> "../../"
#
sub get_relative_base_path($)
{
	my $result = "";
	my $index;

	# Make an empty directory path a special case
	if (!$_[0]) { return(""); }

	# Count number of /s in path; counting via tr on a copy avoids
	# writing to $_[0], which aliases the caller's value and would die
	# on a read-only argument
	my $path = $_[0];
	$index = ($path =~ tr/\///);

	# Add a ../ to $result for each / in the directory path + 1
	for (; $index >= 0; $index--)
	{
		$result .= "../";
	}

	return $result;
}


#
# read_testfile(test_filename)
#
# Read in file TEST_FILENAME which contains test descriptions in the format:
#
#   TN:<name of test>
#   TD:<description of test>
#
# for each test case. Return a reference to a hash containing a mapping
#
#   test name -> test description.
#
# Die on error.
#
sub read_testfile($)
{
	my %result;
	my $test_name;
	local *TEST_HANDLE;

	open(TEST_HANDLE, "<".$_[0])
		or die("ERROR: cannot open $_[0]!\n");

	while (<TEST_HANDLE>)
	{
		chomp($_);

		# Match lines beginning with TN:
		if (/^TN:\s+(.*?)\s*$/)
		{
			# Store name for later use
			$test_name = $1;
		}

		# Match lines beginning with TD:
		if (/^TD:\s+(.*?)\s*$/)
		{
			# Check for empty line
			if ($1)
			{
				# Add description to hash
				$result{$test_name} .= " $1";
			}
			else
			{
				# Add empty line
				$result{$test_name} .= "\n\n";
			}
		}
	}

	close(TEST_HANDLE);

	return \%result;
}


#
# escape_html(STRING)
#
# Return a copy of STRING in which all occurrences of HTML special characters
# are escaped.
#
sub escape_html($)
{
	my $string = $_[0];

	if (!$string) { return ""; }

	# Escape "&" first so that the entities inserted below are not
	# themselves re-escaped
	$string =~ s/&/&amp;/g;		# & -> &amp;
	$string =~ s/</&lt;/g;		# < -> &lt;
	$string =~ s/>/&gt;/g;		# > -> &gt;
	$string =~ s/\"/&quot;/g;	# " -> &quot;

	# Expand tabs to spaces, honoring the configured tab stop width
	while ($string =~ /^([^\t]*)(\t)/)
	{
		my $replacement = " "x($tab_size - (length($1) % $tab_size));
		$string =~ s/^([^\t]*)(\t)/$1$replacement/;
	}

	$string =~ s/\n/<br>/g;		# \n -> <br>

	return $string;
}


#
# get_date_string()
#
# Return the current date in the form: yyyy-mm-dd
#
sub get_date_string()
{
	my $year;
	my $month;
	my $day;

	($year, $month, $day) = (localtime())[5, 4, 3];

	return sprintf("%d-%02d-%02d", $year+1900, $month+1, $day);
}


#
# create_sub_dir(dir_name)
#
# Create subdirectory DIR_NAME if it does not already exist, including all its
# parent directories.
#
# Die on error.
#
sub create_sub_dir($)
{
	# Note: error message must reference $_[0]; the original used $_,
	# which is unrelated to the requested directory here
	system("mkdir", "-p", $_[0])
		and die("ERROR: cannot create directory $_[0]!\n");
}


#
# write_description_file(descriptions, overall_found, overall_hit)
#
# Write HTML file containing all test case descriptions. DESCRIPTIONS is a
# reference to a hash containing a mapping
#
#   test case name -> test case description
#
# Die on error.
#
sub write_description_file($$$)
{
	my %description = %{$_[0]};
	my $found = $_[1];
	my $hit = $_[2];
	my $test_name;
	local *HTML_HANDLE;

	open(HTML_HANDLE, ">descriptions.html")
		or die("ERROR: cannot open descriptions.html for writing!\n");

	write_html_prolog(*HTML_HANDLE, "", "LCOV - test case descriptions");
	write_header(*HTML_HANDLE, 3, "", "", $found, $hit);

	write_test_table_prolog(*HTML_HANDLE,
			"Test case descriptions - alphabetical list");

	foreach $test_name (sort(keys(%description)))
	{
		write_test_table_entry(*HTML_HANDLE, $test_name,
				       escape_html($description{$test_name}));
	}

	write_test_table_epilog(*HTML_HANDLE);
	write_html_epilog(*HTML_HANDLE, "");

	close(HTML_HANDLE);
}



#
# write_png_files()
#
# Create all necessary .png files for the HTML-output in the current
# directory. .png-files are used as bar graphs.
#
# Die on error.
# sub write_png_files() { my %data; local *PNG_HANDLE; $data{"ruby.png"} = [0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25, 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49, 0x4d, 0x45, 0x07, 0xd2, 0x07, 0x11, 0x0f, 0x18, 0x10, 0x5d, 0x57, 0x34, 0x6e, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59, 0x73, 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01, 0xd2, 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d, 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00, 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0xff, 0x35, 0x2f, 0x00, 0x00, 0x00, 0xd0, 0x33, 0x9a, 0x9d, 0x00, 0x00, 0x00, 0x0a, 0x49, 0x44, 0x41, 0x54, 0x78, 0xda, 0x63, 0x60, 0x00, 0x00, 0x00, 0x02, 0x00, 0x01, 0xe5, 0x27, 0xde, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, 0x82]; $data{"amber.png"} = [0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25, 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49, 0x4d, 0x45, 0x07, 0xd2, 0x07, 0x11, 0x0f, 0x28, 0x04, 0x98, 0xcb, 0xd6, 0xe0, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59, 0x73, 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01, 0xd2, 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d, 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00, 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0xff, 0xe0, 0x50, 0x00, 0x00, 0x00, 0xa2, 0x7a, 0xda, 0x7e, 0x00, 0x00, 0x00, 0x0a, 0x49, 0x44, 0x41, 0x54, 0x78, 0xda, 0x63, 0x60, 0x00, 0x00, 0x00, 0x02, 0x00, 0x01, 0xe5, 0x27, 0xde, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, 0x82]; $data{"emerald.png"} = [0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25, 
0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49, 0x4d, 0x45, 0x07, 0xd2, 0x07, 0x11, 0x0f, 0x22, 0x2b, 0xc9, 0xf5, 0x03, 0x33, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59, 0x73, 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01, 0xd2, 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d, 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00, 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0x1b, 0xea, 0x59, 0x0a, 0x0a, 0x0a, 0x0f, 0xba, 0x50, 0x83, 0x00, 0x00, 0x00, 0x0a, 0x49, 0x44, 0x41, 0x54, 0x78, 0xda, 0x63, 0x60, 0x00, 0x00, 0x00, 0x02, 0x00, 0x01, 0xe5, 0x27, 0xde, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, 0x82]; $data{"snow.png"} = [0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25, 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49, 0x4d, 0x45, 0x07, 0xd2, 0x07, 0x11, 0x0f, 0x1e, 0x1d, 0x75, 0xbc, 0xef, 0x55, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59, 0x73, 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01, 0xd2, 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d, 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00, 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x55, 0xc2, 0xd3, 0x7e, 0x00, 0x00, 0x00, 0x0a, 0x49, 0x44, 0x41, 0x54, 0x78, 0xda, 0x63, 0x60, 0x00, 0x00, 0x00, 0x02, 0x00, 0x01, 0xe5, 0x27, 0xde, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, 0x82]; $data{"glass.png"} = [0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25, 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d, 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00, 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x55, 0xc2, 0xd3, 0x7e, 0x00, 0x00, 0x00, 0x01, 0x74, 0x52, 
0x4e, 0x53, 0x00, 0x40, 0xe6, 0xd8, 0x66, 0x00, 0x00, 0x00, 0x01, 0x62, 0x4b, 0x47, 0x44, 0x00, 0x88, 0x05, 0x1d, 0x48, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59, 0x73, 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01, 0xd2, 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49, 0x4d, 0x45, 0x07, 0xd2, 0x07, 0x13, 0x0f, 0x08, 0x19, 0xc4, 0x40, 0x56, 0x10, 0x00, 0x00, 0x00, 0x0a, 0x49, 0x44, 0x41, 0x54, 0x78, 0x9c, 0x63, 0x60, 0x00, 0x00, 0x00, 0x02, 0x00, 0x01, 0x48, 0xaf, 0xa4, 0x71, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, 0x82]; foreach (keys(%data)) { open(PNG_HANDLE, ">".$_) or die("ERROR: cannot create $_!\n"); binmode(PNG_HANDLE); print(PNG_HANDLE map(chr,@{$data{$_}})); close(PNG_HANDLE); } } # # write_css_file() # # Write the cascading style sheet file gcov.css to the current directory. # This file defines basic layout attributes of all generated HTML pages. # sub write_css_file() { local *CSS_HANDLE; # Check for a specified external style sheet file if ($css_filename) { # Simply copy that file system("cp", $css_filename, "gcov.css") and die("ERROR: cannot copy file $css_filename!\n"); return; } open(CSS_HANDLE, ">gcov.css") or die ("ERROR: cannot open gcov.css for writing!\n"); # ************************************************************* my $css_data = ($_=<<"END_OF_CSS") /* All views: initial background and text color */ body { color: #000000; background-color: #FFFFFF; } /* All views: standard link format*/ a:link { color: #284FA8; text-decoration: underline; } /* All views: standard link - visited format */ a:visited { color: #00CB40; text-decoration: underline; } /* All views: standard link - activated format */ a:active { color: #FF0040; text-decoration: underline; } /* All views: main title format */ td.title { text-align: center; padding-bottom: 10px; font-family: sans-serif; font-size: 20pt; font-style: italic; font-weight: bold; } /* All views: header item format */ td.headerItem { text-align: right; padding-right: 
6px; font-family: sans-serif; font-weight: bold; } /* All views: header item value format */ td.headerValue { text-align: left; color: #284FA8; font-family: sans-serif; font-weight: bold; } /* All views: color of horizontal ruler */ td.ruler { background-color: #6688D4; } /* All views: version string format */ td.versionInfo { text-align: center; padding-top: 2px; font-family: sans-serif; font-style: italic; } /* Directory view/File view (all)/Test case descriptions: table headline format */ td.tableHead { text-align: center; color: #FFFFFF; background-color: #6688D4; font-family: sans-serif; font-size: 120%; font-weight: bold; } /* Directory view/File view (all): filename entry format */ td.coverFile { text-align: left; padding-left: 10px; padding-right: 20px; color: #284FA8; background-color: #DAE7FE; font-family: monospace; } /* Directory view/File view (all): bar-graph entry format*/ td.coverBar { padding-left: 10px; padding-right: 10px; background-color: #DAE7FE; } /* Directory view/File view (all): bar-graph outline color */ td.coverBarOutline { background-color: #000000; } /* Directory view/File view (all): percentage entry for files with high coverage rate */ td.coverPerHi { text-align: right; padding-left: 10px; padding-right: 10px; background-color: #DAE7FE; font-weight: bold; } /* Directory view/File view (all): line count entry for files with high coverage rate */ td.coverNumHi { text-align: right; padding-left: 10px; padding-right: 10px; background-color: #DAE7FE; } /* Directory view/File view (all): percentage entry for files with medium coverage rate */ td.coverPerMed { text-align: right; padding-left: 10px; padding-right: 10px; background-color: #FFEA20; font-weight: bold; } /* Directory view/File view (all): line count entry for files with medium coverage rate */ td.coverNumMed { text-align: right; padding-left: 10px; padding-right: 10px; background-color: #FFEA20; } /* Directory view/File view (all): percentage entry for files with low coverage 
rate */ td.coverPerLo { text-align: right; padding-left: 10px; padding-right: 10px; background-color: #FF0000; font-weight: bold; } /* Directory view/File view (all): line count entry for files with low coverage rate */ td.coverNumLo { text-align: right; padding-left: 10px; padding-right: 10px; background-color: #FF0000; } /* File view (all): "show/hide details" link format */ a.detail:link { color: #B8D0FF; } /* File view (all): "show/hide details" link - visited format */ a.detail:visited { color: #B8D0FF; } /* File view (all): "show/hide details" link - activated format */ a.detail:active { color: #FFFFFF; } /* File view (detail): test name table headline format */ td.testNameHead { text-align: right; padding-right: 10px; background-color: #DAE7FE; font-family: sans-serif; font-weight: bold; } /* File view (detail): test lines table headline format */ td.testLinesHead { text-align: center; background-color: #DAE7FE; font-family: sans-serif; font-weight: bold; } /* File view (detail): test name entry */ td.testName { text-align: right; padding-right: 10px; background-color: #DAE7FE; } /* File view (detail): test percentage entry */ td.testPer { text-align: right; padding-left: 10px; padding-right: 10px; background-color: #DAE7FE; } /* File view (detail): test lines count entry */ td.testNum { text-align: right; padding-left: 10px; padding-right: 10px; background-color: #DAE7FE; } /* Test case descriptions: test name format*/ dt { font-family: sans-serif; font-weight: bold; } /* Test case descriptions: description table body */ td.testDescription { padding-top: 10px; padding-left: 30px; padding-bottom: 10px; padding-right: 30px; background-color: #DAE7FE; } /* Source code view: source code format */ pre.source { font-family: monospace; white-space: pre; } /* Source code view: line number format */ span.lineNum { background-color: #EFE383; } /* Source code view: format for lines which were executed */ span.lineCov { background-color: #CAD7FE; } /* Source code view: 
format for lines which were not executed */ span.lineNoCov { background-color: #FF6230; } /* Source code view: format for lines which were executed only in a previous version */ span.lineDiffCov { background-color: #B5F7AF; } END_OF_CSS ; # ************************************************************* # Remove leading tab from all lines $css_data =~ s/^\t//gm; print(CSS_HANDLE $css_data); close(CSS_HANDLE); } # # get_bar_graph_code(base_dir, cover_found, cover_hit) # # Return a string containing HTML code which implements a bar graph display # for a coverage rate of cover_hit * 100 / cover_found. # sub get_bar_graph_code($$$) { my $rate; my $alt; my $width; my $remainder; my $png_name; my $graph_code; # Check number of instrumented lines if ($_[1] == 0) { return ""; } $rate = $_[2] * 100 / $_[1]; $alt = sprintf("%.1f", $rate)."%"; $width = sprintf("%.0f", $rate); $remainder = sprintf("%d", 100-$width); # Decide which .png file to use if ($rate < $med_limit) { $png_name = "ruby.png"; } elsif ($rate < $hi_limit) { $png_name = "amber.png"; } else { $png_name = "emerald.png"; } if ($width == 0) { # Zero coverage $graph_code = (<$alt END_OF_HTML ; } elsif ($width == 100) { # Full coverage $graph_code = (<$alt END_OF_HTML ; } else { # Positive coverage $graph_code = (<$alt$alt END_OF_HTML ; } # Remove leading tabs from all lines $graph_code =~ s/^\t+//gm; chomp($graph_code); return($graph_code); } # # write_html(filehandle, html_code) # # Write out HTML_CODE to FILEHANDLE while removing a leading tabulator mark # in each line of HTML_CODE. # sub write_html(*$) { local *HTML_HANDLE = $_[0]; my $html_code = $_[1]; # Remove leading tab from all lines $html_code =~ s/^\t//gm; print(HTML_HANDLE $html_code) or die("ERROR: cannot write HTML data ($!)\n"); } # # write_html_prolog(filehandle, base_dir, pagetitle) # # Write an HTML prolog common to all HTML files to FILEHANDLE. PAGETITLE will # be used as HTML page title. 
BASE_DIR contains a relative path which points # to the base directory. # sub write_html_prolog(*$$) { my $pagetitle = $_[2]; # ************************************************************* write_html($_[0], < $pagetitle END_OF_HTML ; # ************************************************************* } # # write_header_prolog(filehandle, base_dir) # # Write beginning of page header HTML code. # sub write_header_prolog(*$) { # ************************************************************* write_html($_[0], < $title END_OF_HTML ; # ************************************************************* } # # write_header_line(filehandle, item1, value1, [item2, value2]) # # Write a header line, containing of either one or two pairs "header item" # and "header value". # sub write_header_line(*$$;$$) { my $item1 = $_[1]; my $value1 = $_[2]; # Use GOTO to prevent indenting HTML with more than one tabs if (scalar(@_) > 3) { goto two_items } # ************************************************************* write_html($_[0], < END_OF_HTML ; return(); # ************************************************************* two_items: my $item2 = $_[3]; my $value2 = $_[4]; # ************************************************************* write_html($_[0], < END_OF_HTML ; # ************************************************************* } # # write_header_epilog(filehandle, base_dir) # # Write end of page header HTML code. # sub write_header_epilog(*$) { # ************************************************************* write_html($_[0], <
$item1: $value1
$item1: $value1 $item2: $value2
END_OF_HTML ; # ************************************************************* } # # write_file_table_prolog(filehandle, left_heading, right_heading) # # Write heading for file table. # sub write_file_table_prolog(*$$) { # ************************************************************* write_html($_[0], < END_OF_HTML ; # ************************************************************* } # # write_file_table_entry(filehandle, cover_filename, cover_bar_graph, # cover_found, cover_hit) # # Write an entry of the file table. # sub write_file_table_entry(*$$$$) { my $rate; my $rate_string; my $classification = "Lo"; if ($_[3]>0) { $rate = $_[4] * 100 / $_[3]; $rate_string = sprintf("%.1f", $rate)." %"; if ($rate < $med_limit) { $classification = "Lo"; } elsif ($rate < $hi_limit) { $classification = "Med"; } else { $classification = "Hi"; } } else { $rate_string = "undefined"; } # ************************************************************* write_html($_[0], < END_OF_HTML ; # ************************************************************* } # # write_file_table_detail_heading(filehandle, left_heading, right_heading) # # Write heading for detail section in file table. # sub write_file_table_detail_heading(*$$) { # ************************************************************* write_html($_[0], < END_OF_HTML ; # ************************************************************* } # # write_file_table_detail_entry(filehandle, test_name, cover_found, cover_hit) # # Write entry for detail section in file table. # sub write_file_table_detail_entry(*$$$) { my $rate; my $name = $_[1]; if ($_[2]>0) { $rate = sprintf("%.1f", $_[3]*100/$_[2])." %"; } else { $rate = "undefined"; } if ($name =~ /^(.*),diff$/) { $name = $1." 
(converted)"; } if ($name eq "") { $name = "<unnamed>"; } # ************************************************************* write_html($_[0], < END_OF_HTML ; # ************************************************************* } # # write_file_table_epilog(filehandle) # # Write end of file table HTML code. # sub write_file_table_epilog(*) { # ************************************************************* write_html($_[0], <
END_OF_HTML ; # ************************************************************* } # # write_test_table_prolog(filehandle, table_heading) # # Write heading for test case description table. # sub write_test_table_prolog(*$) { # ************************************************************* write_html($_[0], <

$_[1] $_[2]
$_[1] $_[2] $rate_string $_[4] / $_[3] lines
$_[1] $_[2]
$name $rate $_[3] / $_[2] lines

$_[1]
END_OF_HTML ; # ************************************************************* } # # write_test_table_entry(filehandle, test_name, test_description) # # Write entry for the test table. # sub write_test_table_entry(*$$) { # ************************************************************* write_html($_[0], <$_[1] 
$_[2]

END_OF_HTML ; # ************************************************************* } # # write_test_table_epilog(filehandle) # # Write end of test description table HTML code. # sub write_test_table_epilog(*) { # ************************************************************* write_html($_[0], <

END_OF_HTML ; # ************************************************************* } # # write_source_prolog(filehandle) # # Write start of source code table. # sub write_source_prolog(*) { # ************************************************************* write_html($_[0], <
END_OF_HTML
	;

	# *************************************************************
}


#
# write_source_line(filehandle, line_num, source, hit_count, converted)
#
# Write formatted source code line. Return a line in a format as needed
# by gen_png()
#

sub write_source_line(*$$$$)
{
	my ($handle, $line_num, $source, $hit_count, $converted) = @_;
	my $span_open;			# opening markup for the count cell
	my $count_field;		# 15-character execution count column
	my $png_line;			# result string consumed by gen_png()
	my $anchor_open = "";
	my $anchor_close = "";

	# NOTE(review): the markup string literals below are empty in this
	# copy of the file; upstream genhtml emits <span class="..."> and
	# <a name="..."> tags here. The HTML content appears to have been
	# stripped -- verify against the original source before shipping.
	if (!(defined($hit_count)))
	{
		# Line carries no coverage data at all
		$png_line = "";
		$span_open = "";
		$count_field = " " x 15;
	}
	elsif ($hit_count == 0)
	{
		# Instrumented but never executed
		$png_line = $hit_count;
		$span_open = '';
		$count_field = sprintf("%15d", $hit_count);
	}
	elsif ($converted && defined($highlight))
	{
		# Executed only in the baseline ("converted") version;
		# gen_png() marks these lines with a leading "*"
		$png_line = "*" . $hit_count;
		$span_open = '';
		$count_field = sprintf("%15d", $hit_count);
	}
	else
	{
		# Executed in the current version
		$png_line = $hit_count;
		$span_open = '';
		$count_field = sprintf("%15d", $hit_count);
	}

	$png_line .= ":" . $source;

	# Emit a navigation anchor every $nav_resolution lines when the
	# frame-based overview is enabled
	if ($frames && (($line_num - 1) % $nav_resolution == 0))
	{
		$anchor_open = "";
		$anchor_close = "";
	}

	# *************************************************************

	write_html($handle,
		   $anchor_open .
		   '' . sprintf("%8d", $line_num) .
		   " $span_open$count_field : " .
		   escape_html($source) . ($span_open ? "" : "") .
		   $anchor_close . "\n");

	# *************************************************************

	return ($png_line);
}


#
# write_source_epilog(filehandle)
#
# Write end of source code table.
#

sub write_source_epilog(*)
{
	# *************************************************************

	write_html($_[0], <
	      
	    
	  
	  
END_OF_HTML ; # ************************************************************* } # # write_html_epilog(filehandle, base_dir[, break_frames]) # # Write HTML page footer to FILEHANDLE. BREAK_FRAMES should be set when # this page is embedded in a frameset, clicking the URL link will then # break this frameset. # sub write_html_epilog(*$;$) { my $break_code = ""; if (defined($_[2])) { $break_code = " target=\"_parent\""; } # ************************************************************* write_html($_[0], < Generated by: $lcov_version
END_OF_HTML ; # ************************************************************* } # # write_frameset(filehandle, basedir, basename, pagetitle) # # sub write_frameset(*$$$) { my $frame_width = $overview_width + 40; # ************************************************************* write_html($_[0], < $_[3] <center>Frames not supported by your browser!<br></center> END_OF_HTML ; # ************************************************************* } # # sub write_overview_line(filehandle, basename, line, link) # # sub write_overview_line(*$$$) { my $y1 = $_[2] - 1; my $y2 = $y1 + $nav_resolution - 1; my $x2 = $overview_width - 1; # ************************************************************* write_html($_[0], < END_OF_HTML ; # ************************************************************* } # # write_overview(filehandle, basedir, basename, pagetitle, lines) # # sub write_overview(*$$$$) { my $index; my $max_line = $_[4] - 1; my $offset; # ************************************************************* write_html($_[0], < $_[3] END_OF_HTML ; # ************************************************************* # Make $offset the next higher multiple of $nav_resolution $offset = ($nav_offset + $nav_resolution - 1) / $nav_resolution; $offset = sprintf("%d", $offset ) * $nav_resolution; # Create image map for overview image for ($index = 1; $index <= $_[4]; $index += $nav_resolution) { # Enforce nav_offset if ($index < $offset + 1) { write_overview_line($_[0], $_[2], $index, 1); } else { write_overview_line($_[0], $_[2], $index, $index - $offset); } } # ************************************************************* write_html($_[0], <
Top

Overview
END_OF_HTML ; # ************************************************************* } # # write_header(filehandle, type, trunc_file_name, rel_file_name, lines_found, # lines_hit) # # Write a complete standard page header. TYPE may be (0, 1, 2, 3) # corresponding to (directory view header, file view header, source view # header, test case description header) # sub write_header(*$$$$$) { local *HTML_HANDLE = $_[0]; my $type = $_[1]; my $trunc_name = $_[2]; my $rel_filename = $_[3]; my $lines_found = $_[4]; my $lines_hit = $_[5]; my $base_dir; my $view; my $test; my $rate; my $base_name; # Calculate coverage rate if ($lines_found>0) { $rate = sprintf("%.1f", $lines_hit * 100 / $lines_found)." %"; } else { $rate = "-"; } $base_name = basename($rel_filename); # Prepare text for "current view" field if ($type == 0) { # Main overview $base_dir = ""; $view = $overview_title; } elsif ($type == 1) { # Directory overview $base_dir = get_relative_base_path($rel_filename); $view = "". "$overview_title - $trunc_name"; } elsif ($type == 2) { # File view my $dir_name = dirname($rel_filename); $base_dir = get_relative_base_path($dir_name); if ($frames) { # Need to break frameset when clicking any of these # links $view = "$overview_title - ". "". "$dir_name - $base_name"; } else { $view = "". "$overview_title - ". "". "$dir_name - $base_name"; } } elsif ($type == 3) { # Test description header $base_dir = ""; $view = "". "$overview_title - test case descriptions"; } # Prepare text for "test" field $test = escape_html($test_title); # Append link to test description page if available if (%test_description && ($type != 3)) { if ($frames && ($type == 2)) { # Need to break frameset when clicking this link $test .= " ( ". "view test case descriptions )"; } else { $test .= " ( ". 
"view test case descriptions )"; } } # Write header write_header_prolog(*HTML_HANDLE, $base_dir); write_header_line(*HTML_HANDLE, "Current view", $view); write_header_line(*HTML_HANDLE, "Test", $test); write_header_line(*HTML_HANDLE, "Date", $date, "Instrumented lines", $lines_found); write_header_line(*HTML_HANDLE, "Code covered", $rate, "Executed lines", $lines_hit); write_header_epilog(*HTML_HANDLE, $base_dir); } # # split_filename(filename) # # Return (path, filename, extension) for a given FILENAME. # sub split_filename($) { if (!$_[0]) { return(); } my @path_components = split('/', $_[0]); my @file_components = split('\.', pop(@path_components)); my $extension = pop(@file_components); return (join("/",@path_components), join(".",@file_components), $extension); } # # write_file_table(filehandle, base_dir, overview, testhash, fileview) # # Write a complete file table. OVERVIEW is a reference to a hash containing # the following mapping: # # filename -> "lines_found,lines_hit,page_link" # # TESTHASH is a reference to the following hash: # # filename -> \%testdata # %testdata: name of test affecting this file -> \%testcount # %testcount: line number -> execution count for a single test # # Heading of first column is "Filename" if FILEVIEW is true, "Directory name" # otherwise. # sub write_file_table(*$$$$) { local *HTML_HANDLE = $_[0]; my $base_dir = $_[1]; my %overview = %{$_[2]}; my %testhash = %{$_[3]}; my $fileview = $_[4]; my $filename; my $bar_graph; my $hit; my $found; my $page_link; my $testname; my $testdata; my $testcount; my %affecting_tests; my $coverage_heading = "Coverage"; # Provide a link to details/non-detail list if this is directory # overview and we are supposed to create a detail view if (($base_dir ne "") && $show_details) { if (%testhash) { # This is the detail list, provide link to standard # list $coverage_heading .= " ( hide ". 
"details )"; } else { # This is the standard list, provide link to detail # list $coverage_heading .= " ( show ". "details )"; } } write_file_table_prolog(*HTML_HANDLE, $fileview ? "Filename" : "Directory name", $coverage_heading); foreach $filename (sort(keys(%overview))) { ($found, $hit, $page_link) = split(",", $overview{$filename}); $bar_graph = get_bar_graph_code($base_dir, $found, $hit); $testdata = $testhash{$filename}; # Add anchor tag in case a page link is provided if ($page_link) { $filename = "$filename"; } write_file_table_entry(*HTML_HANDLE, $filename, $bar_graph, $found, $hit); # Check whether we should write test specific coverage # as well if (!($show_details && $testdata)) { next; } # Filter out those tests that actually affect this file %affecting_tests = %{ get_affecting_tests($testdata) }; # Does any of the tests affect this file at all? if (!%affecting_tests) { next; } # Write test details for this entry write_file_table_detail_heading(*HTML_HANDLE, "Test name", "Lines hit"); foreach $testname (keys(%affecting_tests)) { ($found, $hit) = split(",", $affecting_tests{$testname}); # Insert link to description of available if ($test_description{$testname}) { $testname = "". "$testname"; } write_file_table_detail_entry(*HTML_HANDLE, $testname, $found, $hit); } } write_file_table_epilog(*HTML_HANDLE); } # # get_found_and_hit(hash) # # Return the count for entries (found) and entries with an execution count # greater than zero (hit) in a hash (linenumber -> execution count) as # a list (found, hit) # sub get_found_and_hit($) { my %hash = %{$_[0]}; my $found = 0; my $hit = 0; # Calculate sum $found = 0; $hit = 0; foreach (keys(%hash)) { $found++; if ($hash{$_}>0) { $hit++; } } return ($found, $hit); } # # get_affecting_tests(hashref) # # HASHREF contains a mapping filename -> (linenumber -> exec count). Return # a hash containing mapping filename -> "lines found, lines hit" for each # filename which has a nonzero hit count. 
#

#
# get_affecting_tests(hashref)
#
# HASHREF maps test name -> (line number -> execution count).  Return a
# reference to a hash mapping test name -> "lines found,lines hit" for
# each test case which hit at least one line of this file.
#

sub get_affecting_tests($)
{
	my %hash = %{$_[0]};
	my $testname;
	my $testcount;		# (line number -> count) hash of current test
	my %result;
	my $found;
	my $hit;

	foreach $testname (keys(%hash))
	{
		# Get (line number -> count) hash for this test case
		$testcount = $hash{$testname};

		# Calculate sum
		($found, $hit) = get_found_and_hit($testcount);

		if ($hit > 0)
		{
			$result{$testname} = "$found,$hit";
		}
	}
	return(\%result);
}


#
# write_source(filehandle, source_filename, count_data, checksum_data,
#              converted_data)
#
# Write an HTML view of a source code file. Returns a list containing
# data as needed by gen_png().
#
# Die on error.
#

sub write_source($$$$$)
{
	local *HTML_HANDLE = $_[0];
	local *SOURCE_HANDLE;
	my $source_filename = $_[1];
	my %count_data;
	my $line_number;
	my @result;
	my $checkdata = $_[3];	# line number -> checksum (may be empty)
	my $converted = $_[4];	# line number -> "converted" flag

	if ($_[2])
	{
		%count_data = %{$_[2]};
	}

	open(SOURCE_HANDLE, "<".$source_filename)
		or die("ERROR: cannot open $source_filename for reading!\n");

	write_source_prolog(*HTML_HANDLE);

	# FIX(review): the loop condition was empty ("for (...; ; ...)"),
	# which never reads the file and never terminates; the readline
	# <SOURCE_HANDLE> was evidently lost. Restored: a readline as the
	# sole condition of a for(;;) loop implicitly assigns each line to
	# $_ and terminates at EOF (see perlop, "I/O Operators").
	for ($line_number = 1; <SOURCE_HANDLE>; $line_number++)
	{
		chomp($_);

		# Source code matches coverage data?
		if (defined($checkdata->{$line_number}) &&
		    ($checkdata->{$line_number} ne md5_base64($_)))
		{
			die("ERROR: checksum mismatch at $source_filename:".
			    "$line_number\n");
		}

		push (@result,
		      write_source_line(HTML_HANDLE, $line_number,
					$_, $count_data{$line_number},
					$converted->{$line_number}));
	}

	close(SOURCE_HANDLE);
	write_source_epilog(*HTML_HANDLE);
	return(@result);
}


#
# info(printf_parameter)
#
# Use printf to write PRINTF_PARAMETER to stdout only when the $quiet flag
# is not set.
#

sub info(@)
{
	# Forward everything to printf unless --quiet was given
	return if ($quiet);
	printf(@_);
}


#
# subtract_counts(data_ref, base_ref)
#
# Subtract the per-line execution counts in BASE_REF from those in DATA_REF,
# clamping at zero. Return (RESULT_REF, LINES_FOUND, LINES_HIT).
#

sub subtract_counts($$)
{
	my %remaining = %{$_[0]};	# working copy of the data hash
	my %baseline  = %{$_[1]};
	my $total = 0;			# number of instrumented lines
	my $nonzero = 0;		# lines with a positive count left

	for my $lineno (keys(%remaining))
	{
		$total++;
		my $count = $remaining{$lineno};
		my $removed = $baseline{$lineno};

		if (defined($removed))
		{
			$count -= $removed;

			# Make sure we don't get negative numbers
			$count = 0 if ($count < 0);
		}

		$remaining{$lineno} = $count;
		$nonzero++ if ($count > 0);
	}

	return (\%remaining, $total, $nonzero);
}


#
# add_counts(data1_ref, data2_ref)
#
# DATA1_REF and DATA2_REF are references to hashes containing a mapping
#
#   line number -> execution count
#
# Return a list (RESULT_REF, LINES_FOUND, LINES_HIT) where RESULT_REF
# is a reference to a hash containing the combined mapping in which
# execution counts are added.
#

sub add_counts($$)
{
	my %first  = %{$_[0]};
	my %second = %{$_[1]};
	my %merged;			# resulting combined mapping
	my $total = 0;			# total number of lines found
	my $nonzero = 0;		# lines with a count > 0

	# Lines present in the first hash (summed with the second if shared)
	for my $lineno (keys(%first))
	{
		my $sum = $first{$lineno};
		$sum += $second{$lineno} if (defined($second{$lineno}));

		$merged{$lineno} = $sum;
		$total++;
		$nonzero++ if ($sum > 0);
	}

	# Lines present only in the second hash
	for my $lineno (keys(%second))
	{
		next if (defined($first{$lineno}));

		$merged{$lineno} = $second{$lineno};
		$total++;
		$nonzero++ if ($merged{$lineno} > 0);
	}

	return (\%merged, $total, $nonzero);
}


#
# apply_baseline(data_ref, baseline_ref)
#
# Subtract the execution counts found in the baseline hash referenced by
# BASELINE_REF from actual data in DATA_REF.
# sub apply_baseline($$) { my %data_hash = %{$_[0]}; my %base_hash = %{$_[1]}; my $filename; my $testname; my $data; my $data_testdata; my $data_funcdata; my $data_checkdata; my $data_count; my $base; my $base_testdata; my $base_checkdata; my $base_count; my $sumcount; my $found; my $hit; foreach $filename (keys(%data_hash)) { # Get data set for data and baseline $data = $data_hash{$filename}; $base = $base_hash{$filename}; # Get set entries for data and baseline ($data_testdata, undef, $data_funcdata, $data_checkdata) = get_info_entry($data); ($base_testdata, $base_count, undef, $base_checkdata) = get_info_entry($base); # Check for compatible checksums merge_checksums($data_checkdata, $base_checkdata, $filename); # Sumcount has to be calculated anew $sumcount = {}; # For each test case, subtract test specific counts foreach $testname (keys(%{$data_testdata})) { # Get counts of both data and baseline $data_count = $data_testdata->{$testname}; $hit = 0; ($data_count, undef, $hit) = subtract_counts($data_count, $base_count); # Check whether this test case did hit any line at all if ($hit > 0) { # Write back resulting hash $data_testdata->{$testname} = $data_count; } else { # Delete test case which did not impact this # file delete($data_testdata->{$testname}); } # Add counts to sum of counts ($sumcount, $found, $hit) = add_counts($sumcount, $data_count); } # Write back resulting entry set_info_entry($data, $data_testdata, $sumcount, $data_funcdata, $data_checkdata, $found, $hit); $data_hash{$filename} = $data; } return (\%data_hash); } # # remove_unused_descriptions() # # Removes all test descriptions from the global hash %test_description which # are not present in %info_data. 
#

sub remove_unused_descriptions()
{
	my %seen;	# test names that occur anywhere in the coverage data
	my $initial = scalar(keys(%test_description));

	# Collect every test name referenced by any file's test data
	for my $filename (keys(%info_data))
	{
		my ($test_data) = get_info_entry($info_data{$filename});
		$seen{$_} = "" for keys(%{$test_data});
	}

	# Drop descriptions for tests which never appear in the data
	for my $test_name (keys(%test_description))
	{
		delete($test_description{$test_name})
			unless defined($seen{$test_name});
	}

	my $remaining = scalar(keys(%test_description));
	if ($remaining < $initial)
	{
		info("Removed ".($initial - $remaining).
		     " unused descriptions, $remaining remaining.\n");
	}
}


#
# merge_checksums(ref1, ref2, filename)
#
# REF1 and REF2 are references to hashes containing a mapping
#
#   line number -> checksum
#
# Merge checksum lists defined in REF1 and REF2 and return reference to
# resulting hash. Die if a checksum for a line is defined in both hashes
# but does not match.
#

sub merge_checksums($$$)
{
	my ($first, $second, $filename) = @_;
	my %merged;

	for my $line (keys(%{$first}))
	{
		# A line present in both lists must carry the same checksum
		if (defined($second->{$line}) &&
		    ($first->{$line} ne $second->{$line}))
		{
			die("ERROR: checksum mismatch at $filename:$line\n");
		}
		$merged{$line} = $first->{$line};
	}

	# Lines unique to the second list (or identical in both)
	$merged{$_} = $second->{$_} for keys(%{$second});

	return \%merged;
}


#
# combine_info_entries(entry_ref1, entry_ref2, filename)
#
# Combine .info data entry hashes referenced by ENTRY_REF1 and ENTRY_REF2.
# Return reference to resulting hash.
# sub combine_info_entries($$$) { my $entry1 = $_[0]; # Reference to hash containing first entry my $testdata1; my $sumcount1; my $funcdata1; my $checkdata1; my $entry2 = $_[1]; # Reference to hash containing second entry my $testdata2; my $sumcount2; my $funcdata2; my $checkdata2; my %result; # Hash containing combined entry my %result_testdata; my $result_sumcount = {}; my %result_funcdata; my $lines_found; my $lines_hit; my $testname; my $filename = $_[2]; # Retrieve data ($testdata1, $sumcount1, $funcdata1, $checkdata1) = get_info_entry($entry1); ($testdata2, $sumcount2, $funcdata2, $checkdata2) = get_info_entry($entry2); # Merge checksums $checkdata1 = merge_checksums($checkdata1, $checkdata2, $filename); # Combine funcdata foreach (keys(%{$funcdata1})) { $result_funcdata{$_} = $funcdata1->{$_}; } foreach (keys(%{$funcdata2})) { $result_funcdata{$_} = $funcdata2->{$_}; } # Combine testdata foreach $testname (keys(%{$testdata1})) { if (defined($testdata2->{$testname})) { # testname is present in both entries, requires # combination ($result_testdata{$testname}) = add_counts($testdata1->{$testname}, $testdata2->{$testname}); } else { # testname only present in entry1, add to result $result_testdata{$testname} = $testdata1->{$testname}; } # update sum count hash ($result_sumcount, $lines_found, $lines_hit) = add_counts($result_sumcount, $result_testdata{$testname}); } foreach $testname (keys(%{$testdata2})) { # Skip testnames already covered by previous iteration if (defined($testdata1->{$testname})) { next; } # testname only present in entry2, add to result hash $result_testdata{$testname} = $testdata2->{$testname}; # update sum count hash ($result_sumcount, $lines_found, $lines_hit) = add_counts($result_sumcount, $result_testdata{$testname}); } # Calculate resulting sumcount # Store result set_info_entry(\%result, \%result_testdata, $result_sumcount, \%result_funcdata, $checkdata1, $lines_found, $lines_hit); return(\%result); } # # combine_info_files(info_ref1, 
# info_ref2)
#
# Combine .info data in hashes referenced by INFO_REF1 and INFO_REF2. Return
# reference to resulting hash.
#

sub combine_info_files($$)
{
	my %merged = %{$_[0]};
	my %extra  = %{$_[1]};

	for my $filename (keys(%extra))
	{
		if ($merged{$filename})
		{
			# Entry exists in both hashes: combine them
			$merged{$filename} =
				combine_info_entries($merged{$filename},
						     $extra{$filename},
						     $filename);
		}
		else
		{
			# Entry is unique to the second hash: copy it over
			$merged{$filename} = $extra{$filename};
		}
	}

	return(\%merged);
}


#
# apply_prefix(filename, prefix)
#
# If FILENAME begins with PREFIX, remove PREFIX from FILENAME and return
# resulting string, otherwise return FILENAME.
#

sub apply_prefix($$)
{
	my ($filename, $prefix) = @_;

	# \Q...\E protects special characters (e.g. "+") in the prefix
	if (defined($prefix) && ($prefix ne "") &&
	    ($filename =~ /^\Q$prefix\E\/(.*)$/))
	{
		return substr($filename, length($prefix) + 1);
	}

	return $filename;
}


#
# system_no_output(mode, parameters)
#
# Call an external program using PARAMETERS while suppressing depending on
# the value of MODE:
#
#   MODE & 1: suppress STDOUT
#   MODE & 2: suppress STDERR
#
# Return 0 on success, non-zero otherwise.
#

sub system_no_output($@)
{
	my $mode = shift;
	my $status;
	local *OLD_STDERR;
	local *OLD_STDOUT;

	# Duplicate the handles we are about to redirect
	open(OLD_STDOUT, ">>&STDOUT") if ($mode & 1);
	open(OLD_STDERR, ">>&STDERR") if ($mode & 2);

	# Point them at /dev/null for the duration of the call
	open(STDOUT, ">/dev/null") if ($mode & 1);
	open(STDERR, ">/dev/null") if ($mode & 2);

	system(@_);
	$status = $?;

	# Close redirected handles
	close(STDOUT) if ($mode & 1);
	close(STDERR) if ($mode & 2);

	# Restore the original handles
	open(STDOUT, ">>&OLD_STDOUT") if ($mode & 1);
	open(STDERR, ">>&OLD_STDERR") if ($mode & 2);

	return $status;
}


#
# read_config(filename)
#
# Read configuration file FILENAME and return a reference to a hash containing
# all valid key=value pairs found.
#

sub read_config($)
{
	my $filename = $_[0];
	my %result;
	my $key;
	my $value;
	local *HANDLE;

	# A missing configuration file is not fatal: warn, return undef.
	# (Switched to three-argument open so the filename cannot be
	# misinterpreted as containing a mode.)
	if (!open(HANDLE, "<", $filename))
	{
		warn("WARNING: cannot read configuration file $filename\n");
		return undef;
	}

	# FIX(review): the loop read "while ()" -- an empty condition; the
	# readline <HANDLE> was evidently stripped (the chomp/s/// on $_ and
	# the use of $. in the warning require line-by-line reading).
	while (<HANDLE>)
	{
		chomp;
		# Skip comments
		s/#.*//;
		# Remove leading blanks
		s/^\s+//;
		# Remove trailing blanks
		s/\s+$//;
		next unless length;

		# Split into "key = value"; LIMIT 2 keeps any further "="
		# characters inside the value
		($key, $value) = split(/\s*=\s*/, $_, 2);
		if (defined($key) && defined($value))
		{
			$result{$key} = $value;
		}
		else
		{
			warn("WARNING: malformed statement in line $. ".
			     "of configuration file $filename\n");
		}
	}
	close(HANDLE);
	return \%result;
}


#
# apply_config(REF)
#
# REF is a reference to a hash containing the following mapping:
#
#   key_string => var_ref
#
# where KEY_STRING is a keyword and VAR_REF is a reference to an associated
# variable. If the global configuration hash $config contains a value for
# keyword KEY_STRING, VAR_REF will be assigned the value for that keyword.
#

sub apply_config($)
{
	my $ref = $_[0];

	foreach (keys(%{$ref}))
	{
		if (defined($config->{$_}))
		{
			${$ref->{$_}} = $config->{$_};
		}
	}
}