#!/usr/bin/env perl # cloc -- Count Lines of Code {{{1 # Copyright (C) 2006-2025 Al Danial # First release August 2006 # # Includes code from: # - SLOCCount v2.26 # http://www.dwheeler.com/sloccount/ # by David Wheeler. # - Regexp::Common v2017060201 # https://metacpan.org/pod/Regexp::Common # by Damian Conway and Abigail. # - Win32::Autoglob 1.01 # https://metacpan.org/pod/Win32::Autoglob # by Sean M. Burke. # - Algorithm::Diff 1.1902 # https://metacpan.org/pod/Algorithm::Diff # by Tye McQueen. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details: # . # # 1}}} my $VERSION = "2.05"; # odd number == beta; even number == stable my $URL = "github.com/AlDanial/cloc"; # 'https://' pushes header too wide require 5.10.0; # use modules {{{1 use warnings; use strict; use Getopt::Long; use File::Basename; use File::Temp qw { tempfile tempdir }; use File::Find; use File::Path; use File::Spec; use IO::File; use List::Util qw( min max ); use Cwd; use Encode qw( encode ); use POSIX qw { strftime ceil}; # Parallel::ForkManager isn't in the standard distribution. # Use it only if installed, and only if --processes=N is given. # The module load happens in get_max_processes(). my $HAVE_Parallel_ForkManager = 0; # Digest::MD5 isn't in the standard distribution. Use it only if installed. my $HAVE_Digest_MD5 = 0; eval "use Digest::MD5;"; if (defined $Digest::MD5::VERSION) { $HAVE_Digest_MD5 = 1; } else { warn "Digest::MD5 not installed; will skip file uniqueness checks.\n"; } # Time::HiRes became standard with Perl 5.8 my $HAVE_Time_HiRes = 0; eval "use Time::HiRes;"; $HAVE_Time_HiRes = 1 if defined $Time::HiRes::VERSION; my $HAVE_Rexexp_Common; # Regexp::Common isn't in the standard distribution. It will # be installed in a temp directory if necessary. eval "use Regexp::Common qw ( comment ) "; if (defined $Regexp::Common::VERSION) { $HAVE_Rexexp_Common = 1; } else { $HAVE_Rexexp_Common = 0; } # Uncomment next two lines when building Windows executable with perl2exe # or if running on a system that already has Regexp::Common. 
#use Regexp::Common; #$HAVE_Rexexp_Common = 1; #perl2exe_include "Regexp/Common/whitespace.pm" #perl2exe_include "Regexp/Common/URI.pm" #perl2exe_include "Regexp/Common/URI/fax.pm" #perl2exe_include "Regexp/Common/URI/file.pm" #perl2exe_include "Regexp/Common/URI/ftp.pm" #perl2exe_include "Regexp/Common/URI/gopher.pm" #perl2exe_include "Regexp/Common/URI/http.pm" #perl2exe_include "Regexp/Common/URI/pop.pm" #perl2exe_include "Regexp/Common/URI/prospero.pm" #perl2exe_include "Regexp/Common/URI/news.pm" #perl2exe_include "Regexp/Common/URI/tel.pm" #perl2exe_include "Regexp/Common/URI/telnet.pm" #perl2exe_include "Regexp/Common/URI/tv.pm" #perl2exe_include "Regexp/Common/URI/wais.pm" #perl2exe_include "Regexp/Common/CC.pm" #perl2exe_include "Regexp/Common/SEN.pm" #perl2exe_include "Regexp/Common/number.pm" #perl2exe_include "Regexp/Common/delimited.pm" #perl2exe_include "Regexp/Common/profanity.pm" #perl2exe_include "Regexp/Common/net.pm" #perl2exe_include "Regexp/Common/zip.pm" #perl2exe_include "Regexp/Common/comment.pm" #perl2exe_include "Regexp/Common/balanced.pm" #perl2exe_include "Regexp/Common/lingua.pm" #perl2exe_include "Regexp/Common/list.pm" #perl2exe_include "File/Glob.pm" use Text::Tabs qw { expand }; use Cwd qw { cwd }; use File::Glob; # 1}}} # Usage information, options processing. {{{1 my $ON_WINDOWS = 0; $ON_WINDOWS = 1 if ($^O =~ /^MSWin/) or ($^O eq "Windows_NT"); if ($ON_WINDOWS and $ENV{'SHELL'}) { if ($ENV{'SHELL'} =~ m{^/}) { $ON_WINDOWS = 0; # make Cygwin look like Unix } else { $ON_WINDOWS = 1; # MKS defines $SHELL but still acts like Windows } } my $ON_MINGW64 = 0; # git-bash on Windows has special needs if (!$ON_WINDOWS and defined $ENV{'SSH_ASKPASS'} and $ENV{'SSH_ASKPASS'} =~ m{^/mingw64/}) { $ON_MINGW64 = 1; } use Cwd "abs_path"; use File::Spec; # Fix for issues when runing cloc through a symlink on Windows # e.g. : it have been installed with Winget # See https://github.com/AlDanial/cloc/issues/849 if ($ON_WINDOWS) { my $exec_path = abs_path($0); if (-l $0) { $exec_path = abs_path(readlink($0)); } $0 = $exec_path; } my $HAVE_Win32_Long_Path = 0; # Win32::LongPath is an optional dependency that when available on # Windows will be used to support reading files past the 255 char # path length limit. if ($ON_WINDOWS) { eval "use Win32::LongPath;"; if (defined $Win32::LongPath::VERSION) { $HAVE_Win32_Long_Path = 1; } } my $config_file = ''; if ( $ENV{'HOME'} ) { $config_file = File::Spec->catfile( $ENV{'HOME'}, '.config', 'cloc', 'options.txt'); } elsif ( $ENV{'APPDATA'} and $ON_WINDOWS ) { $config_file = File::Spec->catfile( $ENV{'APPDATA'}, 'cloc'); } # $config_file may be updated by check_alternate_config_files() my $NN = chr(27) . "[0m"; # normal $NN = "" if $ON_WINDOWS or !(-t STDOUT); # -t STDOUT: is it a terminal? my $BB = chr(27) . "[1m"; # bold $BB = "" if $ON_WINDOWS or !(-t STDOUT); my $script = basename $0; # Intended for v1.88: # --git-diff-simindex Git diff strategy #3: use git's similarity index # (git diff -M --name-status) to identify file pairs # to compare. This is especially useful to compare # files that were renamed between the commits. 
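# Before options are parsed, $config_file (set above) may supply default
# switches via load_from_config_file() below.  A hypothetical
# ~/.config/cloc/options.txt -- one switch (plus arguments, if any) per line,
# blank lines and lines starting with '#' skipped, command-line switches
# taking priority -- might look like this; the specific switches shown are
# only illustrative:
#
#   # always skip generated and vendored code
#   --exclude-dir=node_modules,build
#   --exclude-ext=min.js
#   --timeout 20
#   --quiet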
my $brief_usage = brief_usage(); my $usage = long_usage(); $| = 1; # flush STDOUT my $start_time = get_time(); my ( $opt_categorized , $opt_found , @opt_force_lang , $opt_lang_no_ext , @opt_script_lang , $opt_count_diff , $opt_diff , $opt_diff_alignment , $opt_diff_list_file , $opt_diff_list_files , $opt_diff_timeout , $opt_timeout , $opt_html , $opt_ignored , $opt_unique , $opt_counted , $opt_show_ext , $opt_show_lang , $opt_progress_rate , $opt_print_filter_stages , $opt_v , $opt_vcs , $opt_version , $opt_include_content , $opt_exclude_content , $opt_exclude_lang , $opt_exclude_list_file , $opt_exclude_dir , $opt_explain , $opt_include_ext , $opt_include_lang , $opt_force_lang_def , $opt_read_lang_def , $opt_write_lang_def , $opt_write_lang_def_incl_dup, $opt_strip_code , $opt_strip_comments , $opt_original_dir , $opt_quiet , $opt_report_file , $opt_sdir , $opt_sum_reports , $opt_hide_rate , $opt_processes , $opt_unicode , $opt_no3 , # accept it but don't use it $opt_3 , $opt_extract_with , $opt_by_file , $opt_by_file_by_lang , $opt_by_percent , $opt_percent , $opt_xml , $opt_xsl , $opt_yaml , $opt_csv , $opt_csv_delimiter , $opt_fullpath , $opt_json , $opt_md , $opt_match_f , @opt_not_match_f , $opt_match_d , @opt_not_match_d , $opt_skip_uniqueness , $opt_list_file , $opt_help , $opt_skip_win_hidden , $opt_read_binary_files , $opt_sql , $opt_sql_append , $opt_sql_project , $opt_sql_style , $opt_inline , $opt_exclude_ext , $opt_ignore_whitespace , $opt_ignore_case , $opt_ignore_case_ext , @opt_ignore_regex , $opt_follow_links , $opt_autoconf , $opt_sum_one , $opt_stdin_name , $opt_force_on_windows , $opt_force_on_unix , # actually forces !$ON_WINDOWS $opt_show_os , $opt_skip_archive , $opt_max_file_size , # in MB $opt_use_sloccount , $opt_no_autogen , $opt_force_git , $opt_git_diff_rel , $opt_git_diff_all , $opt_git_diff_simindex , $opt_config_file , $opt_strip_str_comments , $opt_file_encoding , $opt_docstring_as_code , $opt_stat , $opt_summary_cutoff , $opt_skip_leading , $opt_no_recurse , $opt_only_count_files , $opt_fmt , $opt_encoding , $opt_txt , # not associated with a command line option ); my $getopt_success = GetOptions( # {{{1 "by_file|by-file" => \$opt_by_file , "by_file_by_lang|by-file-by-lang" => \$opt_by_file_by_lang , "categorized=s" => \$opt_categorized , "counted=s" => \$opt_counted , "include_ext|include-ext=s" => \$opt_include_ext , "include_lang|include-lang=s" => \$opt_include_lang , "include_content|include-content=s" => \$opt_include_content , "exclude_content|exclude-content=s" => \$opt_exclude_content , "exclude_lang|exclude-lang=s" => \$opt_exclude_lang , "exclude_dir|exclude-dir=s" => \$opt_exclude_dir , "exclude_list_file|exclude-list-file=s" => \$opt_exclude_list_file , "explain=s" => \$opt_explain , "extract_with|extract-with=s" => \$opt_extract_with , "found=s" => \$opt_found , "count_and_diff|count-and-diff" => \$opt_count_diff , "diff" => \$opt_diff , "diff-alignment|diff_alignment=s" => \$opt_diff_alignment , "diff-timeout|diff_timeout=i" => \$opt_diff_timeout , "diff-list-file|diff_list_file=s" => \$opt_diff_list_file , "diff-list-files|diff_list_files" => \$opt_diff_list_files , "timeout=i" => \$opt_timeout , "html" => \$opt_html , "ignored=s" => \$opt_ignored , "unique=s" => \$opt_unique , "quiet" => \$opt_quiet , "force_lang_def|force-lang-def=s" => \$opt_force_lang_def , "read_lang_def|read-lang-def=s" => \$opt_read_lang_def , "show_ext|show-ext:s" => \$opt_show_ext , "show_lang|show-lang:s" => \$opt_show_lang , "progress_rate|progress-rate=i" => 
\$opt_progress_rate , "print_filter_stages|print-filter-stages" => \$opt_print_filter_stages , "report_file|report-file=s" => \$opt_report_file , "out=s" => \$opt_report_file , "script_lang|script-lang=s" => \@opt_script_lang , "sdir=s" => \$opt_sdir , "skip_uniqueness|skip-uniqueness" => \$opt_skip_uniqueness , "strip_code|strip-code=s" => \$opt_strip_code , "strip_comments|strip-comments=s" => \$opt_strip_comments , "original_dir|original-dir" => \$opt_original_dir , "sum_reports|sum-reports" => \$opt_sum_reports , "hide_rate|hide-rate" => \$opt_hide_rate , "processes=n" => \$opt_processes , "unicode" => \$opt_unicode , "no3" => \$opt_no3 , # ignored "3" => \$opt_3 , "v|verbose:i" => \$opt_v , "vcs=s" => \$opt_vcs , "version" => \$opt_version , "write_lang_def|write-lang-def=s" => \$opt_write_lang_def , "write_lang_def_incl_dup|write-lang-def-incl-dup=s" => \$opt_write_lang_def_incl_dup, "xml" => \$opt_xml , "xsl=s" => \$opt_xsl , "force_lang|force-lang=s" => \@opt_force_lang , "lang_no_ext|lang-no-ext=s" => \$opt_lang_no_ext , "yaml" => \$opt_yaml , "csv" => \$opt_csv , "csv_delimiter|csv-delimiter=s" => \$opt_csv_delimiter , "json" => \$opt_json , "md" => \$opt_md , "fullpath" => \$opt_fullpath , "match_f|match-f=s" => \$opt_match_f , "not_match_f|not-match-f=s" => \@opt_not_match_f , "match_d|match-d=s" => \$opt_match_d , "not_match_d|not-match-d=s" => \@opt_not_match_d , "list_file|list-file=s" => \$opt_list_file , "help" => \$opt_help , "skip_win_hidden|skip-win-hidden" => \$opt_skip_win_hidden , "read_binary_files|read-binary-files" => \$opt_read_binary_files , "sql=s" => \$opt_sql , "sql_project|sql-project=s" => \$opt_sql_project , "sql_append|sql-append" => \$opt_sql_append , "sql_style|sql-style=s" => \$opt_sql_style , "inline" => \$opt_inline , "exclude_ext|exclude-ext=s" => \$opt_exclude_ext , "ignore_whitespace|ignore-whitespace" => \$opt_ignore_whitespace , "ignore_case|ignore-case" => \$opt_ignore_case , "ignore_case_ext|ignore-case-ext" => \$opt_ignore_case_ext , "ignore_regex|ignore-regex=s" => \@opt_ignore_regex , "follow_links|follow-links" => \$opt_follow_links , "autoconf" => \$opt_autoconf , "sum_one|sum-one" => \$opt_sum_one , "percent" => \$opt_percent , "by_percent|by-percent=s" => \$opt_by_percent , "stdin_name|stdin-name=s" => \$opt_stdin_name , "windows" => \$opt_force_on_windows , "unix" => \$opt_force_on_unix , "show_os|show-os" => \$opt_show_os , "skip_archive|skip-archive=s" => \$opt_skip_archive , "max_file_size|max-file-size=f" => \$opt_max_file_size , "use_sloccount|use-sloccount" => \$opt_use_sloccount , "no_autogen|no-autogen" => \$opt_no_autogen , "git" => \$opt_force_git , "git_diff_rel|git-diff-rel" => \$opt_git_diff_rel , "git_diff_all|git-diff-all" => \$opt_git_diff_all , # "git_diff_simindex|git-diff-simindex" => \$opt_git_diff_simindex , "config=s" => \$opt_config_file , "strip_str_comments|strip-str-comments" => \$opt_strip_str_comments , "file_encoding|file-encoding=s" => \$opt_file_encoding , "docstring_as_code|docstring-as-code" => \$opt_docstring_as_code , "stat" => \$opt_stat , "summary_cutoff|summary-cutoff=s" => \$opt_summary_cutoff , "skip_leading|skip-leading:s" => \$opt_skip_leading , "no_recurse|no-recurse" => \$opt_no_recurse , "only_count_files|only-count-files" => \$opt_only_count_files , "fmt=i" => \$opt_fmt , "encoding=s" => \$opt_encoding , # not production ready #880 ); $opt_txt = 0; my $opt_fmt_tempfile = ""; # 1}}} $config_file = $opt_config_file if defined $opt_config_file; load_from_config_file($config_file, # {{{2 
\$opt_by_file , \$opt_by_file_by_lang , \$opt_categorized , \$opt_counted , \$opt_include_ext , \$opt_include_lang , \$opt_include_content , \$opt_exclude_content , \$opt_exclude_lang , \$opt_exclude_dir , \$opt_exclude_list_file , \$opt_explain , \$opt_extract_with , \$opt_found , \$opt_count_diff , \$opt_diff_list_files , \$opt_diff , \$opt_diff_alignment , \$opt_diff_timeout , \$opt_timeout , \$opt_html , \$opt_ignored , \$opt_unique , \$opt_quiet , \$opt_force_lang_def , \$opt_read_lang_def , \$opt_show_ext , \$opt_show_lang , \$opt_progress_rate , \$opt_print_filter_stages , \$opt_report_file , \@opt_script_lang , \$opt_sdir , \$opt_skip_uniqueness , \$opt_strip_code , \$opt_strip_comments , \$opt_original_dir , \$opt_sum_reports , \$opt_hide_rate , \$opt_processes , \$opt_unicode , \$opt_3 , \$opt_v , \$opt_vcs , \$opt_version , \$opt_write_lang_def , \$opt_write_lang_def_incl_dup, \$opt_xml , \$opt_xsl , \@opt_force_lang , \$opt_lang_no_ext , \$opt_yaml , \$opt_csv , \$opt_csv_delimiter , \$opt_json , \$opt_md , \$opt_fullpath , \$opt_match_f , \@opt_not_match_f , \$opt_match_d , \@opt_not_match_d , \$opt_list_file , \$opt_help , \$opt_skip_win_hidden , \$opt_read_binary_files , \$opt_sql , \$opt_sql_project , \$opt_sql_append , \$opt_sql_style , \$opt_inline , \$opt_exclude_ext , \$opt_ignore_whitespace , \$opt_ignore_case , \$opt_ignore_case_ext , \@opt_ignore_regex , \$opt_follow_links , \$opt_autoconf , \$opt_sum_one , \$opt_by_percent , \$opt_stdin_name , \$opt_force_on_windows , \$opt_force_on_unix , \$opt_show_os , \$opt_skip_archive , \$opt_max_file_size , \$opt_use_sloccount , \$opt_no_autogen , \$opt_force_git , \$opt_strip_str_comments , \$opt_file_encoding , \$opt_docstring_as_code , \$opt_stat , ); # 2}}} Not pretty. Not at all. if ($opt_version) { printf "$VERSION\n"; exit; } my $opt_git = 0; $opt_git = 1 if defined($opt_git_diff_all) or defined($opt_git_diff_rel) or (defined($opt_vcs) and ($opt_vcs eq "git")); $opt_by_file = 1 if defined $opt_by_file_by_lang; $opt_fmt = 0 unless defined $opt_fmt; if ($opt_fmt) { $opt_by_file = 1; $opt_json = 1; } my $CLOC_XSL = "cloc.xsl"; # created with --xsl $CLOC_XSL = "cloc-diff.xsl" if $opt_diff; die "\n" unless $getopt_success; print $usage and exit if $opt_help; my %Exclude_Language = (); %Exclude_Language = map { $_ => 1 } split(/,/, $opt_exclude_lang) if $opt_exclude_lang; my %Exclude_Dir = (); %Exclude_Dir = map { $_ => 1 } split(/,/, $opt_exclude_dir ) if $opt_exclude_dir ; die unless exclude_dir_validates(\%Exclude_Dir); my %Include_Ext = (); %Include_Ext = map { $_ => 1 } split(/,/, $opt_include_ext) if $opt_include_ext; my %Include_Language = (); # keys are lower case language names %Include_Language = map { lc($_) => 1 } split(/,/, $opt_include_lang) if $opt_include_lang; # Forcibly exclude .svn, .cvs, .hg, .git, .bzr directories. The contents of these # directories often conflict with files of interest. $opt_exclude_dir = 1; $Exclude_Dir{".svn"} = 1; $Exclude_Dir{".cvs"} = 1; $Exclude_Dir{".hg"} = 1; $Exclude_Dir{".git"} = 1; $Exclude_Dir{".bzr"} = 1; $Exclude_Dir{".snapshot"} = 1; # NetApp backups $Exclude_Dir{".config"} = 1; $opt_count_diff = defined $opt_count_diff ? 
1 : 0; $opt_diff = 1 if $opt_diff_alignment or $opt_diff_list_file or $opt_diff_list_files or $opt_git_diff_rel or $opt_git_diff_all or $opt_git_diff_simindex; $opt_force_git = 1 if $opt_git_diff_rel or $opt_git_diff_all or $opt_git_diff_simindex; $opt_diff_alignment = 0 if $opt_diff_list_file; $opt_exclude_ext = "" unless $opt_exclude_ext; $opt_ignore_whitespace = 0 unless $opt_ignore_whitespace; $opt_ignore_case = 0 unless $opt_ignore_case; $opt_ignore_case_ext = 0 unless $opt_ignore_case_ext; my %ignore_regex = (); $opt_lang_no_ext = 0 unless $opt_lang_no_ext; $opt_follow_links = 0 unless $opt_follow_links; if (defined $opt_diff_timeout) { # if defined but with a value of <= 0, set to 2^31-1 seconds = 68 years $opt_diff_timeout = 2**31-1 unless $opt_diff_timeout > 0; } else { $opt_diff_timeout =10; # seconds } if (defined $opt_timeout) { # if defined but with a value of <= 0, set to 2^31-1 seconds = 68 years $opt_timeout = 2**31-1 unless $opt_timeout > 0; # else is computed dynamically, ref $max_duration_sec } $opt_csv = 0 unless defined $opt_csv; $opt_csv = 1 if $opt_csv_delimiter; $ON_WINDOWS = 1 if $opt_force_on_windows; $ON_WINDOWS = 0 if $opt_force_on_unix; $opt_max_file_size = 100 unless $opt_max_file_size; my $HAVE_SLOCCOUNT_c_count = 0; if (!$ON_WINDOWS and $opt_use_sloccount) { # Only bother doing this kludgey test is user explicitly wants # to use SLOCCount. Debian based systems will hang if just doing # external_utility_exists("c_count") # if c_count is in $PATH; c_count expects to have input. $HAVE_SLOCCOUNT_c_count = external_utility_exists("c_count /bin/sh"); } if ($opt_use_sloccount) { if (!$HAVE_SLOCCOUNT_c_count) { warn "c_count could not be found; ignoring --use-sloccount\n"; $opt_use_sloccount = 0; } else { warn "Using c_count, php_count, xml_count, pascal_count from SLOCCount\n"; warn "--diff is disabled with --use-sloccount\n" if $opt_diff; warn "--count-and-diff is disabled with --use-sloccount\n" if $opt_count_diff; warn "--unicode is disabled with --use-sloccount\n" if $opt_unicode; warn "--strip-comments is disabled with --use-sloccount\n" if $opt_strip_comments; warn "--strip-code is disabled with --use-sloccount\n" if $opt_strip_code; $opt_diff = 0; $opt_count_diff = undef; $opt_unicode = 0; $opt_strip_comments = 0; $opt_strip_code = 0; } } die "--strip-comments and --strip-code are mutually exclusive\n" if $opt_strip_comments and $opt_strip_code; $opt_vcs = 0 if $opt_force_git; # replace Windows path separators with / if ($ON_WINDOWS) { map { s{\\}{/}g } @ARGV; if ($opt_git) { # PowerShell tab expansion automatically prefixes local directories # with ".\" (now mapped to "./"). git ls-files output does not # include this. Strip this prefix to permit clean matches. 
map { s{^\./}{} } @ARGV; } } my @COUNT_DIFF_ARGV = undef; my $COUNT_DIFF_report_file = undef; if ($opt_count_diff and !$opt_diff_list_file) { die "--count-and-diff requires two arguments; got ", scalar @ARGV, "\n" if scalar @ARGV != 2; # prefix with a dummy term so that $opt_count_diff is the # index into @COUNT_DIFF_ARGV to work on at each pass @COUNT_DIFF_ARGV = (undef, $ARGV[0], $ARGV[1], [$ARGV[0], $ARGV[1]]); # 3rd pass: diff them $COUNT_DIFF_report_file = $opt_report_file if $opt_report_file; } # Options defaults: $opt_quiet = 1 if ($opt_md or $opt_json or !(-t STDOUT)) and !defined $opt_report_file; $opt_progress_rate = 100 unless defined $opt_progress_rate; $opt_progress_rate = 0 if defined $opt_quiet; if (!defined $opt_v) { $opt_v = 0; } elsif (!$opt_v) { $opt_v = 1; } if (defined $opt_xsl) { $opt_xsl = $CLOC_XSL if $opt_xsl eq "1"; $opt_xml = 1; } my $skip_generate_report = 0; $opt_sql_style = 0 unless defined $opt_sql_style; $opt_sql = 0 unless $opt_sql_style or defined $opt_sql; if ($opt_sql eq "-" || $opt_sql eq "1") { # stream SQL output to STDOUT $opt_quiet = 1; $skip_generate_report = 1; $opt_by_file = 1; $opt_sum_reports = 0; $opt_progress_rate = 0; } elsif ($opt_sql) { # write SQL output to a file $opt_by_file = 1; $skip_generate_report = 1; $opt_sum_reports = 0; } if ($opt_sql_style) { $opt_sql_style = lc $opt_sql_style; if (!grep { lc $_ eq $opt_sql_style } qw ( Oracle Named_Columns )) { die "'$opt_sql_style' is not a recognized SQL style.\n"; } } $opt_by_percent = '' unless defined $opt_by_percent; if ($opt_by_percent and $opt_by_percent !~ m/^(c|cm|cb|cmb|t)$/i) { die "--by-percent must be either 'c', 'cm', 'cb', 'cmb', or 't'\n"; } $opt_by_percent = lc $opt_by_percent; $opt_by_percent = 't' if $opt_percent; $opt_txt = 1 if $opt_report_file; my $N_OUTPUT_FORMATS = 0; my %OUTFILE_EXT = ( "txt" => $opt_txt , "csv" => $opt_csv , "xml" => 0 , "yaml" => 0 , "json" => 0 , "md" => 0 , ); $OUTFILE_EXT{"xml" } = 1 if defined($opt_xml ); $OUTFILE_EXT{"yaml"} = 1 if defined($opt_yaml); $OUTFILE_EXT{"json"} = 1 if defined($opt_json); $OUTFILE_EXT{"md" } = 1 if defined($opt_md ); foreach my $out_style (sort keys %OUTFILE_EXT) { ++$N_OUTPUT_FORMATS if $OUTFILE_EXT{$out_style}; } if ($N_OUTPUT_FORMATS >= 2 and $OUTFILE_EXT{"txt"}) { --$N_OUTPUT_FORMATS; $OUTFILE_EXT{"txt"} = 0; } if (!$N_OUTPUT_FORMATS) { $N_OUTPUT_FORMATS = 1; $OUTFILE_EXT{"txt"} = 1; $opt_txt = 1; } if (defined $opt_vcs) { if ($opt_vcs eq "auto") { if (is_dir(".git")) { $opt_vcs = "git"; } elsif (is_dir(".svn")) { $opt_vcs = "svn"; } else { warn "--vcs auto: unable to determine versioning system\n"; } } if ($opt_vcs eq "git") { $opt_vcs = "git -c \"safe.directory=*\" ls-files"; my @submodules = invoke_generator("git -c \"safe.directory=*\" submodule status", \@ARGV); foreach my $SM (@submodules) { $SM =~ s/^\s+//; # may have leading space $SM =~ s/\(\S+\)\s*$//; # may end with something like (heads/master) my ($checksum, $dir) = split(' ', $SM, 2); $dir =~ s/\s+$//; $Exclude_Dir{$dir} = 1; } } elsif ($opt_vcs eq "svn") { $opt_vcs = "svn list -R"; } } my $list_no_autogen = 0; if (defined $opt_no_autogen and scalar @ARGV == 1 and $ARGV[0] eq "list") { $list_no_autogen = 1; } if ($opt_summary_cutoff) { my $error = summary_cutoff_error($opt_summary_cutoff); die "$error\n" if $error; } if (!$opt_config_file) { # if not explicitly given, look for a config file in other # possible locations my $other_loc = check_alternate_config_files($opt_list_file, $opt_exclude_list_file, $opt_read_lang_def, $opt_force_lang_def, 
$opt_diff_list_file); $opt_config_file = $other_loc if $other_loc; } # --match-d and --not-match-d: if end with a trailing slash, update the # regex to be either slash or end of line since File::Find::find() will # not see the trailing slash in leaf directories (#732, #833). if ($opt_match_d and $opt_match_d =~ m{/$}) { $opt_match_d =~ s{/$}{(/|\$)}; } foreach my $nmd (@opt_not_match_d) { if ($nmd =~ m{/$}) { $nmd =~ s{/$}{(/|\$)}; } } die $brief_usage unless defined $opt_version or defined $opt_show_lang or defined $opt_show_ext or defined $opt_show_os or defined $opt_write_lang_def or defined $opt_write_lang_def_incl_dup or defined $opt_list_file or defined $opt_diff_list_file or defined $opt_vcs or defined $opt_xsl or defined $opt_explain or $list_no_autogen or scalar @ARGV >= 1; if (!$opt_diff_list_file) { die "--diff requires two arguments; got ", scalar @ARGV, "\n" if $opt_diff and !$opt_sum_reports and scalar @ARGV != 2; die "--diff arguments are identical; nothing done", "\n" if $opt_diff and !$opt_sum_reports and scalar @ARGV == 2 and $ARGV[0] eq $ARGV[1]; } my $HAVE_Algorithm_Diff = 0; # Algorithm::Diff isn't in the standard distribution. It will # be installed in a temp directory if necessary. eval "use Algorithm::Diff qw ( sdiff ) "; if (defined $Algorithm::Diff::VERSION) { $HAVE_Algorithm_Diff = 1; } else { Install_Algorithm_Diff(); } trick_pp_packer_encode() if $ON_WINDOWS and $opt_file_encoding; $File::Find::dont_use_nlink = 1 if $opt_stat or top_level_SMB_dir(\@ARGV); my @git_similarity = (); # only populated with --git-diff-simindex my %git_metadata = (); # key is hash, tag, or other git reference; # this has two keys if doing git diff and # both L and R are git references get_git_metadata(\@ARGV, \%git_metadata) if $opt_force_git; #use Data::Dumper; #print Dumper(\%git_metadata); replace_git_hash_with_tarfile(\@ARGV, \@git_similarity); # 1}}} # Step 1: Initialize global constants. 
{{{1 # my $nFiles_Found = 0; # updated in make_file_list my (%Language_by_Extension, %Language_by_Script, %Filters_by_Language, %Not_Code_Extension, %Not_Code_Filename, %Language_by_File_Type, %Language_by_Filename, %Scale_Factor, %Known_Binary_Archives, %Language_by_Prefix, %EOL_Continuation_re, ); my $ALREADY_SHOWED_HEADER = 0; my $ALREADY_SHOWED_XML_SECTION = 0; my %Error_Codes = ( 'Unable to read' => -1, 'Neither file nor directory' => -2, 'Diff error (quoted comments?)' => -3, 'Diff error, exceeded timeout' => -4, 'Line count, exceeded timeout' => -5, ); my %Extension_Collision = ( 'ADSO/IDSM' => [ 'adso' ] , 'C#/Smalltalk' => [ 'cs' ] , 'D/dtrace' => [ 'd' ] , 'F#/Forth' => [ 'fs' ] , 'Fortran 77/Forth' => [ 'f', 'for' ] , 'IDL/Qt Project/Prolog/ProGuard' => [ 'pro' ] , 'Lisp/Julia' => [ 'jl' ] , 'Lisp/OpenCL' => [ 'cl' ] , 'MATLAB/Mathematica/Objective-C/MUMPS/Mercury' => [ 'm' ] , 'Pascal/Pawn' => [ 'p' ] , 'Pascal/Puppet' => [ 'pp' ] , 'Perl/Prolog' => [ 'pl', 'PL' ] , 'PHP/Pascal/Fortran/Pawn' => [ 'inc' ] , 'Raku/Prolog' => [ 'p6', 'P6' ] , 'XML-Qt-GTK/Glade' => [ 'ui' ] , 'TypeScript/Qt Linguist' => [ 'ts' ] , 'Verilog-SystemVerilog/Coq' => [ 'v' ] , 'Visual Basic/TeX/Apex Class' => [ 'cls' ] , 'Scheme/SaltStack' => [ 'sls' ] , 'SKILL/.NET IL' => [ 'il' ] , 'Clojure/Cangjie' => [ 'cj' ] , ); my @Autogen_to_ignore = no_autogen_files($list_no_autogen); if ($opt_force_lang_def) { # replace cloc's definitions read_lang_def( $opt_force_lang_def , # Sample values: \%Language_by_Extension, # Language_by_Extension{f} = 'Fortran 77' \%Language_by_Script , # Language_by_Script{sh} = 'Bourne Shell' \%Language_by_File_Type , # Language_by_File_Type{makefile} = 'make' \%Filters_by_Language , # Filters_by_Language{Bourne Shell}[0] = # [ 'remove_matches' , '^\s*#' ] \%Not_Code_Extension , # Not_Code_Extension{jpg} = 1 \%Not_Code_Filename , # Not_Code_Filename{README} = 1 \%Scale_Factor , # Scale_Factor{Perl} = 4.0 \%EOL_Continuation_re , # EOL_Continuation_re{C++} = '\\$' ); } else { set_constants( # \%Language_by_Extension, # Language_by_Extension{f} = 'Fortran 77' \%Language_by_Script , # Language_by_Script{sh} = 'Bourne Shell' \%Language_by_File_Type , # Language_by_File_Type{makefile} = 'make' \%Language_by_Prefix , # Language_by_Prefix{Dockerfile} = 'Dockerfile' \%Filters_by_Language , # Filters_by_Language{Bourne Shell}[0] = # [ 'remove_matches' , '^\s*#' ] \%Not_Code_Extension , # Not_Code_Extension{jpg} = 1 \%Not_Code_Filename , # Not_Code_Filename{README} = 1 \%Scale_Factor , # Scale_Factor{Perl} = 4.0 \%Known_Binary_Archives, # Known_Binary_Archives{.tar} = 1 \%EOL_Continuation_re , # EOL_Continuation_re{C++} = '\\$' ); if ($opt_no_autogen) { foreach my $F (@Autogen_to_ignore) { $Not_Code_Filename{ $F } = 1; } } } if ($opt_read_lang_def) { # augment cloc's definitions (keep cloc's where there are overlaps) merge_lang_def( $opt_read_lang_def , # Sample values: \%Language_by_Extension, # Language_by_Extension{f} = 'Fortran 77' \%Language_by_Script , # Language_by_Script{sh} = 'Bourne Shell' \%Language_by_File_Type , # Language_by_File_Type{makefile} = 'make' \%Filters_by_Language , # Filters_by_Language{Bourne Shell}[0] = # [ 'remove_matches' , '^\s*#' ] \%Not_Code_Extension , # Not_Code_Extension{jpg} = 1 \%Not_Code_Filename , # Not_Code_Filename{README} = 1 \%Scale_Factor , # Scale_Factor{Perl} = 4.0 \%EOL_Continuation_re , # EOL_Continuation_re{C++} = '\\$' ); } if ($opt_lang_no_ext and !defined $Filters_by_Language{$opt_lang_no_ext}) { die_unknown_lang($opt_lang_no_ext, 
"--lang-no-ext") } check_scale_existence(\%Filters_by_Language, \%Language_by_Extension, \%Scale_Factor); parse_ignore_regex(\@opt_ignore_regex, \%Filters_by_Language, \%ignore_regex) if @opt_ignore_regex; my $nCounted = 0; # Process command line provided extension-to-language mapping overrides. # Make a hash of known languages in lower case for easier matching. my %Recognized_Language_lc = (); # key = language name in lc, value = true name foreach my $language (keys %Filters_by_Language) { my $lang_lc = lc $language; $Recognized_Language_lc{$lang_lc} = $language; } my %Forced_Extension = (); # file name extensions which user wants to count my $All_One_Language = 0; # set to !0 if --force-lang's is missing foreach my $pair (@opt_force_lang) { my ($lang, $extension) = split(',', $pair); my $lang_lc = lc $lang; if (defined $extension) { $Forced_Extension{$extension} = $lang; die_unknown_lang($lang, "--force-lang") unless $Recognized_Language_lc{$lang_lc}; $Language_by_Extension{$extension} = $Recognized_Language_lc{$lang_lc}; } else { # the scary case--count everything as this language $All_One_Language = $Recognized_Language_lc{$lang_lc}; } } foreach my $pair (@opt_script_lang) { my ($lang, $script_name) = split(',', $pair); my $lang_lc = lc $lang; if (!defined $script_name) { die "The --script-lang option requires a comma separated pair of ". "strings.\n"; } die_unknown_lang($lang, "--script-lang") unless $Recognized_Language_lc{$lang_lc}; $Language_by_Script{$script_name} = $Recognized_Language_lc{$lang_lc}; } # If user provided a language definition file, make sure those # extensions aren't rejected. foreach my $ext (%Language_by_Extension) { next unless defined $Not_Code_Extension{$ext}; delete $Not_Code_Extension{$ext}; } # If user provided file extensions to ignore, add these to # the exclusion list. foreach my $ext (map { $_ => 1 } split(/,/, $opt_exclude_ext ) ) { $ext = lc $ext if $ON_WINDOWS or $opt_ignore_case_ext; $Not_Code_Extension{$ext} = 1; } # If SQL or --by-file output is requested, keep track of directory names # generated by File::Temp::tempdir and used to temporarily hold the results # of compressed archives. Contents of the SQL table 't' will be much # cleaner if these meaningless directory names are stripped from the front # of files pulled from the archives. my %TEMP_DIR = (); my $TEMP_OFF = 0; # Needed for --sdir; keep track of the number of # scratch directories made in this run to avoid # file overwrites by multiple extractions to same # sdir. # Also track locations where temporary installations, if necessary, of # Algorithm::Diff and/or Regexp::Common are done. Make sure these # directories are not counted as inputs (ref bug #80 2012-11-23). my %TEMP_INST = (); # invert %Language_by_Script hash to get an easy-to-look-up list of known # scripting languages my %Script_Language = map { $_ => 1 } values %Language_by_Script ; # 1}}} # Step 2: Early exits for display, summation. {{{1 # print_extension_info( $opt_show_ext ) if defined $opt_show_ext ; print_language_info( $opt_show_lang, '') if defined $opt_show_lang; print_language_filters( $opt_explain ) if defined $opt_explain ; exit if (defined $opt_show_ext) or (defined $opt_show_lang) or (defined $opt_explain) or $list_no_autogen; Top_of_Processing_Loop: # Sorry, coding purists. Using a goto to implement --count-and-diff # which has to do three passes over the main code, starting with # a clean slate each time. 
if ($opt_count_diff) { @ARGV = ( $COUNT_DIFF_ARGV[ $opt_count_diff ] ); if ($opt_count_diff == 3) { $opt_diff = 1; @ARGV = @{$COUNT_DIFF_ARGV[ $opt_count_diff ]}; # last arg is list of list } elsif ($opt_diff_list_files) { $opt_diff = 0; } if ($opt_report_file) { # Instead of just one output file, will have three. # Keep their names unique otherwise results are clobbered. # Replace file path separators with underscores otherwise # may end up with illegal file names. my ($fn_0, $fn_1) = (undef, undef); if ($ON_WINDOWS) { ($fn_0 = $ARGV[0]) =~ s{\\}{_}g; $fn_0 =~ s{:}{_}g; $fn_0 =~ s{/}{_}g; ($fn_1 = $ARGV[1]) =~ s{\\}{_}g if defined $ARGV[1]; $fn_1 =~ s{:}{_}g if defined $ARGV[1]; $fn_1 =~ s{/}{_}g if defined $ARGV[1]; } else { ($fn_0 = $ARGV[0]) =~ s{/}{_}g; ($fn_1 = $ARGV[1]) =~ s{/}{_}g if defined $ARGV[1]; } if ($opt_count_diff == 3) { $opt_report_file = $COUNT_DIFF_report_file . ".diff.$fn_0.$fn_1"; } else { $opt_report_file = $COUNT_DIFF_report_file . ".$fn_0"; } } else { # STDOUT; print a header showing what it's working on if ($opt_count_diff == 3) { print "\ndiff $ARGV[0] $ARGV[1]::\n"; } else { print "\n" if $opt_count_diff > 1; print "$ARGV[0]::\n"; } } $ALREADY_SHOWED_HEADER = 0; $ALREADY_SHOWED_XML_SECTION = 0; } #print "Before glob have [", join(",", @ARGV), "]\n"; @ARGV = windows_glob(@ARGV) if $ON_WINDOWS; #print "after glob have [", join(",", @ARGV), "]\n"; # filter out archive files if requested to do so if (defined $opt_skip_archive) { my @non_archive = (); foreach my $candidate (@ARGV) { if ($candidate !~ m/${opt_skip_archive}$/) { push @non_archive, $candidate; } } @ARGV = @non_archive; } if ($opt_sum_reports and $opt_diff) { my @results = (); if ($opt_csv and !defined($opt_csv_delimiter)) { $opt_csv_delimiter = ","; } if ($opt_list_file) { # read inputs from the list file my @list = read_list_file($opt_list_file); if ($opt_csv) { @results = combine_csv_diffs($opt_csv_delimiter, \@list); } else { @results = combine_diffs(\@list); } } elsif ($opt_vcs) { # read inputs from the VCS generator my @list = invoke_generator($opt_vcs, \@ARGV); if ($opt_csv) { @results = combine_csv_diffs($opt_csv_delimiter, \@list); } else { @results = combine_diffs(\@list); } } else { # get inputs from the command line if ($opt_csv) { @results = combine_csv_diffs($opt_csv_delimiter, \@ARGV); } else { @results = combine_diffs(\@ARGV); } } if ($opt_report_file) { write_file($opt_report_file, {}, @results); } else { print "\n", join("\n", @results), "\n"; } exit; } if ($opt_sum_reports) { my %Results = (); foreach my $type( "by language", "by report file" ) { my $found_lang = undef; if ($opt_list_file or $opt_vcs) { # read inputs from the list file my @list; if ($opt_vcs) { @list = invoke_generator($opt_vcs, \@ARGV); } else { @list = read_list_file($opt_list_file); } $found_lang = combine_results(\@list, $type, \%{$Results{ $type }}, \%Filters_by_Language ); } else { # get inputs from the command line $found_lang = combine_results(\@ARGV, $type, \%{$Results{ $type }}, \%Filters_by_Language ); } next unless %Results; my $end_time = get_time(); my @results = generate_report($VERSION, $end_time - $start_time, $type, \%{$Results{ $type }}, \%Scale_Factor, "txt"); if ($opt_report_file) { my $ext = ".lang"; $ext = ".file" unless $type eq "by language"; next if !$found_lang and $ext eq ".lang"; write_file($opt_report_file . 
$ext, {}, @results); } else { print "\n", join("\n", @results), "\n"; } } exit; } if ($opt_write_lang_def or $opt_write_lang_def_incl_dup) { my $file = $opt_write_lang_def if $opt_write_lang_def; $file = $opt_write_lang_def_incl_dup if $opt_write_lang_def_incl_dup; write_lang_def($file , \%Language_by_Extension, \%Language_by_Script , \%Language_by_File_Type , \%Filters_by_Language , \%Not_Code_Extension , \%Not_Code_Filename , \%Scale_Factor , \%EOL_Continuation_re , ); exit; } if ($opt_show_os) { if ($ON_WINDOWS) { print "Windows\n"; } else { print "UNIX\n"; } exit; } my $max_processes = get_max_processes(); # 1}}} # Step 3: Create a list of files to consider. {{{1 # a) If inputs are binary archives, first cd to a temp # directory, expand the archive with the user-given # extraction tool, then add the temp directory to # the list of dirs to process. # b) Create a list of every file that might contain source # code. Ignore binary files, zero-sized files, and # any file in a directory the user says to exclude. # c) Determine the language for each file in the list. # my @binary_archive = (); my $cwd = cwd(); if ($opt_extract_with) { #print "cwd main = [$cwd]\n"; my @extract_location = (); foreach my $bin_file (@ARGV) { my $extract_dir = undef; if ($opt_sdir) { ++$TEMP_OFF; $extract_dir = "$opt_sdir/$TEMP_OFF"; File::Path::rmtree($extract_dir) if is_dir($extract_dir); File::Path::mkpath($extract_dir) unless is_dir($extract_dir); } else { $extract_dir = tempdir( CLEANUP => 1 ); # 1 = delete on exit } $TEMP_DIR{ $extract_dir } = 1 if $opt_sql or $opt_by_file; print "mkdir $extract_dir\n" if $opt_v; print "cd $extract_dir\n" if $opt_v; chdir $extract_dir; my $bin_file_full_path = ""; if (File::Spec->file_name_is_absolute( $bin_file )) { $bin_file_full_path = $bin_file; #print "bin_file_full_path (was ful) = [$bin_file_full_path]\n"; } else { $bin_file_full_path = File::Spec->catfile( $cwd, $bin_file ); #print "bin_file_full_path (was rel) = [$bin_file_full_path]\n"; } my $extract_cmd = uncompress_archive_cmd($bin_file_full_path); print $extract_cmd, "\n" if $opt_v; system $extract_cmd; push @extract_location, $extract_dir; chdir $cwd; } # It is possible that the binary archive itself contains additional # files compressed the same way (true for Java .ear files). Go # through all the files that were extracted, see if they are binary # archives and try to extract them. Lather, rinse, repeat. 
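# In outline, the nested-archive handling below is a fixed-point iteration,
# roughly:
#
#   while (archives keep being found) {
#       scan every extraction directory for archive files;
#       extract each one into a fresh scratch directory;
#       delete the archive so it is not extracted again;
#       stop when a pass finds the same number of archives as the previous pass;
#   }
#
# Each scratch directory is added to @extract_location so later passes (and
# the final file scan) pick up its contents.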
my $binary_archives_exist = 1; my $count_binary_archives = 0; my $previous_count = 0; my $n_pass = 0; while ($binary_archives_exist) { @binary_archive = (); foreach my $dir (@extract_location) { find(\&archive_files, $dir); # populates global @binary_archive } foreach my $archive (@binary_archive) { my $extract_dir = undef; if ($opt_sdir) { ++$TEMP_OFF; $extract_dir = "$opt_sdir/$TEMP_OFF"; File::Path::rmtree($extract_dir) if is_dir($extract_dir); File::Path::mkpath($extract_dir) unless is_dir($extract_dir); } else { $extract_dir = tempdir( CLEANUP => 1 ); # 1 = delete on exit } $TEMP_DIR{ $extract_dir } = 1 if $opt_sql or $opt_by_file; print "mkdir $extract_dir\n" if $opt_v; print "cd $extract_dir\n" if $opt_v; chdir $extract_dir; my $extract_cmd = uncompress_archive_cmd($archive); print $extract_cmd, "\n" if $opt_v; system $extract_cmd; push @extract_location, $extract_dir; unlink $archive; # otherwise will be extracting it forever } $count_binary_archives = scalar @binary_archive; if ($count_binary_archives == $previous_count) { $binary_archives_exist = 0; } $previous_count = $count_binary_archives; } chdir $cwd; @ARGV = @extract_location; } else { # see if any of the inputs need to be auto-uncompressed &/or expanded my @updated_ARGS = (); replace_git_hash_with_tarfile(\@ARGV, \@git_similarity) if $opt_force_git; foreach my $Arg (@ARGV) { if (is_dir($Arg)) { push @updated_ARGS, $Arg; next; } my $full_path = ""; if (File::Spec->file_name_is_absolute( $Arg )) { $full_path = $Arg; } else { $full_path = File::Spec->catfile( $cwd, $Arg ); } #print "full_path = [$full_path]\n"; my $extract_cmd = uncompress_archive_cmd($full_path); if ($extract_cmd) { my $extract_dir = undef; if ($opt_sdir) { ++$TEMP_OFF; $extract_dir = "$opt_sdir/$TEMP_OFF"; File::Path::rmtree($extract_dir) if is_dir($extract_dir); File::Path::mkpath($extract_dir) unless is_dir($extract_dir); } else { $extract_dir = tempdir( CLEANUP => 1 ); # 1 = delete on exit } $TEMP_DIR{ $extract_dir } = 1 if $opt_sql or $opt_by_file; print "mkdir $extract_dir\n" if $opt_v; print "cd $extract_dir\n" if $opt_v; chdir $extract_dir; print $extract_cmd, "\n" if $opt_v; system $extract_cmd; push @updated_ARGS, $extract_dir; chdir $cwd; } else { # this is a conventional, uncompressed, unarchived file # or a directory; keep as-is push @updated_ARGS, $Arg; } } @ARGV = @updated_ARGS; # make sure we're not counting any directory containing # temporary installations of Regexp::Common, Algorithm::Diff foreach my $d (sort keys %TEMP_INST) { foreach my $a (@ARGV) { next unless is_dir($a); if ($opt_v > 2) { printf "Comparing %s (location of %s) to input [%s]\n", $d, $TEMP_INST{$d}, $a; } if ($a eq $d) { die "File::Temp::tempdir chose directory ", $d, " to install ", $TEMP_INST{$d}, " but this ", "matches one of your input directories. Rerun ", "with --sdir and supply a different temporary ", "directory for ", $TEMP_INST{$d}, "\n"; } } } } # 1}}} my @Errors = (); my @file_list = (); # global variable updated in files() my %Ignored = (); # files that are not counted (language not recognized or # problems reading the file) my %upper_lower_map = (); # global variable (needed only on Windows) to # track case of original filename, populated in # make_file_list() if $ON_WINDOWS if ($opt_diff) { # Step 4: Separate code from non-code files. 
{{{1 my @fh = (); my @files_for_set = (); my @files_added_tot = (); my @files_removed_tot = (); my @file_pairs_tot = (); # make file lists for each separate argument if ($opt_diff_list_file) { @files_for_set = ( (), () ); file_pairs_from_file($opt_diff_list_file, # in \@files_added_tot , # out \@files_removed_tot , # out \@file_pairs_tot , # out ); foreach my $F (@files_added_tot) { if ($ON_WINDOWS) { (my $lc = lc $F) =~ s{\\}{/}g; $upper_lower_map{$lc} = $F; $F = $lc; } push @{$files_for_set[1]}, $F; } foreach my $F (@files_removed_tot) { if ($ON_WINDOWS) { (my $lc = lc $F) =~ s{\\}{/}g; $upper_lower_map{$lc} = $F; $F = $lc; } push @{$files_for_set[0]}, $F; } foreach my $pair (@file_pairs_tot) { if ($ON_WINDOWS) { push @{$files_for_set[0]}, lc $pair->[0]; push @{$files_for_set[1]}, lc $pair->[1]; } else { push @{$files_for_set[0]}, $pair->[0]; push @{$files_for_set[1]}, $pair->[1]; } } @ARGV = (1, 2); # place holders } for (my $i = 0; $i < scalar @ARGV; $i++) { if ($opt_diff_list_file) { push @fh, make_file_list($files_for_set[$i], $i+1, \%Error_Codes, \@Errors, \%Ignored); @{$files_for_set[$i]} = @file_list; } elsif ($opt_diff_list_files) { my @list_files = read_list_file($ARGV[$i]); push @fh, make_file_list(\@list_files, $i+1, \%Error_Codes, \@Errors, \%Ignored); @{$files_for_set[$i]} = @file_list; } else { push @fh, make_file_list([ $ARGV[$i] ], $i+1, \%Error_Codes, \@Errors, \%Ignored); @{$files_for_set[$i]} = @file_list; } if ($opt_exclude_list_file) { # note: process_exclude_list_file() references global @file_list process_exclude_list_file($opt_exclude_list_file, \%Exclude_Dir, \%Ignored); } if ($opt_no_autogen) { exclude_autogenerated_files(\@{$files_for_set[$i]}, # in/out \%Error_Codes, \@Errors, \%Ignored); } @file_list = (); } # 1}}} # Step 5: Remove duplicate files. {{{1 # my %Language = (); my %unique_source_file = (); my $n_set = 0; foreach my $FH (@fh) { # loop over each pair of file sets ++$n_set; remove_duplicate_files($FH, \%{$Language{$FH}} , \%{$unique_source_file{$FH}} , \%Error_Codes , \@Errors , \%Ignored ); if ($opt_exclude_content) { exclude_by_regex($opt_exclude_content, # in \%{$unique_source_file{$FH}}, # in/out \%Ignored); # out } elsif ($opt_include_content) { include_by_regex($opt_include_content, # in \%{$unique_source_file{$FH}}, # in/out \%Ignored); # out } if ($opt_include_lang) { # remove files associated with languages not # specified by --include-lang my @delete_file = (); foreach my $file (keys %{$unique_source_file{$FH}}) { my $keep_file = 0; foreach my $keep_lang (keys %Include_Language) { if (lc($Language{$FH}{$file}) eq $keep_lang) { $keep_file = 1; last; } } next if $keep_file; push @delete_file, $file; } foreach my $file (@delete_file) { delete $Language{$FH}{$file}; } } printf "%2d: %8d unique file%s. \r", $n_set, plural_form(scalar keys %unique_source_file) unless $opt_quiet; my $suffix = $n_set == 1 ? "L" : "R"; write_file("${opt_unique}_${suffix}", {}, sort keys %unique_source_file) if $opt_unique; } # 1}}} # Step 6: Count code, comments, blank lines. 
{{{1 # my %Results_by_Language = (); my %Results_by_File = (); my %Delta_by_Language = (); my %Delta_by_File = (); my %Renamed = (); my %alignment = (); my $fset_a = $fh[0]; my $fset_b = $fh[1]; my $n_filepairs_compared = 0; my $tot_counted = 0; if ( scalar @fh != 2 ) { print "Error: incorrect length fh array when preparing diff at step 6.\n"; exit 1; } if ($opt_git_diff_rel and (scalar(keys %git_metadata) == 2)) { # --git --diff with both L and R as git references align_from_git($ARGV[0] , # in, before tag $ARGV[1] , # in, after tag \@files_added_tot , # out \@files_removed_tot , # out \@file_pairs_tot , # out \%Renamed , # out ); } elsif (!$opt_diff_list_file) { align_by_pairs(\%{$unique_source_file{$fset_a}} , # in \%{$unique_source_file{$fset_b}} , # in \@files_added_tot , # out \@files_removed_tot , # out \@file_pairs_tot , # out ); } #use Data::Dumper; #print "in files L : ", Dumper($unique_source_file{$fset_a}); #print "in files R : ", Dumper($unique_source_file{$fset_b}); #print "added : ", Dumper(\@files_added_tot); #print "removed : ", Dumper(\@files_removed_tot); #print "pairs : ", Dumper(\@file_pairs_tot); if ( $max_processes == 0) { # Multiprocessing is disabled my $part = count_filesets ( $fset_a, $fset_b, \@files_added_tot, \@files_removed_tot, \@file_pairs_tot, 0, \%Language, \%ignore_regex, \%Ignored); %Results_by_File = %{$part->{'results_by_file'}}; %Results_by_Language= %{$part->{'results_by_language'}}; %Delta_by_File = %{$part->{'delta_by_file'}}; %Delta_by_Language= %{$part->{'delta_by_language'}}; %Ignored = ( %Ignored, %{$part->{'ignored'}}); %alignment = %{$part->{'alignment'}}; $n_filepairs_compared = $part->{'n_filepairs_compared'}; push ( @Errors, @{$part->{'errors'}}); } else { # Multiprocessing is enabled # Do not create more processes than the amount of data to be processed my $num_processes = min(max(scalar @files_added_tot, scalar @files_removed_tot, scalar @file_pairs_tot), $max_processes); # ... but use at least one process. $num_processes = 1 if $num_processes == 0; # Start processes for counting my $pm = Parallel::ForkManager->new($num_processes); # When processes finish, they will use the embedded subroutine for # merging the data into global variables. $pm->run_on_finish ( sub { my ($pid, $exit_code, $ident, $exit_signal, $core_dump, $part) = @_; my $part_ignored = $part->{'ignored'}; my $part_result_by_file = $part->{'results_by_file'}; my $part_result_by_language = $part->{'results_by_language'}; my $part_delta_by_file = $part->{'delta_by_file'}; my $part_delta_by_language = $part->{'delta_by_language'}; my $part_alignment = $part->{'alignment'}; my $part_errors = $part->{'errors'}; $tot_counted += scalar keys %$part_result_by_file; $n_filepairs_compared += $part->{'n_filepairs_compared'}; # Since files are processed by multiple processes, we can't measure # the number of processed files exactly. We approximate this by showing # the number of files counted by finished processes. 
printf "Counting: %d\r", $tot_counted if $opt_progress_rate; foreach my $this_language ( keys %$part_result_by_language ) { my $counts = $part_result_by_language->{$this_language}; foreach my $inner_key ( keys %$counts ) { $Results_by_Language{$this_language}{$inner_key} += $counts->{$inner_key}; } } foreach my $this_language ( keys %$part_delta_by_language ) { my $counts = $part_delta_by_language->{$this_language}; foreach my $inner_key ( keys %$counts ) { my $statuses = $counts->{$inner_key}; foreach my $inner_status ( keys %$statuses ) { $Delta_by_Language{$this_language}{$inner_key}{$inner_status} += $counts->{$inner_key}->{$inner_status}; } } } foreach my $label ( keys %$part_alignment ) { my $inner = $part_alignment->{$label}; foreach my $key ( keys %$inner ) { $alignment{$label}{$key} = 1; } } %Results_by_File = ( %Results_by_File, %$part_result_by_file ); %Delta_by_File = ( %Delta_by_File, %$part_delta_by_file ); %Ignored = (%Ignored, %$part_ignored ); push ( @Errors, @$part_errors ); } ); my $num_filepairs_per_part = ceil ( ( scalar @file_pairs_tot ) / $num_processes ); my $num_filesremoved_per_part = ceil ( ( scalar @files_removed_tot ) / $num_processes ); my $num_filesadded_per_part = ceil ( ( scalar @files_added_tot ) / $num_processes ); while ( 1 ) { my @files_added_part = splice @files_added_tot, 0, $num_filesadded_per_part; my @files_removed_part = splice @files_removed_tot, 0, $num_filesremoved_per_part; my @filepairs_part = splice @file_pairs_tot, 0, $num_filepairs_per_part; if ( scalar @files_added_part == 0 and scalar @files_removed_part == 0 and scalar @filepairs_part == 0 ) { last; } $pm->start() and next; my $count_result = count_filesets ( $fset_a, $fset_b, \@files_added_part, \@files_removed_part, \@filepairs_part, 1, \%Language, \%ignore_regex, \%Ignored ); $pm->finish(0 , $count_result); } # Wait for processes to finish $pm->wait_all_children(); } # Write alignment data if needed requested if ($opt_diff_alignment) { write_alignment_data ( $opt_diff_alignment, $n_filepairs_compared, \%alignment ) ; } my $separator = defined $opt_csv_delimiter ? $opt_csv_delimiter : ": "; my @ignored_reasons = map { "${_}${separator} $Ignored{$_}" } sort keys %Ignored; write_file($opt_ignored, {"file_type" => "ignored", "separator" => ": ", "columns" => ["file", "reason"], }, @ignored_reasons ) if $opt_ignored; write_file($opt_counted, {}, sort keys %Results_by_File) if $opt_counted; # 1}}} # Step 7: Assemble results. {{{1 # my $end_time = get_time(); printf "%8d file%s ignored. \n", plural_form(scalar keys %Ignored) unless $opt_quiet; print_errors(\%Error_Codes, \@Errors) if @Errors; if (!%Delta_by_Language) { print "Nothing to count.\n"; exit; } generate_diff_sql($end_time - $start_time, \%Delta_by_File) if $opt_sql; exit if $skip_generate_report; foreach my $out_style (sort keys %OUTFILE_EXT) { next unless $OUTFILE_EXT{$out_style}; my @Lines_Out = (); if ($opt_by_file) { @Lines_Out = diff_report($VERSION, $end_time - $start_time, "by file", \%Delta_by_File, \%Scale_Factor, \%Renamed); } else { @Lines_Out = diff_report($VERSION, $end_time - $start_time, "by language", \%Delta_by_Language, \%Scale_Factor, undef); } my $outfile = $opt_report_file; produce_output(\@Lines_Out, $opt_report_file, $out_style); } # 1}}} } else { # Step 4: Separate code from non-code files. 
{{{1 my $fh = 0; if ($opt_list_file or $opt_diff_list_files or $opt_vcs) { my @list; if ($opt_vcs) { @list = invoke_generator($opt_vcs, \@ARGV); } elsif ($opt_list_file) { @list = read_list_file($opt_list_file); } else { @list = read_list_file($ARGV[0]); } $fh = make_file_list(\@list, 0, \%Error_Codes, \@Errors, \%Ignored); } else { $fh = make_file_list(\@ARGV, 0, \%Error_Codes, \@Errors, \%Ignored); # make_file_list populates global variable @file_list via call to # File::Find's find() which in turn calls files() } if ($opt_exclude_list_file) { # note: process_exclude_list_file() references global @file_list process_exclude_list_file($opt_exclude_list_file, \%Exclude_Dir, \%Ignored); } if ($opt_skip_win_hidden and $ON_WINDOWS) { my @file_list_minus_hidden = (); # eval code to run on Unix without 'missing Win32::File module' error. my $win32_file_invocation = ' use Win32::File; foreach my $F (@file_list) { my $attr = undef; Win32::File::GetAttributes($F, $attr); if ($attr & HIDDEN) { $Ignored{$F} = "Windows hidden file"; print "Ignoring $F since it is a Windows hidden file\n" if $opt_v > 1; } else { push @file_list_minus_hidden, $F; } }'; eval $win32_file_invocation; @file_list = @file_list_minus_hidden; } if ($opt_no_autogen) { exclude_autogenerated_files(\@file_list, # in/out \%Error_Codes, \@Errors, \%Ignored); } #printf "%8d file%s excluded. \n", # plural_form(scalar keys %Ignored) # unless $opt_quiet; # die print ": ", join("\n: ", @file_list), "\n"; # 1}}} # Step 5: Remove duplicate files. {{{1 # my %Language = (); my %unique_source_file = (); remove_duplicate_files($fh , # in \%Language , # out \%unique_source_file , # out \%Error_Codes , # in \@Errors , # out \%Ignored ); # out if ($opt_exclude_content) { exclude_by_regex($opt_exclude_content, # in \%unique_source_file , # in/out \%Ignored); # out } elsif ($opt_include_content) { include_by_regex($opt_include_content, # in \%unique_source_file , # in/out \%Ignored); # out } printf "%8d unique file%s. \n", plural_form(scalar keys %unique_source_file) unless $opt_quiet; write_file($opt_unique, {}, sort keys %unique_source_file) if $opt_unique; # 1}}} # Step 6: Count code, comments, blank lines. {{{1 # my %Results_by_Language = (); my %Results_by_File = (); my @results_parts = (); my @sorted_files = sort keys %unique_source_file; if ( $max_processes == 0) { # Multiprocessing is disabled my $part = count_files ( \@sorted_files , 0, \%ignore_regex, \%Language); %Results_by_File = %{$part->{'results_by_file'}}; %Results_by_Language= %{$part->{'results_by_language'}}; %Ignored = ( %Ignored, %{$part->{'ignored'}}); push ( @Errors, @{$part->{'errors'}}); } else { # Do not create more processes than the number of files to be processed my $num_files = scalar @sorted_files; my $num_processes = $num_files >= $max_processes ? $max_processes : $num_files; # Use at least one process. $num_processes = 1 if $num_processes == 0; # Start processes for counting my $pm = Parallel::ForkManager->new($num_processes); # When processes finish, they will use the embedded subroutine for # merging the data into global variables. 
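# The data flow below is standard Parallel::ForkManager usage: each child
# returns a reference via $pm->finish(0, $result) and the parent receives it
# as the sixth argument of its run_on_finish callback, where it is merged
# into the global result hashes.  A minimal, generic sketch of the same idea
# (count_chunk() and @chunks are placeholders, not functions defined in cloc):
#
#   my $pm = Parallel::ForkManager->new($n);
#   my %total;
#   $pm->run_on_finish(sub {
#       my $part = $_[5];                          # data ref returned by the child
#       $total{$_} += $part->{$_} for keys %$part; # merge into parent totals
#   });
#   foreach my $chunk (@chunks) {
#       $pm->start and next;                  # parent continues; child falls through
#       $pm->finish(0, count_chunk($chunk));  # child hands back a hash ref
#   }
#   $pm->wait_all_children;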
$pm->run_on_finish (
    sub {
        my ($pid, $exit_code, $ident, $exit_signal, $core_dump, $part) = @_;
        my $part_ignored            = $part->{'ignored'};
        my $part_result_by_file     = $part->{'results_by_file'};
        my $part_result_by_language = $part->{'results_by_language'};
        my $part_errors             = $part->{'errors'};
        $nCounted += scalar keys %$part_result_by_file;
        # Since files are processed by multiple processes, we can't measure
        # the number of processed files exactly.  We approximate this by showing
        # the number of files counted by finished processes.
        printf "Counting: %d\r", $nCounted if $opt_progress_rate;
        foreach my $this_language ( keys %$part_result_by_language ) {
            my $counts = $part_result_by_language->{$this_language};
            foreach my $inner_key ( keys %$counts ) {
                $Results_by_Language{$this_language}{$inner_key} +=
                    $counts->{$inner_key};
            }
        }
        %Results_by_File = ( %Results_by_File, %$part_result_by_file );
        %Ignored = (%Ignored, %$part_ignored);
        push ( @Errors, @$part_errors);
    }
);
my $num_files_per_part = ceil ( ( scalar @sorted_files ) / $num_processes );
while ( my @part = splice @sorted_files, 0, $num_files_per_part ) {
    $pm->start() and next;
    my $count_result = count_files ( \@part, 1, \%ignore_regex, \%Language );
    $pm->finish(0 , $count_result);
}
# Wait for processes to finish
$pm->wait_all_children();
}
my $separator = defined $opt_csv_delimiter ? $opt_csv_delimiter : ": ";
my @ignored_reasons = map { "${_}${separator} $Ignored{$_}" } sort keys %Ignored;
write_file($opt_ignored, {"file_type" => "ignored",
                          "separator" => $separator,
                          "columns"   => ["file", "reason"],
                         }, @ignored_reasons ) if $opt_ignored;
if ($opt_summary_cutoff) {
    %Results_by_Language = apply_cutoff($opt_summary_cutoff, \%Results_by_Language);
}
write_file($opt_counted, {}, sort keys %Results_by_File) if $opt_counted;
# 1}}}
# Step 7: Assemble results.
{{{1 # my $end_time = get_time(); printf "%8d file%s ignored.\n", plural_form(scalar keys %Ignored) unless $opt_quiet; print_errors(\%Error_Codes, \@Errors) if @Errors; if (!%Results_by_Language) { write_null_results($opt_json, $opt_xml, $opt_report_file); exit; } generate_sql($end_time - $start_time, \%Results_by_File, \%Scale_Factor) if $opt_sql; exit if $skip_generate_report; foreach my $out_style (sort keys %OUTFILE_EXT) { next unless $OUTFILE_EXT{$out_style}; my @Lines_Out = (); if ($opt_by_file_by_lang) { push @Lines_Out, generate_report( $VERSION, $end_time - $start_time, "by file", \%Results_by_File, \%Scale_Factor, $out_style); push @Lines_Out, generate_report( $VERSION, $end_time - $start_time, "by language", \%Results_by_Language, \%Scale_Factor, $out_style); } elsif ($opt_by_file) { push @Lines_Out, generate_report( $VERSION, $end_time - $start_time, "by file", \%Results_by_File, \%Scale_Factor, $out_style); } else { push @Lines_Out, generate_report( $VERSION, $end_time - $start_time, "by language", \%Results_by_Language, \%Scale_Factor, $out_style); } produce_output(\@Lines_Out, $opt_report_file, $out_style); } # 1}}} } if ($opt_count_diff) { ++$opt_count_diff; exit if $opt_count_diff > 3; goto Top_of_Processing_Loop; } suggest_remedies_for_errors(\@Errors) if @Errors; # - - - - - - - - - - - - - - - end of main - - - - - - - - - - - - - - - # sub produce_output { # {{{1 my ($ra_Lines_Out, $outfile, $out_style) = @_; if ($opt_fmt) { my $json_string = ""; write_file(\$json_string, {}, @{$ra_Lines_Out}); my ($file_len, $lang_len, $header, %contents) = load_json($json_string); @{$ra_Lines_Out} = print_format_n(abs($opt_fmt), $file_len, $lang_len, $header, %contents); } if ($outfile) { my $adjusted_outfile = $outfile; if ($N_OUTPUT_FORMATS > 1 and ($opt_report_file !~ /\.${out_style}$/)) { # ensure the output filename has a suitable extension $adjusted_outfile .= "." . $out_style; } write_file($adjusted_outfile, {}, @{$ra_Lines_Out}); } else { if ($opt_fmt) { print "@{$ra_Lines_Out}"; } else { print "\n" unless $opt_quiet; print join("\n", @{$ra_Lines_Out}), "\n"; } } } # 1}}} sub suggest_remedies_for_errors { # {{{1 my ($raa_errors) = @_; # [ [ numeric error code, filename], .. ] my $hit_timeout = 0; foreach my $pair (@{$raa_errors}) { my ($code, $file) = @{$pair}; $hit_timeout = $code == -5 ? 1 : $hit_timeout; } if ($hit_timeout) { print "\n"; print "One or more files took longer to process than expected.\n"; print "Try rerunning without timeout guards by adding --timeout 0\n"; print "to your command line arguments. See the documentation on\n"; print "the --timeout switch for more information.\n"; print "\n"; } } # 1}}} sub brief_usage { # {{{1 return " cloc -- Count Lines of Code Usage: $script [options] Count physical lines of source code and comments in the given files (may be archives such as compressed tarballs or zip files) and/or recursively below the given directories or git commit hashes. Example: cloc src/ include/ main.c $script [options] --diff Compute differences of physical lines of source code and comments between any pairwise combination of directory names, archive files or git commit hashes. Example: cloc --diff Python-3.5.tar.xz python-3.6/ $script --help shows full documentation on the options. https://$URL has numerous examples and more information. 
"; } # 1}}} sub long_usage { # {{{1 return " Usage: $script [options] | | Count, or compute differences of, physical lines of source code in the given files (may be archives such as compressed tarballs or zip files, or git commit hashes or branch names) and/or recursively below the given directories. ${BB}Input Options${NN} --extract-with= This option is only needed if cloc is unable to figure out how to extract the contents of the input file(s) by itself. Use to extract binary archive files (e.g.: .tar.gz, .zip, .Z). Use the literal '>FILE<' as a stand-in for the actual file(s) to be extracted. For example, to count lines of code in the input files gcc-4.2.tar.gz perl-5.8.8.tar.gz on Unix use --extract-with='gzip -dc >FILE< | tar xf -' or, if you have GNU tar, --extract-with='tar zxf >FILE<' and on Windows use, for example: --extract-with=\"\\\"c:\\Program Files\\WinZip\\WinZip32.exe\\\" -e -o >FILE< .\" (if WinZip is installed there). --list-file= Take the list of file and/or directory names to process from , which has one file/directory name per line. Only exact matches are counted; relative path names will be resolved starting from the directory where cloc is invoked. Set to - to read file names from a STDIN pipe. See also --exclude-list-file, --config. --diff-list-file= Take the pairs of file names to be diff'ed from , whose format matches the output of --diff-alignment. (Run with that option to see a sample.) The language identifier at the end of each line is ignored. This enables --diff mode and bypasses file pair alignment logic. Use --diff-list-files to define the file name pairs in separate files. See also --config. --diff-list-files Compute differences in code and comments between the files and directories listed in and . Each input file should use the same format as --list-file, where there is one file or directory name per line. Only exact matches are counted; relative path names will be resolved starting from the directory where cloc is invoked. This enables --diff mode. See also --list-file, --diff-list-file, --diff. --vcs= Invoke a system call to to obtain a list of files to work on. If is 'git', then will invoke 'git ls-files' to get a file list and 'git submodule status' to get a list of submodules whose contents will be ignored. See also --git which accepts git commit hashes and branch names. If is 'svn' then will invoke 'svn list -R'. The primary benefit is that cloc will then skip files explicitly excluded by the versioning tool in question, ie, those in .gitignore or have the svn:ignore property. Alternatively may be any system command that generates a list of files. Note: cloc must be in a directory which can read the files as they are returned by . cloc will not download files from remote repositories. 'svn list -R' may refer to a remote repository to obtain file names (and therefore may require authentication to the remote repository), but the files themselves must be local. Setting to 'auto' selects between 'git' and 'svn' (or neither) depending on the presence of a .git or .svn subdirectory below the directory where cloc is invoked. --unicode Check binary files to see if they contain Unicode expanded ASCII text. This causes performance to drop noticeably. ${BB}Processing Options${NN} --autoconf Count .in files (as processed by GNU autoconf) of recognized languages. See also --no-autogen. --by-file Report results for every source file encountered. See also --fmt under 'Output Options'. 
--by-file-by-lang Report results for every source file encountered in addition to reporting by language.
--config Read command line switches from instead of the default location of $config_file. The file should contain one switch, along with arguments (if any), per line. Blank lines and lines beginning with '#' are skipped. Options given on the command line take priority over entries read from the file. If a directory is also given with any of these switches: --list-file, --exclude-list-file, --read-lang-def, --force-lang-def, --diff-list-file and a config file exists in that directory, it will take priority over $config_file.
--count-and-diff First perform direct code counts of source file(s) of and separately, then perform a diff of these. Inputs may be pairs of files, directories, or archives. If --out or --report-file is given, three output files will be created, one for each of the two counts and one for the diff. See also --diff, --diff-alignment, --diff-timeout, --ignore-case, --ignore-whitespace.
--diff Compute differences in code and comments between source file(s) of and . The inputs may be any mix of files, directories, archives, or git commit hashes. Use --diff-alignment to generate a list showing which file pairs were compared. When comparing git branches, only files which have changed in either commit are compared. See also --git, --count-and-diff, --diff-alignment, --diff-list-file, --diff-timeout, --ignore-case, --ignore-whitespace.
--diff-timeout Ignore files which take more than seconds to process. Default is 10 seconds. Setting to 0 allows unlimited time. (Large files with many repeated lines can cause Algorithm::Diff::sdiff() to take hours.) See also --timeout.
--docstring-as-code cloc considers docstrings to be comments, but this is not always correct as docstrings represent regular strings when they appear on the right hand side of an assignment or as function arguments. This switch forces docstrings to be counted as code.
--follow-links [Unix only] Follow symbolic links to directories (sym links to files are always followed). See also --stat.
--force-lang=[,] Process all files that have a extension with the counter for language . For example, to count all .f files with the Fortran 90 counter (which expects files to end with .f90) instead of the default Fortran 77 counter, use --force-lang=\"Fortran 90,f\" If is omitted, every file will be counted with the counter. This option can be specified multiple times (but that is only useful when is given each time). See also --script-lang, --lang-no-ext.
--force-lang-def= Load language processing filters from , then use these filters instead of the built-in filters. Note: languages which map to the same file extension (for example: MATLAB/Mathematica/Objective-C/MUMPS/Mercury; Pascal/PHP; Lisp/OpenCL; Lisp/Julia; Perl/Prolog) will be ignored as these require additional processing that is not expressed in language definition files. Use --read-lang-def to define new language filters without replacing built-in filters (see also --write-lang-def, --write-lang-def-incl-dup, --config).
--git Forces the inputs to be interpreted as git targets (commit hashes, branch names, et cetera) if these are not first identified as file or directory names. This option overrides the --vcs=git logic if this is given; in other words, --git gets its list of files to work on directly from git using the hash or branch name rather than from 'git ls-files'.
This option can be used with --diff to perform line count diffs between git commits, or between a git commit and a file, directory, or archive. Use -v/--verbose to see the git system commands cloc issues.
--git-diff-rel Same as --git --diff, or just --diff if the inputs are recognized as git targets. Only files which have changed in either commit are compared.
--git-diff-all Git diff strategy #2: compare all files in the repository between the two commits.
--ignore-whitespace Ignore horizontal white space when comparing files with --diff. See also --ignore-case.
--ignore-case Ignore changes in case within file contents; consider upper- and lowercase letters equivalent when comparing files with --diff. See also --ignore-whitespace.
--ignore-case-ext Ignore case of file name extensions. This will cause problems counting some languages (specifically, .c and .C are associated with C and C++; this switch would count .C files as C rather than C++ on *nix operating systems). File name case insensitivity is always true on Windows.
--ignore-regex Ignore lines in source files that match the given Perl regular expression for the given language(s). This option can be specified multiple times. Language names are comma separated and are followed by the pipe character and the regular expression. Use * to match all languages. Examples: --ignore-regex=\"C,Java,C++|^\\s*[{};]\\s*\$\" --ignore-regex=\"*|DEBUG|TEST\\s+ONLY\" These filters are applied after comments are removed. Use --strip-comments=EXT to create new files that show these filters applied. The primary use case is to ignore lines containing only braces, brackets, or punctuation.
--lang-no-ext= Count files without extensions using the counter. This option overrides internal logic for files without extensions (where such files are checked against known scripting languages by examining the first line for #!). See also --force-lang, --script-lang.
--max-file-size= Skip files larger than megabytes when traversing directories. By default, =100. cloc's memory requirement is roughly twenty times larger than the largest file so running with files larger than 100 MB on a computer with less than 2 GB of memory will cause problems. Note: this check does not apply to files explicitly passed as command line arguments.
--no-autogen[=list] Ignore files generated by code-production systems such as GNU autoconf. To see a list of these files (then exit), run with --no-autogen list See also --autoconf.
--no-recurse Count files in the given directories without recursively descending below them.
--original-dir [Only effective in combination with --strip-comments or --strip-code] Write the stripped files to the same directory as the original files.
--only-count-files Only count files by language. Blank, comment, and code counts will be zero.
--read-binary-files Process binary files in addition to text files. This is usually a bad idea and should only be attempted with text files that have embedded binary data.
--read-lang-def= Load new language processing filters from and merge them with those already known to cloc. If defines a language cloc already knows about, cloc's definition will take precedence. Use --force-lang-def to override cloc's definitions (see also --write-lang-def, --write-lang-def-incl-dup, --config).
--script-lang=, Process all files that invoke as a #! scripting language with the counter for language .
For example, files that begin with #!/usr/local/bin/perl5.8.8 will be counted with the Perl counter by using --script-lang=Perl,perl5.8.8 The language name is case insensitive but the name of the script language executable, , must have the right case. This option can be specified multiple times. See also --force-lang, --lang-no-ext.
--sdir= Use as the scratch directory instead of letting File::Temp choose the location. Files written to this location are not removed at the end of the run (as they are with File::Temp).
--skip-leading= Skip the first lines of each file. If a comma separated list of extensions is also given, only skip lines from those file types. Example: --skip-leading=10,cpp,h will skip the first ten lines of *.cpp and *.h files. This is useful for ignoring boilerplate text.
--skip-uniqueness Skip the file uniqueness check. This will give a performance boost at the expense of counting files with identical contents multiple times (if such duplicates exist).
--stat Some file systems (AFS, CD-ROM, FAT, HPFS, SMB) do not have directory 'nlink' counts that match the number of their subdirectories. Consequently cloc may undercount or completely skip the contents of such file systems. This switch forces File::Find to stat directories to obtain the correct count. File search speed will decrease. See also --follow-links.
--stdin-name= Give a file name to use to determine the language for standard input. (Use - as the input name to receive source code via STDIN.)
--strip-code= For each file processed, write to the current directory a version of the file which has blank and code lines removed, including code on lines with in-line comments (only the comments remain). The name of each stripped file is the original file name with . appended to it. It is written to the current directory unless --original-dir is on.
--strip-comments= For each file processed, write to the current directory a version of the file which has blank and commented lines removed (in-line comments persist). The name of each stripped file is the original file name with . appended to it. It is written to the current directory unless --original-dir is on.
--strip-str-comments Replace comment markers embedded in strings with 'xx'. This attempts to work around a limitation in Regexp::Common::Comment where comment markers embedded in strings are seen as actual comment markers and not strings, often resulting in a 'Complex regular subexpression recursion limit' warning and incorrect counts. There are two disadvantages to using this switch: 1/code count performance drops, and 2/code generated with --strip-comments will contain different strings wherever embedded comments are found.
--sum-reports Input arguments are report files previously created with the --report-file option in plain format (e.g. not JSON, YAML, XML, or SQL). Makes a cumulative set of results containing the sum of data from the individual report files.
--timeout Ignore files which take more than seconds to process at any of the language's filter stages. The default maximum number of seconds spent on a filter stage is the number of lines in the file divided by one thousand. Setting to 0 allows unlimited time. See also --diff-timeout.
--processes=NUM [Available only on systems with a recent version of the Parallel::ForkManager module. Not available on Windows.] Sets the maximum number of cores that cloc uses. The default value of 0 disables multiprocessing.
--unix Override the operating system autodetection logic and run in UNIX mode. See also --windows, --show-os.
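Example (illustrative): run cloc --show-os to check which mode was autodetected, then add --unix or --windows to a later invocation to force a specific mode.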
--use-sloccount If SLOCCount is installed, use its compiled executables c_count, java_count, pascal_count, php_count, and xml_count instead of cloc's counters. SLOCCount's compiled counters are substantially faster than cloc's and may give a performance improvement when counting projects with large files. However, these cloc-specific features will not be available: --diff, --count-and-diff, --strip-code, --strip-comments, --unicode. --windows Override the operating system autodetection logic and run in Microsoft Windows mode. See also --unix, --show-os. ${BB}Filter Options${NN} --include-content= Only count files containing text that matches the given regular expression. --exclude-content= Exclude files containing text that matches the given regular expression. --exclude-dir=[,D2,] Exclude the given comma separated directories D1, D2, D3, et cetera, from being scanned. For example --exclude-dir=.cache,test will skip all files and subdirectories that have /.cache/ or /test/ as their parent directory. Directories named .bzr, .cvs, .hg, .git, .svn, and .snapshot are always excluded. This option only works with individual directory names so including file path separators is not allowed. Use --fullpath and --not-match-d= to supply a regex matching multiple subdirectories. --exclude-ext=[,[...]] Do not count files having the given file name extensions. --exclude-lang=[,L2[...]] Exclude the given comma separated languages L1, L2, L3, et cetera, from being counted. --exclude-list-file= Ignore files and/or directories whose names appear in . should have one file name per line. Only exact matches are ignored; relative path names will be resolved starting from the directory where cloc is invoked. See also --list-file, --config. --fullpath Modifies the behavior of --match-f, --not-match-f, and --not-match-d to include the file's path-- relative to the directory from which cloc is invoked--in the regex, not just the file's basename. (This does not expand each filename to include its fully qualified absolute path; instead, it uses as much of the path as is passed in to cloc.) --include-ext=[,ext2[...]] Count only languages having the given comma separated file extensions. Use --show-ext to see the recognized extensions. --include-lang=[,L2[...]] Count only the given comma separated, case- insensitive languages L1, L2, L3, et cetera. Use --show-lang to see the list of recognized languages. --match-d= Only count files in directories matching the Perl regex. For example --match-d='/(src|include)/' only counts files in directories containing /src/ or /include/. Unlike --not-match-d, --match-f, and --not-match-f, --match-d always anchors the regex to the directory from which cloc is invoked. --not-match-d= Count all files except those in directories matching the Perl regex. Only the trailing directory name is compared, for example, when counting in /usr/local/lib, only 'lib' is compared to the regex. Add --fullpath to compare parent directories, beginning from the directory where cloc is invoked, to the regex. Do not include file path separators at the beginning or end of the regex. This option may be repeated. --match-f= Only count files whose basenames match the Perl regex. For example --match-f='^[Ww]idget' only counts files that start with Widget or widget. Add --fullpath to include parent directories in the regex instead of just the basename. --not-match-f= Count all files except those whose basenames match the Perl regex. 
Add --fullpath to include parent directories in the regex instead of just the basename. This option may be repeated. --skip-archive= Ignore files that end with the given Perl regular expression. For example, if given --skip-archive='(zip|tar(\.(gz|Z|bz2|xz|7z))?)' the code will skip files that end with .zip, .tar, .tar.gz, .tar.Z, .tar.bz2, .tar.xz, and .tar.7z. --skip-win-hidden On Windows, ignore hidden files. ${BB}Debug Options${NN} --categorized= Save file sizes in bytes, identified languages and names of categorized files to . --counted= Save names of processed source files to . See also --found, --ignored, --unique. --diff-alignment= Write to a list of files and file pairs showing which files were added, removed, and/or compared during a run with --diff. This switch forces the --diff mode on. --explain= Print the filters used to remove comments for language and exit. In some cases the filters refer to Perl subroutines rather than regular expressions. An examination of the source code may be needed for further explanation. --help Print this usage information and exit. --found= Save names of every file found to . See also --counted, --ignored, --unique. --ignored= Save names of ignored files and the reason they were ignored to . See also --counted, --found, --unique. --print-filter-stages Print processed source code before and after each filter is applied. --show-ext[=] Print information about all known (or just the given) file extensions and exit. --show-lang[=] Print information about all known (or just the given) languages and exit. --show-os Print the value of the operating system mode and exit. See also --unix, --windows. --unique= Save names of unique files found to . See also --counted, --found, --ignored. -v[=] Verbose switch (optional numeric value). -verbose[=] Long form of -v. --version Print the version of this program and exit. --write-lang-def= Writes to the language processing filters then exits. Useful as a first step to creating custom language definitions. Note: languages which map to the same file extension will be excluded. (See also --force-lang-def, --read-lang-def). --write-lang-def-incl-dup= Same as --write-lang-def, but includes duplicated extensions. This generates a problematic language definition file because cloc will refuse to use it until duplicates are removed. ${BB}Output Options${NN} --3 Print third-generation language output. (This option can cause report summation to fail if some reports were produced with this option while others were produced without it.) --by-percent X Instead of comment and blank line counts, show these values as percentages based on the value of X in the denominator, where X is c meaning lines of code cm meaning lines of code + comments cb meaning lines of code + blanks cmb meaning lines of code + comments + blanks t meaning sum of values in that column For example, if using method 'c' and your code has twice as many lines of comments as lines of code, the value in the comment column will be 200%. Method 't' computes percentages based on totals for each column. Another way of looking at this is that 't' computes percentages vertically while the other methods compute them horizontally. --csv Write the results as comma separated values. --csv-delimiter= Use the character as the delimiter for comma separated files instead of ,. This switch forces --file-encoding= Write output files using the encoding instead of the default ASCII ( = 'UTF-7'). Examples: 'UTF-16', 'euc-kr', 'iso-8859-16'. 
Known encodings can be printed with perl -MEncode -e 'print join(\"\\n\", Encode->encodings(\":all\")), \"\\n\"' --fmt= Alternate text output format where is a number from 1 to 5. 'total lines' means the sum of code, comment, and blank lines. The formats are: 1: by language (same as cloc default output) 2: by language with an extra column for total lines 3: by file with language 4: by file with a total lines column 5: by file with language and a total lines column --hide-rate Do not show elapsed time, line processing rate, or file processing rates in the output header. This makes output deterministic. --json Write the results as JavaScript Object Notation (JSON) formatted output. --md Write the results as Markdown-formatted text. --out= Synonym for --report-file=. --percent Show counts as percentages of sums for each column. Same as '--by-percent t'. --progress-rate= Show progress update after every files are processed (default =100). Set to 0 to suppress progress output (useful when redirecting output to STDOUT). --quiet Suppress all information messages except for the final report. --report-file= Write the results to instead of STDOUT. --summary-cutoff=X:N Aggregate to 'Other' results having X lines below N where X is one of c meaning lines of code f meaning files m meaning lines of comments cm meaning lines of code + comments Appending a percent sign to N changes the calculation from straight count to percentage. Ignored with --diff or --by-file. --sql= Write results as SQL create and insert statements which can be read by a database program such as SQLite. If is -, output is sent to STDOUT. --sql-append Append SQL insert statements to the file specified by --sql and do not generate table creation statements. Only valid with the --sql option. --sql-project= Use as the project identifier for the current run. Only valid with the --sql option. --sql-style=

';
    print "<- html_header\n" if $opt_v > 2;
} # 1}}}
sub html_end {                               # {{{1
return
'
'; } # 1}}} sub die_unknown_lang { # {{{1 my ($lang, $option_name) = @_; die "Unknown language '$lang' used with $option_name option. " . "The command\n $script --show-lang\n" . "will print all recognized languages. Language names are " . "case sensitive.\n" ; } # 1}}} sub unicode_file { # {{{1 my $file = shift @_; print "-> unicode_file($file)\n" if $opt_v > 2; return 0 if (get_size($file) > 2_000_000); # don't bother trying to test binary files bigger than 2 MB my $IN = open_file('<', $file, 1); if (!defined $IN) { warn "Unable to read $file; ignoring.\n"; return 0; } my @lines = <$IN>; $IN->close; if (unicode_to_ascii( join('', @lines) )) { print "<- unicode_file()\n" if $opt_v > 2; return 1; } else { print "<- unicode_file()\n" if $opt_v > 2; return 0; } } # 1}}} sub unicode_to_ascii { # {{{1 my $string = shift @_; # A trivial attempt to convert UTF-16 little or big endian # files into ASCII. These files exhibit the following byte # sequence: # byte 1: 255 # byte 2: 254 # byte 3: ord of ASCII character # byte 4: 0 # byte 3+i: ord of ASCII character # byte 4+i: 0 # or # byte 1: 255 # byte 2: 254 # byte 3: 0 # byte 4: ord of ASCII character # byte 3+i: 0 # byte 4+i: ord of ASCII character # print "-> unicode_to_ascii()\n" if $opt_v > 2; my $length = length $string; #print "length=$length\n"; return '' if $length <= 3; my @unicode = split(//, $string); # check the first 100 characters (= 200 bytes) for big or # little endian UTF-16 encoding my $max_peek = $length < 200 ? $length : 200; my $max_for_pass = $length < 200 ? 0.9*$max_peek/2 : 90; my @view_1 = (); for (my $i = 2; $i < $max_peek; $i += 2) { push @view_1, $unicode[$i] } my @view_2 = (); for (my $i = 3; $i < $max_peek; $i += 2) { push @view_2, $unicode[$i] } my $points_1 = 0; foreach my $C (@view_1) { ++$points_1 if (32 <= ord($C) and ord($C) <= 127) or ord($C) == 13 or ord($C) == 10 or ord($C) == 9; } my $points_2 = 0; foreach my $C (@view_2) { ++$points_2 if (32 <= ord($C) and ord($C) <= 127) or ord($C) == 13 or ord($C) == 10 or ord($C) == 9; } #print "points 1: $points_1\n"; #print "points 2: $points_2\n"; #print "max_peek : $max_peek\n"; #print "max_for_pass: $max_for_pass\n"; my $offset = undef; if ($points_1 > $max_for_pass) { $offset = 2; } elsif ($points_2 > $max_for_pass) { $offset = 3; } else { print "<- unicode_to_ascii() a p1=$points_1 p2=$points_2\n" if $opt_v > 2; return ''; } # neither big or little endian UTF-16 my @ascii = (); for (my $i = $offset; $i < $length; $i += 2) { # some compound characters are made of HT (9), LF (10), or CR (13) # True HT, LF, CR are followed by 00; only add those. my $L = $unicode[$i]; if (ord($L) == 9 or ord($L) == 10 or ord($L) == 13) { my $companion; if ($points_1) { last if $i+1 >= $length; $companion = $unicode[$i+1]; } else { $companion = $unicode[$i-1]; } if (ord($companion) == 0) { push @ascii, $L; } else { push @ascii, " "; # no clue what this letter is } } else { push @ascii, $L; } } print "<- unicode_to_ascii() b p1=$points_1 p2=$points_2\n" if $opt_v > 2; return join("", @ascii); } # 1}}} sub uncompress_archive_cmd { # {{{1 my ($archive_file, ) = @_; # Wrap $archive_file in single or double quotes in the system # commands below to avoid filename chicanery (including # spaces in the names). 
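    # For example (illustrative only; the archive name is a placeholder), an
    # archive named "my project.tar.gz" on a Unix host would yield an extract
    # command along the lines of
    #     gzip -dc 'my project.tar.gz' | tar xf -
    # with the file name quoted so the embedded space survives the shell.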
print "-> uncompress_archive_cmd($archive_file)\n" if $opt_v > 2; my $extract_cmd = ""; my $missing = ""; if ($opt_extract_with) { ( $extract_cmd = $opt_extract_with ) =~ s/>FILE -"; } elsif ($archive_file =~ /\.tar$/ and $ON_WINDOWS) { $extract_cmd = "tar -xf \"$archive_file\""; } elsif (($archive_file =~ /\.tar\.(gz|Z)$/ or $archive_file =~ /\.tgz$/ ) and !$ON_WINDOWS) { if (external_utility_exists("gzip --version")) { if (external_utility_exists("tar --version")) { $extract_cmd = "gzip -dc '$archive_file' | tar xf -"; } else { $missing = "tar"; } } else { $missing = "gzip"; } } elsif ($archive_file =~ /\.tar\.bz2$/ and !$ON_WINDOWS) { if (external_utility_exists("bzip2 --help")) { if (external_utility_exists("tar --version")) { $extract_cmd = "bzip2 -dc '$archive_file' | tar xf -"; } else { $missing = "tar"; } } else { $missing = "bzip2"; } } elsif ($archive_file =~ /\.tar\.xz$/ and !$ON_WINDOWS) { if (external_utility_exists("unxz --version")) { if (external_utility_exists("tar --version")) { $extract_cmd = "unxz -dc '$archive_file' | tar xf -"; } else { $missing = "tar"; } } else { $missing = "bzip2"; } } elsif ($archive_file =~ /\.tar$/ and !$ON_WINDOWS) { $extract_cmd = "tar xf '$archive_file'"; } elsif ($archive_file =~ /\.src\.rpm$/i and !$ON_WINDOWS) { if (external_utility_exists("cpio --version")) { if (external_utility_exists("rpm2cpio")) { $extract_cmd = "rpm2cpio '$archive_file' | cpio -i"; } else { $missing = "rpm2cpio"; } } else { $missing = "cpio"; } } elsif ($archive_file =~ /\.(whl|zip)$/i and !$ON_WINDOWS) { if (external_utility_exists("unzip")) { $extract_cmd = "unzip -qq -d . '$archive_file'"; } else { $missing = "unzip"; } } elsif ($archive_file =~ /\.deb$/i and !$ON_WINDOWS) { # only useful if the .deb contains source code--most # .deb files just have compiled executables if (external_utility_exists("dpkg-deb")) { $extract_cmd = "dpkg-deb -x '$archive_file' ."; } else { $missing = "dpkg-deb"; } } elsif ($ON_WINDOWS and $archive_file =~ /\.(whl|zip)$/i) { # use unzip on Windows (comes with git-for-Windows) if (external_utility_exists("unzip")) { $extract_cmd = "unzip -qq -d . \"$archive_file\" "; } else { $missing = "unzip"; } } print "<- uncompress_archive_cmd\n" if $opt_v > 2; if ($missing) { die "Unable to expand $archive_file because external\n", "utility '$missing' is not available.\n", "Another possibility is to use the --extract-with option.\n"; } else { return $extract_cmd; } } # 1}}} sub read_list_file { # {{{1 my ($file, ) = @_; # reads filenames from a STDIN pipe if $file == "-" print "-> read_list_file($file)\n" if $opt_v > 2; my @entry = (); if ($file eq "-") { # read from a STDIN pipe my $IN; open($IN, $file); if (!defined $IN) { warn "Unable to read $file; ignoring.\n"; return (); } while (<$IN>) { next if /^\s*$/ or /^\s*#/; # skip empty or commented lines s/\cM$//; # DOS to Unix chomp; push @entry, $_; } $IN->close; } else { # read from an actual file foreach my $line (read_file($file)) { next if $line =~ /^\s*$/ or $line =~ /^\s*#/; $line =~ s/\cM$//; # DOS to Unix chomp $line; push @entry, $line; } } print "<- read_list_file\n" if $opt_v > 2; return @entry; } # 1}}} sub external_utility_exists { # {{{1 my $exe = shift @_; # remove args, if any my $leading_exe = $exe; $leading_exe =~ s/^(\S+)\s*.*?$/$1/; my $success = 0; if ($ON_WINDOWS) { $success = 1 unless system $exe . ' > nul'; } else { $success = 1 unless system $exe . ' >/dev/null 2>&1'; if (!$success) { $success = 1 unless system "which" . " $leading_exe" . 
' >/dev/null 2>&1'; } } return $success; } # 1}}} sub write_xsl_file { # {{{1 print "-> write_xsl_file\n" if $opt_v > 2; my $XSL = # {{{2 ' CLOC Results

'; # 2}}} if ($opt_by_file) { $XSL .= #
{{{2 ' '; $XSL .= ' ' if $opt_3; $XSL .= ' '; $XSL .= ' ' if $opt_3; $XSL .= ' '; $XSL .= ' ' if $opt_3; $XSL .= '
File Blank Comment Code Language3rd Generation Equivalent Scale
Total

'; # 2}}} } if (!$opt_by_file or $opt_by_file_by_lang) { $XSL .= #
{{{2 ' '; $XSL .= ' ' if $opt_3; $XSL .= ' '; $XSL .= ' ' if $opt_3; $XSL .= ' '; $XSL .= ' ' if $opt_3; $XSL .= '
Language Files Blank Comment CodeScale 3rd Generation Equivalent
Total
'; # 2}}} } $XSL.= <<'EO_XSL'; # {{{2
EO_XSL # 2}}} my $XSL_DIFF = <<'EO_DIFF_XSL'; # {{{2 CLOC Results

EO_DIFF_XSL # 2}}} if ($opt_by_file) { $XSL_DIFF.= <<'EO_DIFF_XSL'; # {{{2
Same
File Blank Comment Code
Modified
File Blank Comment Code
Added
File Blank Comment Code
Removed
File Blank Comment Code
EO_DIFF_XSL # 2}}} } if (!$opt_by_file or $opt_by_file_by_lang) { $XSL_DIFF.= <<'EO_DIFF_XSL'; # {{{2
Same
Language Files Blank Comment Code
Modified
Language Files Blank Comment Code
Added
Language Files Blank Comment Code
Removed
Language Files Blank Comment Code
EO_DIFF_XSL # 2}}} } $XSL_DIFF.= <<'EO_DIFF_XSL'; # {{{2
EO_DIFF_XSL # 2}}} if ($opt_diff) { write_file($CLOC_XSL, {}, ( $XSL_DIFF ) ); } else { write_file($CLOC_XSL, {}, ( $XSL ) ); } print "<- write_xsl_file\n" if $opt_v > 2; } # 1}}} sub normalize_file_names { # {{{1 print "-> normalize_file_names\n" if $opt_v > 2; my (@files, ) = @_; # Returns a hash of file names reduced to a canonical form # (fully qualified file names, all path separators changed to /, # Windows file names lowercased). Hash values are the original # file name. my %normalized = (); foreach my $F (@files) { my $F_norm = $F; if ($ON_WINDOWS) { $F_norm = lc $F_norm; # for case insensitive file name comparisons $F_norm =~ s{\\}{/}g; # Windows directory separators to Unix $F_norm =~ s{^\./}{}g; # remove leading ./ if (($F_norm !~ m{^/}) and ($F_norm !~ m{^\w:/})) { # looks like a relative path; prefix with cwd $F_norm = lc "$cwd/$F_norm"; } } else { $F_norm =~ s{^\./}{}g; # remove leading ./ if ($F_norm !~ m{^/}) { # looks like a relative path; prefix with cwd $F_norm = "$cwd/$F_norm"; } } # Remove trailing / so it does not interfere with further regex code # that does not expect it $F_norm =~ s{/+$}{}; $normalized{ $F_norm } = $F; } print "<- normalize_file_names\n" if $opt_v > 2; return %normalized; } # 1}}} sub combine_diffs { # {{{1 # subroutine by Andy (awalshe@sf.net) # https://sourceforge.net/tracker/?func=detail&aid=3261017&group_id=174787&atid=870625 my ($ra_files) = @_; print "-> combine_diffs\n" if $opt_v > 2; my $res = "$URL v $VERSION\n"; my $dl = '-'; my $width = 79; # columns are in this order my @cols = ('files', 'blank', 'comment', 'code'); my %HoH = (); foreach my $file (@{$ra_files}) { my $IN = open_file('<', $file, 1); if (!defined $IN) { warn "Unable to read $file; ignoring.\n"; next; } my $sec; while (<$IN>) { chomp; s/\cM$//; next if /^(http|Language|-----)/; if (/^[A-Za-z0-9]+/) { # section title $sec = $_; chomp($sec); $HoH{$sec} = () if ! exists $HoH{$sec}; next; } if (/^\s(same|modified|added|removed)/) { # calculated totals row my @ar = grep { $_ ne '' } split(/ /, $_); chomp(@ar); my $ttl = shift @ar; my $i = 0; foreach(@ar) { my $t = "${ttl}${dl}${cols[$i]}"; $HoH{$sec}{$t} = 0 if ! exists $HoH{$sec}{$t}; $HoH{$sec}{$t} += $_; $i++; } } } $IN->close; } # rows are in this order my @rows = ('same', 'modified', 'added', 'removed'); $res .= sprintf("%s\n", "-" x $width); $res .= sprintf("%-19s %14s %14s %14s %14s\n", 'Language', $cols[0], $cols[1], $cols[2], $cols[3]); $res .= sprintf("%s\n", "-" x $width); # no inputs? 
%HoH will be empty return $res unless %HoH; for my $sec ( keys %HoH ) { next if $sec =~ /SUM:/; next unless defined $HoH{$sec}; # eg, the header line $res .= "$sec\n"; foreach (@rows) { $res .= sprintf(" %-18s %14s %14s %14s %14s\n", $_, $HoH{$sec}{"${_}${dl}${cols[0]}"}, $HoH{$sec}{"${_}${dl}${cols[1]}"}, $HoH{$sec}{"${_}${dl}${cols[2]}"}, $HoH{$sec}{"${_}${dl}${cols[3]}"}); } } $res .= sprintf("%s\n", "-" x $width); my $sec = 'SUM:'; $res .= "$sec\n"; foreach (@rows) { $res .= sprintf(" %-18s %14s %14s %14s %14s\n", $_, $HoH{$sec}{"${_}${dl}${cols[0]}"}, $HoH{$sec}{"${_}${dl}${cols[1]}"}, $HoH{$sec}{"${_}${dl}${cols[2]}"}, $HoH{$sec}{"${_}${dl}${cols[3]}"}); } $res .= sprintf("%s\n", "-" x $width); print "<- combine_diffs\n" if $opt_v > 2; return $res; } # 1}}} sub combine_csv_diffs { # {{{1 my ($delimiter, $ra_files) = @_; print "-> combine_csv_diffs\n" if $opt_v > 2; my %sum = (); # sum{ language } = array of 17 values foreach my $file (@{$ra_files}) { my $IN = open_file('<', $file, 1); if (!defined $IN) { warn "Unable to read $file; ignoring.\n"; next; } my $sec; while (<$IN>) { next if /^Language${delimiter}\s==\sfiles${delimiter}/; chomp; my @words = split(/$delimiter/); my $n_col = scalar(@words); if ($n_col != 18) { warn "combine_csv_diffs(): Parse failure line $. of $file\n"; warn "Expected 18 columns, got $n_col\n"; die; } my $Lang = $words[0]; my @count = map { int($_) } @words[1..16]; if (defined $sum{$Lang}) { for (my $i = 0; $i < 16; $i++) { $sum{$Lang}[$i] += $count[$i]; } } else { @{$sum{$Lang}} = @count; } } $IN->close; } my @header = ("Language", "== files", "!= files", "+ files", "- files", "== blank", "!= blank", "+ blank", "- blank", "== comment", "!= comment", "+ comment", "- comment", "== code", "!= code", "+ code", "- code", "$URL v $VERSION" ); my $res = join("$delimiter ", @header) . "$delimiter\n"; foreach my $Lang (sort keys %sum) { $res .= $Lang . "$delimiter "; for (my $i = 0; $i < 16; $i++) { $res .= $sum{$Lang}[$i] . "$delimiter "; } $res .= "\n"; } print "<- combine_csv_diffs\n" if $opt_v > 2; return $res; } # 1}}} sub get_time { # {{{1 if ($HAVE_Time_HiRes) { return Time::HiRes::time(); } else { return time(); } } # 1}}} sub really_is_D { # {{{1 # Ref bug 131, files ending with .d could be init.d scripts # instead of D language source files. my ($file , # in $rh_Err , # in hash of error codes $raa_errors , # out ) = @_; print "-> really_is_D($file)\n" if $opt_v > 2; my ($possible_script, $L) = peek_at_first_line($file, $rh_Err, $raa_errors); print "<- really_is_D($file)\n" if $opt_v > 2; return $possible_script; # null string if D, otherwise a language } # 1}}} sub no_autogen_files { # {{{1 # ref https://github.com/AlDanial/cloc/issues/151 my ($print,) = @_; print "-> no_autogen($print)\n" if $opt_v > 2; # These sometimes created manually? 
# acinclude.m4 # configure.ac # Makefile.am my @files = qw ( aclocal.m4 announce-gen autogen.sh bootstrap compile config.guess config.h.in config.rpath config.status config.sub configure configure.in depcomp gendocs.sh gitlog-to-changelog git-version-gen gnupload gnu-web-doc-update install-sh libtool libtool.m4 link-warning.h ltmain.sh lt~obsolete.m4 ltoptions.m4 ltsugar.m4 ltversion.in ltversion.m4 Makefile.in mdate-sh missing mkinstalldirs test-driver texinfo.tex update-copyright useless-if-before-free vc-list-files ylwrap ); if ($print) { printf "cloc will ignore these %d files with --no-autogen:\n", scalar @files; foreach my $F (@files) { print " $F\n"; } print "Additionally, Go files with '// Code generated by .* DO NOT EDIT.'\n"; print "on the first line are ignored.\n"; } print "<- no_autogen()\n" if $opt_v > 2; return @files; } # 1}}} sub load_from_config_file { # {{{1 # Supports all options except --config itself which would # be pointless. my ($config_file, $rs_by_file , $rs_by_file_by_lang , $rs_categorized , $rs_counted , $rs_include_ext , $rs_include_lang , $rs_include_content , $rs_exclude_content , $rs_exclude_lang , $rs_exclude_dir , $rs_exclude_list_file , $rs_explain , $rs_extract_with , $rs_found , $rs_count_diff , $rs_diff_list_files , $rs_diff , $rs_diff_alignment , $rs_diff_timeout , $rs_timeout , $rs_html , $rs_ignored , $rs_unique , $rs_quiet , $rs_force_lang_def , $rs_read_lang_def , $rs_show_ext , $rs_show_lang , $rs_progress_rate , $rs_print_filter_stages , $rs_report_file , $ra_script_lang , $rs_sdir , $rs_skip_uniqueness , $rs_strip_code , $rs_strip_comments , $rs_original_dir , $rs_sum_reports , $rs_hide_rate , $rs_processes , $rs_unicode , $rs_3 , $rs_v , $rs_vcs , $rs_version , $rs_write_lang_def , $rs_write_lang_def_incl_dup, $rs_xml , $rs_xsl , $ra_force_lang , $rs_lang_no_ext , $rs_yaml , $rs_csv , $rs_csv_delimiter , $rs_json , $rs_md , $rs_fullpath , $rs_match_f , $ra_not_match_f , $rs_match_d , $ra_not_match_d , $rs_list_file , $rs_help , $rs_skip_win_hidden , $rs_read_binary_files , $rs_sql , $rs_sql_project , $rs_sql_append , $rs_sql_style , $rs_inline , $rs_exclude_ext , $rs_ignore_whitespace , $rs_ignore_case , $rs_ignore_case_ext , $ra_ignore_regex , $rs_follow_links , $rs_autoconf , $rs_sum_one , $rs_by_percent , $rs_stdin_name , $rs_force_on_windows , $rs_force_on_unix , $rs_show_os , $rs_skip_archive , $rs_max_file_size , $rs_use_sloccount , $rs_no_autogen , $rs_force_git , $rs_strip_str_comments , $rs_file_encoding , $rs_docstring_as_code , $rs_stat , ) = @_; # look for runtime configuration file in # $ENV{'HOME'}/.config/cloc/options.txt -> POSIX # $ENV{'APPDATA'} . 
'cloc' print "-> load_from_config_file($config_file)\n" if $opt_v and $opt_v > 2; if (!is_file($config_file)) { print "<- load_from_config_file() (no such file: $config_file)\n" if $opt_v and $opt_v > 2; return; } elsif (!can_read($config_file)) { print "<- load_from_config_file() (unable to read $config_file)\n" if $opt_v and $opt_v > 2; return; } print "Reading options from $config_file.\n" if defined $opt_v; my $has_force_lang = @{$ra_force_lang}; my $has_script_lang = @{$ra_script_lang}; my @lines = read_file($config_file); foreach (@lines) { next if /^\s*$/ or /^\s*#/; s/\s*--//; s/^\s+//; if (!defined ${$rs_by_file} and /^(by_file|by-file)/) { ${$rs_by_file} = 1; } elsif (!defined ${$rs_by_file_by_lang} and /^(by_file_by_lang|by-file-by-lang)/) { ${$rs_by_file_by_lang} = 1; } elsif (!defined ${$rs_categorized} and /^categorized(=|\s+)(.*?)$/) { ${$rs_categorized} = $2; } elsif (!defined ${$rs_counted} and /^counted(=|\s+)(.*?)$/) { ${$rs_counted} = $2; } elsif (!defined ${$rs_include_ext} and /^(?:include_ext|include-ext)(=|\s+)(.*?)$/) { ${$rs_include_ext} = $2; } elsif (!defined ${$rs_include_lang} and /^(?:include_lang|include-lang)(=|\s+)(.*?)$/) { ${$rs_include_lang} = $2; } elsif (!defined ${$rs_include_content} and /^(?:include_content|include-content)(=|\s+)(.*?)$/) { ${$rs_include_content} = $2; } elsif (!defined ${$rs_exclude_content} and /^(?:exclude_content|exclude-content)(=|\s+)(.*?)$/) { ${$rs_exclude_content} = $2; } elsif (!defined ${$rs_exclude_lang} and /^(?:exclude_lang|exclude-lang)(=|\s+)(.*?)$/) { ${$rs_exclude_lang} = $2; } elsif (!defined ${$rs_exclude_dir} and /^(?:exclude_dir|exclude-dir)(=|\s+)(.*?)$/) { ${$rs_exclude_dir} = $2; } elsif (!defined ${$rs_explain} and /^explain(=|\s+)(.*?)$/) { ${$rs_explain} = $2; } elsif (!defined ${$rs_extract_with} and /^(?:extract_with|extract-with)(=|\s+)(.*?)$/) { ${$rs_extract_with} = $2; } elsif (!defined ${$rs_found} and /^found(=|\s+)(.*?)$/) { ${$rs_found} = $2; } elsif (!defined ${$rs_count_diff} and /^(count_and_diff|count-and-diff)/) { ${$rs_count_diff} = 1; } elsif (!defined ${$rs_diff_list_files} and /^(diff_list_files|diff-list-files)/) { ${$rs_diff_list_files} = 1; } elsif (!defined ${$rs_diff} and /^diff/) { ${$rs_diff} = 1; } elsif (!defined ${$rs_diff_alignment} and /^(?:diff-alignment|diff_alignment)(=|\s+)(.*?)$/) { ${$rs_diff_alignment} = $2; } elsif (!defined ${$rs_diff_timeout} and /^(?:diff-timeout|diff_timeout)(=|\s+)i/) { ${$rs_diff_timeout} = $1; } elsif (!defined ${$rs_timeout} and /^timeout(=|\s+)i/) { ${$rs_timeout} = $1; } elsif (!defined ${$rs_html} and /^html/) { ${$rs_html} = 1; } elsif (!defined ${$rs_ignored} and /^ignored(=|\s+)(.*?)$/) { ${$rs_ignored} = $2; } elsif (!defined ${$rs_unique} and /^unique(=|\s+)(.*?)$/) { ${$rs_unique} = $2; } elsif (!defined ${$rs_quiet} and /^quiet/) { ${$rs_quiet} = 1; } elsif (!defined ${$rs_force_lang_def} and /^(?:force_lang_def|force-lang-def)(=|\s+)(.*?)$/) { ${$rs_force_lang_def} = $2; } elsif (!defined ${$rs_read_lang_def} and /^(?:read_lang_def|read-lang-def)(=|\s+)(.*?)$/) { ${$rs_read_lang_def} = $2; } elsif (!defined ${$rs_progress_rate} and /^(?:progress_rate|progress-rate)(=|\s+)(\d+)/) { ${$rs_progress_rate} = $2; } elsif (!defined ${$rs_print_filter_stages} and /^(print_filter_stages|print-filter-stages)/) { ${$rs_print_filter_stages}= 1; } elsif (!defined ${$rs_report_file} and /^(?:report_file|report-file)(=|\s+)(.*?)$/) { ${$rs_report_file} = $2; } elsif (!defined ${$rs_report_file} and /^out(=|\s+)(.*?)$/) { ${$rs_report_file} = $2; 
} elsif (!defined ${$rs_sdir} and /^sdir(=|\s+)(.*?)$/) { ${$rs_sdir} = $2; } elsif (!defined ${$rs_skip_uniqueness} and /^(skip_uniqueness|skip-uniqueness)/) { ${$rs_skip_uniqueness} = 1; } elsif (!defined ${$rs_strip_code} and /^(?:strip_code|strip-code)(=|\s+)(.*?)$/) { ${$rs_strip_code} = $2; } elsif (!defined ${$rs_strip_comments} and /^(?:strip_comments|strip-comments)(=|\s+)(.*?)$/) { ${$rs_strip_comments} = $2; } elsif (!defined ${$rs_original_dir} and /^(original_dir|original-dir)/) { ${$rs_original_dir} = 1; } elsif (!defined ${$rs_sum_reports} and /^(sum_reports|sum-reports)/) { ${$rs_sum_reports} = 1; } elsif (!defined ${$rs_hide_rate} and /^(hid_rate|hide-rate)/) { ${$rs_hide_rate} = 1; } elsif (!defined ${$rs_processes} and /^processes(=|\s+)(\d+)/) { ${$rs_processes} = $2; } elsif (!defined ${$rs_unicode} and /^unicode/) { ${$rs_unicode} = 1; } elsif (!defined ${$rs_3} and /^3/) { ${$rs_3} = 1; } elsif (!defined ${$rs_vcs} and /^vcs(=|\s+)(\S+)/) { ${$rs_vcs} = $2; } elsif (!defined ${$rs_version} and /^version/) { ${$rs_version} = 1; } elsif (!defined ${$rs_write_lang_def} and /^(?:write_lang_def|write-lang-def)(=|\s+)(.*?)$/) { ${$rs_write_lang_def} = $2; } elsif (!defined ${$rs_write_lang_def_incl_dup} and /^(?:write_lang_def_incl_dup|write-lang-def-incl-dup)(=|\s+)(.*?)$/) { ${$rs_write_lang_def_incl_dup} = $2; } elsif (!defined ${$rs_xml} and /^xml/) { ${$rs_xml} = 1; } elsif (!defined ${$rs_xsl} and /^xsl(=|\s+)(.*?)$/) { ${$rs_xsl} = $2; } elsif (!defined ${$rs_lang_no_ext} and /^(?:lang_no_ext|lang-no-ext)(=|\s+)(.*?)$/) { ${$rs_lang_no_ext} = $2; } elsif (!defined ${$rs_yaml} and /^yaml/) { ${$rs_yaml} = 1; } elsif (!defined ${$rs_csv} and /^csv/) { ${$rs_csv} = 1; } elsif (!defined ${$rs_csv_delimiter} and /^(?:csv_delimiter|csv-delimiter)(=|\s+)(.*?)$/) { ${$rs_csv_delimiter} = $2; } elsif (!defined ${$rs_json} and /^json/) { ${$rs_json} = 1; } elsif (!defined ${$rs_md} and /^md/) { ${$rs_md} = 1; } elsif (!defined ${$rs_fullpath} and /^fullpath/) { ${$rs_fullpath} = 1; } elsif (!defined ${$rs_match_f} and /^(?:match_f|match-f)(=|\s+)['"]?(.*?)['"]?$/) { ${$rs_match_f} = $2; } elsif (! @{$ra_not_match_f} and /^(?:not_match_f|not-match-f)(=|\s+)['"]?(.*?)['"]?$/) { push @{$ra_not_match_f} , $2; } elsif (!defined ${$rs_match_d} and /^(?:match_d|match-d)(=|\s+)['"]?(.*?)['"]?$/) { ${$rs_match_d} = $2; } elsif (! 
@{$ra_not_match_d} and /^(?:not_match_d|not-match-d)(=|\s+)['"]?(.*?)['"]?$/) { push @{$ra_not_match_d} , $2; } elsif (!defined ${$rs_list_file} and /^(?:list_file|list-file)(=|\s+)(.*?)$/) { ${$rs_list_file} = $2; } elsif (!defined ${$rs_help} and /^help/) { ${$rs_help} = 1; } elsif (!defined ${$rs_skip_win_hidden} and /^(skip_win_hidden|skip-win-hidden)/) { ${$rs_skip_win_hidden} = 1; } elsif (!defined ${$rs_read_binary_files} and /^(read_binary_files|read-binary-files)/) { ${$rs_read_binary_files} = 1; } elsif (!defined ${$rs_sql} and /^sql(=|\s+)(.*?)$/) { ${$rs_sql} = $2; } elsif (!defined ${$rs_sql_project} and /^(?:sql_project|sql-project)(=|\s+)(.*?)$/) { ${$rs_sql_project} = $2; } elsif (!defined ${$rs_sql_append} and /^(sql_append|sql-append)/) { ${$rs_sql_append} = 1; } elsif (!defined ${$rs_sql_style} and /^(?:sql_style|sql-style)(=|\s+)(.*?)$/) { ${$rs_sql_style} = $2; } elsif (!defined ${$rs_inline} and /^inline/) { ${$rs_inline} = 1; } elsif (!defined ${$rs_exclude_ext} and /^(?:exclude_ext|exclude-ext)(=|\s+)(.*?)$/) { ${$rs_exclude_ext} = $2; } elsif (!defined ${$rs_ignore_whitespace} and /^(ignore_whitespace|ignore-whitespace)/) { ${$rs_ignore_whitespace} = 1; } elsif (!defined ${$rs_ignore_case_ext} and /^(ignore_case_ext|ignore-case-ext)/) { ${$rs_ignore_case_ext} = 1; } elsif (!defined ${$rs_ignore_case} and /^(ignore_case|ignore-case)/) { ${$rs_ignore_case} = 1; } elsif (! @{$ra_ignore_regex} and /^(?:ignore_regex|ignore-regex)(=|\s+)['"]?(.*?)['"]?$/) { push @{$ra_ignore_regex}, $2; } elsif (!defined ${$rs_follow_links} and /^(follow_links|follow-links)/) { ${$rs_follow_links} = 1; } elsif (!defined ${$rs_autoconf} and /^autoconf/) { ${$rs_autoconf} = 1; } elsif (!defined ${$rs_sum_one} and /^(sum_one|sum-one)/) { ${$rs_sum_one} = 1; } elsif (!defined ${$rs_by_percent} and /^(?:by_percent|by-percent)(=|\s+)(.*?)$/) { ${$rs_by_percent} = $2; } elsif (!defined ${$rs_stdin_name} and /^(?:stdin_name|stdin-name)(=|\s+)(.*?)$/) { ${$rs_stdin_name} = $2; } elsif (!defined ${$rs_force_on_windows} and /^windows/) { ${$rs_force_on_windows} = 1; } elsif (!defined ${$rs_force_on_unix} and /^unix/) { ${$rs_force_on_unix} = 1; } elsif (!defined ${$rs_show_os} and /^(show_os|show-os)/) { ${$rs_show_os} = 1; } elsif (!defined ${$rs_skip_archive} and /^(?:skip_archive|skip-archive)(=|\s+)(.*?)$/) { ${$rs_skip_archive} = $2; } elsif (!defined ${$rs_max_file_size} and /^(?:max_file_size|max-file-size)(=|\s+)(\d+)/) { ${$rs_max_file_size} = $2; } elsif (!defined ${$rs_use_sloccount} and /^(use_sloccount|use-sloccount)/) { ${$rs_use_sloccount} = 1; } elsif (!defined ${$rs_no_autogen} and /^(no_autogen|no-autogen)/) { ${$rs_no_autogen} = 1; } elsif (!defined ${$rs_force_git} and /^git/) { ${$rs_force_git} = 1; } elsif (!defined ${$rs_exclude_list_file} and /^(?:exclude_list_file|exclude-list-file)(=|\s+)(.*?)$/) { ${$rs_exclude_list_file} = $2; } elsif (!defined ${$rs_v} and /^(verbose|v)((=|\s+)(\d+))?/) { if (!defined $4) { ${$rs_v} = 0; } else { ${$rs_v} = $4; } } elsif (!$has_script_lang and /^(?:script_lang|script-lang)(=|\s+)(.*?)$/) { push @{$ra_script_lang} , $2; } elsif (!$has_force_lang and /^(?:force_lang|force-lang)(=|\s+)(.*?)$/) { push @{$ra_force_lang} , $2; } elsif (!defined ${$rs_show_ext} and /^(show_ext|show-ext)((=|\s+)(.*))?$/) { if (!defined $4) { ${$rs_show_ext} = 0; } else { ${$rs_show_ext} = $4; } } elsif (!defined ${$rs_show_lang} and /^(show_lang|show-lang)((=|\s+)(.*))?s/){ if (!defined $4) { ${$rs_show_lang} = 0; } else { ${$rs_show_lang} = $4; } } elsif 
(!defined ${$rs_strip_str_comments} and /^(strip_str_comments|strip-str-comments)/) { ${$rs_strip_str_comments} = 1; } elsif (!defined ${$rs_file_encoding} and /^(?:file_encoding|file-encoding)(=|\s+)(\S+)/) { ${$rs_file_encoding} = $2; } elsif (!defined ${$rs_docstring_as_code} and /^(docstring_as_code|docstring-as-code)/) { ${$rs_docstring_as_code} = 1; } elsif (!defined ${$rs_stat} and /stat/) { ${$rs_stat} = 1; } } } # 1}}} sub trick_pp_packer_encode { # {{{1 use Encode; # PAR::Packer gives 'Unknown PerlIO layer "encoding"' unless it is # forced into using this module. my ($OUT, $JunkFile) = tempfile(UNLINK => 1); # delete on exit open($OUT, "> :encoding(utf8)", $JunkFile); close($OUT); } # 1}}} sub really_is_smarty { # {{{1 # Given filename, returns TRUE if its contents look like Smarty template my ($filename, ) = @_; print "-> really_is_smarty($filename)\n" if $opt_v > 2; my @lines = read_file($filename); my $points = 0; foreach my $L (@lines) { if (($L =~ /\{(if|include)\s/) or ($L =~ /\{\/if\}/) or ($L =~ /(\{\*|\*\})/) or ($L =~ /\{\$\w/)) { ++$points; } last if $points >= 2; } print "<- really_is_smarty(points=$points)\n" if $opt_v > 2; return $points >= 2; } # 1}}} sub check_alternate_config_files { # {{{1 my ($list_file, $exclude_list_file, $read_lang_def, $force_lang_def, $diff_list_file, ) = @_; my $found_it = ""; foreach my $file ($list_file, $exclude_list_file, $read_lang_def, $force_lang_def, $diff_list_file ) { next unless defined $file; my $dir = dirname $file; next unless can_read($dir) and is_dir($dir); my $bn = basename $config_file; if (can_read("$dir/$bn")) { $found_it = "$dir/$bn"; print "Using configuration file $found_it\n" if $opt_v; last; } } return $found_it; } # 1}}} sub write_null_results { # {{{ my ($json, $xml, $report_file,) = @_; print "-> write_null_results\n" if $opt_v > 2; if ((defined $json) or (defined $xml)) { my $line = ""; if (defined $json) { $line = "{}"; } else { $line = ''; } if (defined $report_file) { open OUT, ">$report_file" or die "Cannot write to $report_file $!\n"; print OUT "$line\n"; close OUT; } else { print "$line\n"; } } print "<- write_null_results\n" if $opt_v > 2; } # }}} sub glob2regex { # {{{ # convert simple xpath-style glob pattern to a regex my $globstr = shift; my $re = $globstr; $re =~ s{^["']}{}; $re =~ s{^\.\/}{}; $re =~ s{["']$}{}; $re =~ s{\.}{\\.}g; $re =~ s{\*\*}{\cx}g; # ctrl x = .*? $re =~ s{\*}{\cy}g; # ctrl y = [^/]* $re =~ s{\cx}{.*?}g; $re =~ s{\cy}{[^/]*}g; return '^' . $re . '$'; } # }}} sub load_json { # {{{1 # # Load a cloc-generated JSON string into %contents # $contents{filename}{blank|comment|code|language} = value # then print in a variety of formats. # my ($json_string, ) = @_; print "-> load_json()\n" if $opt_v > 2; my %contents = (); my $heading = undef; foreach (split /\n/, $json_string) { if (/^{?"(.*?)"/) { $heading = $1; } else { if (/^\s+"(.*?)"\s*:\s+(\d+(\.\d+)?)\b/) { # numeric value $contents{$heading}{$1} = $2; } elsif (/^\s+"(.*?)"\s*:\s+"(.*?)"/) { $contents{$heading}{$1} = $2; } } } my $url = $contents{'header'}{'cloc_url'}; my $ver = $contents{'header'}{'cloc_version'}; my $sec = $contents{'header'}{'elapsed_seconds'}; my $n_file = $contents{'header'}{'n_files'}; my $n_line = $contents{'header'}{'n_lines'}; $sec = $sec == 0 ? 
1.0e-3 : $sec; my $header = sprintf "%s v %s T=%.2f s (%.1f files/s, %.1f lines/s)", $url, $ver, $sec, $n_file/$sec, $n_line/$sec; delete $contents{'header'}; delete $contents{'SUM'}; my @file_list = (sort { $contents{$b}{'code'} <=> $contents{$a}{'code'} } keys %contents ); #die Dumper(\%contents); # Determine column widths for output my $file_len = 0; my $lang_len = 0; foreach my $file (keys %contents) { my $flen = length $file; my $llen = length $contents{$file}{'language'}; $file_len = $file_len > $flen ? $file_len : $flen; $lang_len = $lang_len > $llen ? $lang_len : $llen; } print "<- load_json()\n" if $opt_v > 2; return $file_len, $lang_len, $header, %contents; } # 1}}} sub print_format_n { # {{{1 # by file with # format 1 : Language | files | blank | comment | code # format 2 : Language | files | blank | comment | code | total # format 3 : File | Language | blank | comment | code # format 4 : File | blank | comment | code | total # format 5 : File | Language | blank | comment | code | total my ($format, $file_len, $lang_len, $header, %contents) = @_; print "-> print_format_n($format)\n" if $opt_v > 2; my @prt_lines = (); # 8 = characters in "Language" $lang_len = max(8, $lang_len); my %str_fmt = ( 1 => sprintf("%%-%ds %%7s %%7s %%7s %%7s\n", $lang_len), 2 => sprintf("%%-%ds %%7s %%7s %%7s %%7s %%7s\n", $lang_len), 3 => sprintf("%%-%ds %%-%ds %%7s %%7s %%7s\n", $file_len, $lang_len), 4 => sprintf("%%-%ds %%7s %%7s %%7s %%7s\n", $file_len), 5 => sprintf("%%-%ds %%-%ds %%7s %%7s %%7s %%7s\n", $file_len, $lang_len), ); my %val_fmt = ( 1 => sprintf("%%-%ds %%7d %%7d %%7d %%7d\n", $lang_len), 2 => sprintf("%%-%ds %%7d %%7d %%7d %%7d %%7d\n", $lang_len), 3 => sprintf("%%-%ds %%-%ds %%7d %%7d %%7d\n", $file_len, $lang_len), 4 => sprintf("%%-%ds %%7d %%7d %%7d %%7d\n", $file_len), 5 => sprintf("%%-%ds %%-%ds %%7d %%7d %%7d %%7d\n", $file_len, $lang_len), ); my %language = (); foreach my $file (keys %contents) { my $lang = $contents{$file}{'language'}; $language{$lang}{'files'} += 1; foreach my $category ('blank', 'comment', 'code',) { $language{$lang}{$category} += $contents{$file}{$category}; $language{$lang}{'total'} += $contents{$file}{$category}; } } my @file_list = (sort { $contents{$b}{'code'} <=> $contents{$a}{'code'} } keys %contents ); my @lang_list = (sort { $language{$b}{'code'} <=> $language{$a}{'code'} } keys %language ); my %hyphens = ( 1 => "-" x ($lang_len + 4*9), 2 => "-" x ($lang_len + 5*9), 3 => "-" x ($lang_len + $file_len + 2 + 3*9), 4 => "-" x ($file_len + 4*9), 5 => "-" x ($lang_len + $file_len + 2 + 4*9), ); my %col_headings = ( 1 => ["Language", "files", "blank", "comment", "code"], 2 => ["Language", "files", "blank", "comment", "code", "Total"], 3 => ["File", "Language", "blank", "comment", "code"], 4 => ["File", "blank", "comment", "code", "Total"], 5 => ["File", "Language", "blank", "comment", "code", "Total"], ); push @prt_lines, "$header\n"; push @prt_lines, "$hyphens{$format}\n"; push @prt_lines, sprintf $str_fmt{$format}, @{$col_headings{$format}}; push @prt_lines, "$hyphens{$format}\n"; my ($n_files, $n_blank, $n_comment, $n_code, $n_total) = (0, 0, 0, 0, 0); my @out; if ($format < 3) { # by language foreach my $lang (@lang_list) { my ($nF, $nB, $nCm, $nCo) = ($language{$lang}{'files'}, $language{$lang}{'blank'}, $language{$lang}{'comment'}, $language{$lang}{'code'}); if ($format == 1) { @out = ($lang, $nF, $nB, $nCm, $nCo); } else { @out = ($lang, $nF, $nB, $nCm, $nCo, $nB + $nCm + $nCo); } push @prt_lines, sprintf $val_fmt{$format}, @out; $n_files += $nF; 
$n_blank += $nB; $n_comment += $nCm; $n_code += $nCo; $n_total += $nB + $nCm + $nCo; } } else { # by file foreach my $file (@file_list) { my ($nB, $nCm, $nCo) = ($contents{$file}{'blank'}, $contents{$file}{'comment'}, $contents{$file}{'code'}); my $lang = $contents{$file}{'language'}; if ($format == 1) { } elsif ($format == 3) { @out = ($file, $lang, $nB, $nCm, $nCo); } elsif ($format == 4) { @out = ($file, $nB, $nCm, $nCo, $nB + $nCm + $nCo); } else { @out = ($file, $lang, $nB, $nCm, $nCo, $nB + $nCm + $nCo); } push @prt_lines, sprintf $val_fmt{$format}, @out; $n_blank += $nB; $n_comment += $nCm; $n_code += $nCo; $n_total += $nB + $nCm + $nCo; } } push @prt_lines, "$hyphens{$format}\n"; if (scalar @file_list > 1) { if ($format == 1) { @out = ( "SUM", $n_files, $n_blank, $n_comment, $n_code ); } elsif ($format == 2) { @out = ( "SUM", $n_files, $n_blank, $n_comment, $n_code, $n_total ); } elsif ($format == 3) { @out = ( "SUM", " ", $n_blank, $n_comment, $n_code ); } elsif ($format == 4) { @out = ( "SUM", $n_blank, $n_comment, $n_code, $n_total ); } else { @out = ( "SUM", " ", $n_blank, $n_comment, $n_code, $n_total ); } push @prt_lines, sprintf $val_fmt{$format}, @out; push @prt_lines, "$hyphens{$format}\n"; } return @prt_lines; print "<- print_format_n()\n" if $opt_v > 2; } # 1}}} sub parse_ignore_regex { # {{{1 # # Convert the list of "language(s)|regex" into a hash # $ignore_regex{language} = [list of regex] my ($ra_lang_regex , # in, as given on command line $rhaa_Filters_by_Language, # in, hash of filters by language $rha_ignore_regex) = @_; print "-> parse_ignore_regex()\n" if $opt_v > 2; foreach my $lang_regex (@{$ra_lang_regex}) { die "Missing '|' character in --ignore-regex '$lang_regex'\n" unless $lang_regex =~ /\|/; my ($lang, $regex) = split(/\|/, $lang_regex, 2); die "Invalid --ignore-regex: $lang_regex\n" unless defined $lang and defined $regex; my @languages = split(/,/, $lang); foreach my $lang (@languages) { if ($lang eq '*') { foreach my $lang (keys %{$rhaa_Filters_by_Language}) { push @{$rha_ignore_regex->{$lang}}, $regex; } } else { die "Unknown language '$lang' in --ignore-regex '$lang_regex'\n" unless defined $rhaa_Filters_by_Language->{$lang}; push @{$rha_ignore_regex->{$lang}}, $regex; } #print "lang=$lang regex=[$regex]\n"; } } #use Data::Dumper; #print Dumper($rha_ignore_regex); print "<- parse_ignore_regex()\n" if $opt_v > 2; } # 1}}} # really_is_pascal, really_is_incpascal, really_is_php from SLOCCount my %php_files = (); # really_is_php() sub really_is_pascal { # {{{1 # Given filename, returns TRUE if its contents really are Pascal. # This isn't as obvious as it seems. # Many ".p" files are Perl files # (such as /usr/src/redhat/BUILD/ispell-3.1/dicts/czech/glob.p), # others are C extractions # (such as /usr/src/redhat/BUILD/linux/include/linux/umsdos_fs.p # and some files in linuxconf). # However, test files in "p2c" really are Pascal, for example. # Note that /usr/src/redhat/BUILD/ucd-snmp-4.1.1/ov/bitmaps/UCD.20.p # is actually C code. The heuristics determine that they're not Pascal, # but because it ends in ".p" it's not counted as C code either. # I believe this is actually correct behavior, because frankly it # looks like it's automatically generated (it's a bitmap expressed as code). # Rather than guess otherwise, we don't include it in a list of # source files. Let's face it, someone who creates C files ending in ".p" # and expects them to be counted by default as C files in SLOCCount needs # their head examined. 
# really_is_pascal, really_is_incpascal, really_is_php from SLOCCount
my %php_files = ();    # really_is_php()

sub really_is_pascal {                       # {{{1
    # Given filename, returns TRUE if its contents really are Pascal.

    # This isn't as obvious as it seems.
    # Many ".p" files are Perl files
    # (such as /usr/src/redhat/BUILD/ispell-3.1/dicts/czech/glob.p),
    # others are C extractions
    # (such as /usr/src/redhat/BUILD/linux/include/linux/umsdos_fs.p
    # and some files in linuxconf).
    # However, test files in "p2c" really are Pascal, for example.

    # Note that /usr/src/redhat/BUILD/ucd-snmp-4.1.1/ov/bitmaps/UCD.20.p
    # is actually C code.  The heuristics determine that they're not Pascal,
    # but because it ends in ".p" it's not counted as C code either.
    # I believe this is actually correct behavior, because frankly it
    # looks like it's automatically generated (it's a bitmap expressed as code).
    # Rather than guess otherwise, we don't include it in a list of
    # source files.  Let's face it, someone who creates C files ending in ".p"
    # and expects them to be counted by default as C files in SLOCCount needs
    # their head examined.  I suggest examining their head
    # with a sucker rod (see syslogd(8) for more on sucker rods).

    # This heuristic counts as Pascal files such as:
    #   /usr/src/redhat/BUILD/teTeX-1.0/texk/web2c/tangleboot.p
    # which is hand-generated.  We don't count woven documents now anyway,
    # so this is justifiable.

    my $filename = shift;
    chomp($filename);

    # The heuristic is as follows: it's Pascal _IF_ it has all of the following
    # (ignoring {...} and (*...*) comments):
    # 1. "^..program NAME" or "^..unit NAME",
    # 2. "procedure", "function", "^..interface", or "^..implementation",
    # 3. a "begin", and
    # 4. it ends with "end.",
    #
    # Or it has all of the following:
    # 1. "^..module NAME" and
    # 2. it ends with "end.".
    #
    # Or it has all of the following:
    # 1. "^..program NAME",
    # 2. a "begin", and
    # 3. it ends with "end.".
    #
    # The "end." requirements in particular filter out non-Pascal.
    #
    # Note (jgb): this does not detect Pascal main files in fpc, like
    # fpc-1.0.4/api/test/testterminfo.pas, which does not have "program"
    # in it.

    my $is_pascal = 0;      # Value to determine.

    my $has_program = 0;
    my $has_unit = 0;
    my $has_module = 0;
    my $has_procedure_or_function = 0;
    my $found_begin = 0;
    my $found_terminating_end = 0;
    my $has_begin = 0;

    my $PASCAL_FILE = open_file('<', $filename, 0);
    die "Can't open $filename to determine if it's pascal.\n"
        if !defined $PASCAL_FILE;
    while (<$PASCAL_FILE>) {
        s/\{.*?\}//g;     # Ignore {...} comments on this line; imperfect, but effective.
        s/\(\*.*?\*\)//g; # Ignore (*...*) comments on this line; imperfect, but effective.
        if (m/\bprogram\s+[A-Za-z]/i)  {$has_program = 1;}
        if (m/\bunit\s+[A-Za-z]/i)     {$has_unit = 1;}
        if (m/\bmodule\s+[A-Za-z]/i)   {$has_module = 1;}
        if (m/\bprocedure\b/i)         {$has_procedure_or_function = 1;}
        if (m/\bfunction\b/i)          {$has_procedure_or_function = 1;}
        if (m/^\s*interface\s+/i)      {$has_procedure_or_function = 1;}
        if (m/^\s*implementation\s+/i) {$has_procedure_or_function = 1;}
        if (m/\bbegin\b/i)             {$has_begin = 1;}
        # Originally I said:
        # "This heuristic fails if there are multi-line comments after
        # "end."; I haven't seen that in real Pascal programs:"
        # But jgb found there are a good quantity of them in Debian, especially
        # in fpc (at the end of a lot of files there is a multiline comment
        # with the changelog for the file).
        # Therefore, assume Pascal if "end." appears anywhere in the file.
        if (m/end\.\s*$/i)             {$found_terminating_end = 1;}
        # elsif (m/\S/)                {$found_terminating_end = 0;}
    }
    close($PASCAL_FILE);

    # Okay, we've examined the entire file looking for clues;
    # let's use those clues to determine if it's really Pascal:
    if ( ( ($has_unit || $has_program) && $has_procedure_or_function &&
           $has_begin && $found_terminating_end ) ||
         ( $has_module  && $found_terminating_end ) ||
         ( $has_program && $has_begin && $found_terminating_end ) ) {
        $is_pascal = 1;
    }
    return $is_pascal;
} # 1}}}
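# A minimal illustration: the following made-up file satisfies the third
# clause of the heuristic in really_is_pascal() above (a "program" header,
# a "begin", and a terminating "end."), so it is counted as Pascal:
#     program Hello;
#     begin
#         writeln('Hello, world');
#     end.
# A ".p" file containing only C declarations triggers none of the required
# clues and is rejected.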
sub really_is_incpascal {                    # {{{1
    # Given filename, returns TRUE if its contents really are Pascal.
    # For .inc files (mainly seen in fpc).

    my $filename = shift;
    chomp($filename);

    # The heuristic is as follows: it is Pascal if any of the following:
    # 1. really_is_pascal returns true
    # 2. Any usual reserved word is found (program, unit, const, begin...)

    # If the general routine for Pascal files works, we have it
    if (really_is_pascal($filename)) {
        return 1;
    }

    my $is_pascal = 0;      # Value to determine.
    my $found_begin = 0;

    my $PASCAL_FILE = open_file('<', $filename, 0);
    die "Can't open $filename to determine if it's pascal.\n"
        if !defined $PASCAL_FILE;
    while (<$PASCAL_FILE>) {
        s/\{.*?\}//g;     # Ignore {...} comments on this line; imperfect, but effective.
        s/\(\*.*?\*\)//g; # Ignore (*...*) comments on this line; imperfect, but effective.
        if (m/\bprogram\s+[A-Za-z]/i)  {$is_pascal = 1;}
        if (m/\bunit\s+[A-Za-z]/i)     {$is_pascal = 1;}
        if (m/\bmodule\s+[A-Za-z]/i)   {$is_pascal = 1;}
        if (m/\bprocedure\b/i)         {$is_pascal = 1;}
        if (m/\bfunction\b/i)          {$is_pascal = 1;}
        if (m/^\s*interface\s+/i)      {$is_pascal = 1;}
        if (m/^\s*implementation\s+/i) {$is_pascal = 1;}
        if (m/\bconstant\s+/i)         {$is_pascal = 1;}
        if (m/\bbegin\b/i)             {$found_begin = 1;}
        # "end." only counts as a clue after a "begin" has been seen.
        if ((m/end\.\s*$/i) && ($found_begin == 1)) {$is_pascal = 1;}
        if ($is_pascal) {
            last;
        }
    }
    close($PASCAL_FILE);
    return $is_pascal;
} # 1}}}
sub really_is_php {                          # {{{1
    # Given filename, returns TRUE if its contents really are PHP.

    my $filename = shift;
    chomp($filename);

    my $is_php = 0;      # Value to determine.
    # Need to find a matching pair of surrounds, with ending after beginning:
    my $normal_surround = 0;  # <?; bit 0 = <?, bit 1 = ?>
    my $script_surround = 0;  # <script..>; bit 0 =
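    # Illustrative note (an assumption about how the flags above are used):
    # each "surround" counter is treated as a small bit mask, bit 0 recording
    # that an opening marker was seen and bit 1 that a matching closing marker
    # followed it.  A file containing, say,
    #     <?php echo "hello"; ?>
    # would end with both bits of $normal_surround set, which is one way a
    # file can qualify as real PHP.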