Commit 3e3d2df8 authored by Shane Snyder's avatar Shane Snyder

whole bunch of changes to job-summary scripts

parent 020d91b6
......@@ -110,21 +110,21 @@ install:: all
install -m 755 darshan-convert $(bindir)
# install -m 755 darshan-diff $(bindir)
install -m 755 darshan-parser $(bindir)
# install -m 755 $(srcdir)/darshan-summary-per-file.sh $(bindir)
install -m 755 $(srcdir)/darshan-summary-per-file.sh $(bindir)
install -m 755 libdarshan-util.a $(libdir)
ifeq ($(DARSHAN_ENABLE_SHARED),1)
install -m 755 libdarshan-util.so $(libdir)
endif
install -m 644 $(srcdir)/darshan-logutils.h $(includedir)
install -m 644 $(DARSHAN_LOG_FORMAT) $(includedir)
# install -m 755 darshan-job-summary/bin/darshan-job-summary.pl $(bindir)
# install -d $(libdir)/TeX
# install -m 644 $(srcdir)/darshan-job-summary/lib/TeX/Encode.pm $(libdir)/TeX/
# install -d $(libdir)/Number
# install -d $(libdir)/Number/Bytes
# install -m 644 $(srcdir)/darshan-job-summary/lib/Number/Bytes/Human.pm $(libdir)/Number/Bytes
# install -d $(datarootdir)
# install -m 644 $(srcdir)/darshan-job-summary/share/* $(datarootdir)
install -m 755 darshan-job-summary/bin/darshan-job-summary.pl $(bindir)
install -d $(libdir)/TeX
install -m 644 $(srcdir)/darshan-job-summary/lib/TeX/Encode.pm $(libdir)/TeX/
install -d $(libdir)/Number
install -d $(libdir)/Number/Bytes
install -m 644 $(srcdir)/darshan-job-summary/lib/Number/Bytes/Human.pm $(libdir)/Number/Bytes
install -d $(datarootdir)
install -m 644 $(srcdir)/darshan-job-summary/share/* $(datarootdir)
install -m 644 maint/darshan-util.pc $(pkgconfigdir)
......
......@@ -18,6 +18,1222 @@ use English;
# CPAN helpers: human-readable byte counts and timestamp formatting
use Number::Bytes::Human qw(format_bytes);
use POSIX qw(strftime);
#
# system commands used
#
# NOTE(review): $PREFIX is assumed to be defined/substituted elsewhere
# (e.g. at install time); it is not visible in this chunk.
my $darshan_parser = "$PREFIX/bin/darshan-parser";
my $pdflatex = "pdflatex";
my $epstopdf = "epstopdf";
my $cp = "cp";
my $mv = "mv";
my $gnuplot = "gnuplot";
# remember the starting directory so we can return after working in the temp dir
my $orig_dir = getcwd;
my $output_file = "summary.pdf";
my $verbose_flag = 0;
my $input_file = "";
# access-size histograms, keyed by access size, one per API (POSIX / MPI-IO)
my %posix_access_hash = ();
my %mpiio_access_hash = ();
# scratch slot list: ACCESS<n> size seen before its matching COUNT counter
my @access_size = ();
my %hash_files = ();
# data structures for calculating performance
my %hash_unique_file_time = ();
my $shared_file_time = 0;
my $total_job_bytes = 0;
# parse command-line arguments and verify required external tools exist
# (both subs are defined elsewhere in this file, outside this chunk)
process_args();
check_prereqs();
# scratch dir for generated .dat/.tex files; kept on disk in verbose mode
# so intermediate files can be inspected
my $tmp_dir = tempdir( CLEANUP => !$verbose_flag );
if ($verbose_flag)
{
print "verbose: $tmp_dir\n";
}
# stream the darshan log through darshan-parser
# NOTE(review): this 2-arg piped open interpolates $input_file into a shell
# command; list-form open(..., '-|', $cmd, @args) would be safer if the
# file name can contain shell metacharacters.
open(PARSE_OUT, "$darshan_parser $input_file |") || die("Can't execute \"$darshan_parser $input_file\": $!\n");
# per-rank and shared-file read/write interval data files consumed by gnuplot
open(FA_READ, ">$tmp_dir/file-access-read.dat") || die("error opening output file: $!\n");
open(FA_WRITE, ">$tmp_dir/file-access-write.dat") || die("error opening output file: $!\n");
open(FA_READ_SH, ">$tmp_dir/file-access-read-sh.dat") || die("error opening output file: $!\n");
open(FA_WRITE_SH, ">$tmp_dir/file-access-write-sh.dat") || die("error opening output file: $!\n");
# pending start timestamps, paired with the next *_END_TIMESTAMP seen
my $last_read_start = 0;
my $last_write_start = 0;
# cumulative time/byte totals, split into independent (per-rank) and
# shared-file (rank == -1) categories
my $cumul_read_indep = 0;
my $cumul_read_bytes_indep = 0;
my $cumul_write_indep = 0;
my $cumul_write_bytes_indep = 0;
my $cumul_read_shared = 0;
my $cumul_read_bytes_shared = 0;
my $cumul_write_shared = 0;
my $cumul_write_bytes_shared = 0;
my $cumul_meta_shared = 0;
my $cumul_meta_indep = 0;
# record-boundary tracking state for the parse loop below
my $first_data_line = 1;
my $current_rank = 0;
my $current_hash = 0;
my %file_record_hash = ();
# per-filesystem [bytes_read, bytes_written], keyed by $fields[6]
# (presumably the mount point -- TODO confirm against parser output)
my %fs_data = ();
# Main parse loop over darshan-parser's text output.
#
# Counter lines are whitespace-separated; from the uses below:
#   $fields[0] = module name ("POSIX", "MPIIO", ...)
#   $fields[1] = rank (-1 marks a shared-file record)
#   $fields[2] = file record id/hash
#   $fields[3] = counter name
#   $fields[4] = counter value
#   $fields[5] = file name
#   $fields[6] = mount point (presumably -- TODO confirm)
# NOTE(review): $line, @fields, %summary and the header scalars ($junk,
# $cmdline, $nprocs, ...) are undeclared package globals; the script does
# not appear to run under "use strict".
while($line = <PARSE_OUT>)
{
chomp($line);
if ($line =~ /^\s*$/)
{
# ignore blank lines
}
elsif ($line =~ /^#/)
{
# header comment lines: capture job metadata used later in the report
if ($line =~ /^# exe: /)
{
($junk, $cmdline) = split(':', $line, 2);
# add escape characters if needed for special characters in
# command line
$cmdline = encode('latex', $cmdline);
}
elsif ($line =~ /^# nprocs: /)
{
($junk, $nprocs) = split(':', $line, 2);
}
elsif ($line =~ /^# run time: /)
{
($junk, $runtime) = split(':', $line, 2);
}
elsif ($line =~ /^# start_time: /)
{
($junk, $starttime) = split(':', $line, 2);
}
elsif ($line =~ /^# uid: /)
{
($junk, $uid) = split(':', $line, 2);
}
elsif ($line =~ /^# jobid: /)
{
($junk, $jobid) = split(':', $line, 2);
}
elsif ($line =~ /^# darshan log version: /)
{
($junk, $version) = split(':', $line, 2);
# strip the leading whitespace left over from the split
$version =~ s/^\s+//;
}
}
else
{
# parse line
@fields = split(/[\t ]+/, $line);
# encode the file system name to protect against special characters
$fields[5] = encode('latex', $fields[5]);
# is this our first piece of data?
if($first_data_line)
{
$current_rank = $fields[1];
$current_hash = $fields[2];
$first_data_line = 0;
}
# is this a new file record?
# (counter lines are assumed to arrive grouped by rank + record id)
if($fields[1] != $current_rank || $fields[2] != $current_hash)
{
# process previous record (if posix or mpiio record)
# NOTE(review): this tests $fields[0] of the line that STARTS the
# new record, not the module of the record being flushed --
# confirm this is the intended condition.
if ($fields[0] eq "POSIX" || $fields[0] eq "MPIIO")
{
process_file_record($current_rank, $current_hash, \%file_record_hash);
}
# reset variables for next record
$current_rank = $fields[1];
$current_hash = $fields[2];
%file_record_hash = ();
$file_record_hash{FILE_NAME} = $fields[5];
}
# stash the raw counter on this record and accumulate the global total
$file_record_hash{$fields[3]} = $fields[4];
$summary{$fields[3]} += $fields[4];
# accumulate independent and shared data as well as fs data
if ($fields[3] eq "POSIX_F_READ_TIME")
{
if ($fields[1] == -1)
{
$cumul_read_shared += $fields[4];
}
else
{
$cumul_read_indep += $fields[4];
}
}
elsif ($fields[3] eq "POSIX_F_WRITE_TIME")
{
if ($fields[1] == -1)
{
$cumul_write_shared += $fields[4];
}
else
{
$cumul_write_indep += $fields[4];
}
}
elsif ($fields[3] eq "POSIX_F_META_TIME")
{
if ($fields[1] == -1)
{
$cumul_meta_shared += $fields[4];
}
else
{
$cumul_meta_indep += $fields[4];
}
}
elsif ($fields[3] eq "POSIX_BYTES_READ")
{
if ($fields[1] == -1)
{
$cumul_read_bytes_shared += $fields[4];
}
else
{
$cumul_read_bytes_indep += $fields[4];
}
# per-filesystem totals: slot [0] = bytes read, slot [1] = bytes written
if (not defined $fs_data{$fields[6]})
{
$fs_data{$fields[6]} = [0,0];
}
$fs_data{$fields[6]}->[0] += $fields[4];
}
elsif ($fields[3] eq "POSIX_BYTES_WRITTEN")
{
if ($fields[1] == -1)
{
$cumul_write_bytes_shared += $fields[4];
}
else
{
$cumul_write_bytes_indep += $fields[4];
}
if (not defined $fs_data{$fields[6]})
{
$fs_data{$fields[6]} = [0,0];
}
$fs_data{$fields[6]}->[1] += $fields[4];
}
# record start and end of reads and writes
elsif ($fields[3] eq "POSIX_F_READ_START_TIMESTAMP")
{
# store until we find the end
$last_read_start = $fields[4];
}
elsif ($fields[3] eq "POSIX_F_READ_END_TIMESTAMP" && $fields[4] != 0)
{
# assume we got the read start already
# emit one gnuplot interval row: start, rank (0 for shared), duration
my $xdelta = $fields[4] - $last_read_start;
if($fields[1] == -1)
{
print FA_READ_SH "$last_read_start\t0\t$xdelta\t0\n";
}
else
{
print FA_READ "$last_read_start\t$fields[1]\t$xdelta\t0\n";
}
}
elsif ($fields[3] eq "POSIX_F_WRITE_START_TIMESTAMP")
{
# store until we find the end
$last_write_start = $fields[4];
}
elsif ($fields[3] eq "POSIX_F_WRITE_END_TIMESTAMP" && $fields[4] != 0)
{
# assume we got the write start already
my $xdelta = $fields[4] - $last_write_start;
if($fields[1] == -1)
{
print FA_WRITE_SH "$last_write_start\t0\t$xdelta\t0\n";
}
else
{
print FA_WRITE "$last_write_start\t$fields[1]\t$xdelta\t0\n";
}
}
# record common access counter info
# ACCESS<n>_ACCESS (the size) arrives before ACCESS<n>_COUNT (the count),
# so @access_size acts as a per-record scratch pad indexed by <n>
elsif ($fields[3] =~ /^POSIX_ACCESS(.)_ACCESS/)
{
$access_size[$1] = $fields[4];
}
elsif ($fields[3] =~ /^MPIIO_ACCESS(.)_ACCESS/)
{
$access_size[$1] = $fields[4];
}
elsif ($fields[3] =~ /^POSIX_ACCESS(.)_COUNT/)
{
my $tmp_access_size = $access_size[$1];
if(defined $posix_access_hash{$tmp_access_size})
{
$posix_access_hash{$tmp_access_size} += $fields[4];
}
else
{
$posix_access_hash{$tmp_access_size} = $fields[4];
}
}
elsif ($fields[3] =~ /^MPIIO_ACCESS(.)_COUNT/)
{
my $tmp_access_size = $access_size[$1];
if(defined $mpiio_access_hash{$tmp_access_size})
{
$mpiio_access_hash{$tmp_access_size} += $fields[4];
}
else
{
$mpiio_access_hash{$tmp_access_size} = $fields[4];
}
}
}
}
# a nonzero exit status from darshan-parser surfaces here, at pipe close
close(PARSE_OUT) || die "darshan-parser failure: $! $?";
# Fudge one point at the end to make xrange match in read and write plots.
# For some reason I can't get the xrange command to work. -Phil
print FA_READ "$runtime\t-1\t0\t0\n";
print FA_WRITE "$runtime\t-1\t0\t0\n";
print FA_READ_SH "$runtime\t0\t0\t0\n";
print FA_WRITE_SH "$runtime\t0\t0\t0\n";
close(FA_READ);
close(FA_READ_SH);
close(FA_WRITE);
close(FA_WRITE_SH);
#
# Exit out if there are no actual file accesses
#
# ($first_data_line is still set only if the parse loop never saw a
# counter line; print the job header metadata and bail out)
if ($first_data_line)
{
$strtm = strftime("%a %b %e %H:%M:%S %Y", localtime($starttime));
print "This darshan log has no file records. No summary was produced.\n";
print "   jobid: $jobid\n";
print "     uid: $uid\n";
print "starttime: $strtm ($starttime )\n";
print " runtime: $runtime (seconds)\n";
print "  nprocs: $nprocs\n";
print " version: $version\n";
exit(1);
}
# process last file record
# (the loop above only flushes a record when the NEXT one begins, so the
# final record must be handled here; @fields still holds the last line)
$file_record_hash{FILE_NAME} = $fields[5];
if ($fields[0] eq "POSIX" || $fields[0] eq "MPIIO")
{
process_file_record($current_rank, $current_hash, \%file_record_hash);
}
# copy template files to the tmp dir
# (paths are internal -- $PREFIX and $tmp_dir -- so shell interpolation
# in these system() calls is acceptable here)
system "$cp $PREFIX/share/*.gplt $tmp_dir/";
system "$cp $PREFIX/share/*.tex $tmp_dir/";

# summary of time spent in POSIX & MPI-IO functions
#
# Each column is a percentage of total core-seconds ($runtime * $nprocs).
# Guard against a zero denominator (darshan reports an integer run time,
# so sub-second jobs can report 0), which would otherwise abort the
# script with "Illegal division by zero".
my $total_core_secs = $runtime * $nprocs;
$total_core_secs = 1 if ($total_core_secs <= 0);

open(TIME_SUMMARY, ">$tmp_dir/time-summary.dat") || die("error opening output file:$!\n");
print TIME_SUMMARY "# <type>, <app time>, <read>, <write>, <meta>\n";
print TIME_SUMMARY "POSIX, ", ((($total_core_secs - $summary{POSIX_F_READ_TIME} -
    $summary{POSIX_F_WRITE_TIME} -
    $summary{POSIX_F_META_TIME})/$total_core_secs) * 100);
print TIME_SUMMARY ", ", (($summary{POSIX_F_READ_TIME}/$total_core_secs)*100);
print TIME_SUMMARY ", ", (($summary{POSIX_F_WRITE_TIME}/$total_core_secs)*100);
print TIME_SUMMARY ", ", (($summary{POSIX_F_META_TIME}/$total_core_secs)*100), "\n";
# only emit an MPI-IO row when the log actually contains MPI-IO records
if (defined $summary{MPIIO_INDEP_OPENS})
{
    print TIME_SUMMARY "MPI-IO, ", ((($total_core_secs - $summary{MPIIO_F_READ_TIME} -
        $summary{MPIIO_F_WRITE_TIME} -
        $summary{MPIIO_F_META_TIME})/$total_core_secs) * 100);
    print TIME_SUMMARY ", ", (($summary{MPIIO_F_READ_TIME}/$total_core_secs)*100);
    print TIME_SUMMARY ", ", (($summary{MPIIO_F_WRITE_TIME}/$total_core_secs)*100);
    print TIME_SUMMARY ", ", (($summary{MPIIO_F_META_TIME}/$total_core_secs)*100), "\n";
}
close TIME_SUMMARY;
# counts of operations
#
# Emit per-operation counters as comma-separated rows consumed by the
# plotting templates: one file for POSIX, and one for MPI-IO when the
# log contains MPI-IO records.
open(PSX_OP_COUNTS, ">$tmp_dir/posix-op-counts.dat") || die("error opening output file: $!\n");
print PSX_OP_COUNTS "# <operation>, <POSIX count>\n";
my @psx_op_rows = (
    [ "Read",  $summary{POSIX_READS}  + $summary{POSIX_FREADS}  ],
    [ "Write", $summary{POSIX_WRITES} + $summary{POSIX_FWRITES} ],
    [ "Open",  $summary{POSIX_OPENS}  + $summary{POSIX_FOPENS}  ],
    [ "Stat",  $summary{POSIX_STATS}  ],
    [ "Seek",  $summary{POSIX_SEEKS}  ],
    [ "Mmap",  $summary{POSIX_MMAPS}  ],
    [ "Fsync", $summary{POSIX_FSYNCS} + $summary{POSIX_FDSYNCS} ],
);
print PSX_OP_COUNTS "$_->[0], $_->[1]\n" for @psx_op_rows;
close PSX_OP_COUNTS;
if (defined $summary{MPIIO_INDEP_OPENS})
{
    # TODO: do we want to look at MPI split or non-blocking i/o here?
    open(MPI_OP_COUNTS, ">$tmp_dir/mpiio-op-counts.dat") || die("error opening output file: $!\n");
    print MPI_OP_COUNTS "# <operation>, <MPI Ind. count>, <MPI Coll. count>\n";
    # rows are [label, independent count, collective count]; Stat/Seek/Mmap
    # have no MPI-IO equivalents and are emitted as zeros
    my @mpi_op_rows = (
        [ "Read",  $summary{MPIIO_INDEP_READS},  $summary{MPIIO_COLL_READS}  ],
        [ "Write", $summary{MPIIO_INDEP_WRITES}, $summary{MPIIO_COLL_WRITES} ],
        [ "Open",  $summary{MPIIO_INDEP_OPENS},  $summary{MPIIO_COLL_OPENS}  ],
        [ "Stat",  0, 0 ],
        [ "Seek",  0, 0 ],
        [ "Mmap",  0, 0 ],
        [ "Fsync", 0, $summary{MPIIO_SYNCS} ],
    );
    print MPI_OP_COUNTS "$_->[0], $_->[1], $_->[2]\n" for @mpi_op_rows;
    close MPI_OP_COUNTS;
}
# histograms of reads and writes (for POSIX and MPI-IO modules)
#
# One row per access-size bucket: "<label>, <reads>, <writes>".  The
# bucket labels and counter-name suffixes are shared between the POSIX
# and MPI-IO (aggregate) histograms.
my @hist_bins = (
    [ "0-100",    "0_100"    ],
    [ "101-1K",   "100_1K"   ],
    [ "1K-10K",   "1K_10K"   ],
    [ "10K-100K", "10K_100K" ],
    [ "100K-1M",  "100K_1M"  ],
    [ "1M-4M",    "1M_4M"    ],
    [ "4M-10M",   "4M_10M"   ],
    [ "10M-100M", "10M_100M" ],
    [ "100M-1G",  "100M_1G"  ],
    [ "1G+",      "1G_PLUS"  ],
);
open (IO_HIST, ">$tmp_dir/posix-access-hist.dat") || die("error opening output file: $!\n");
print IO_HIST "# <size_range>, <POSIX_reads>, <POSIX_writes>\n";
foreach my $bin (@hist_bins)
{
    my ($label, $suffix) = @$bin;
    print IO_HIST "$label, ",
        $summary{"POSIX_SIZE_READ_$suffix"}, ", ",
        $summary{"POSIX_SIZE_WRITE_$suffix"}, "\n";
}
close IO_HIST;
# MPI-IO histogram only when the log contains MPI-IO records
if (defined $summary{MPIIO_INDEP_OPENS})
{
    open (IO_HIST, ">$tmp_dir/mpiio-access-hist.dat") || die("error opening output file: $!\n");
    print IO_HIST "# <size_range>, <MPIIO_reads>, <MPIIO_writes>\n";
    foreach my $bin (@hist_bins)
    {
        my ($label, $suffix) = @$bin;
        print IO_HIST "$label, ",
            $summary{"MPIIO_SIZE_READ_AGG_$suffix"}, ", ",
            $summary{"MPIIO_SIZE_WRITE_AGG_$suffix"}, "\n";
    }
    close IO_HIST;
}
# sequential and consecutive access patterns
#
# pattern.dat rows: "<op>, <total>, <sequential>, <consecutive>"
open (PATTERN, ">$tmp_dir/pattern.dat") || die("error opening output file: $!\n");
print PATTERN "# op total sequential consecutive\n";
my $pat_total_reads  = $summary{POSIX_READS}  + $summary{POSIX_FREADS};
my $pat_total_writes = $summary{POSIX_WRITES} + $summary{POSIX_FWRITES};
print PATTERN "Read, $pat_total_reads, $summary{POSIX_SEQ_READS}, $summary{POSIX_CONSEC_READS}\n";
print PATTERN "Write, $pat_total_writes, $summary{POSIX_SEQ_WRITES}, $summary{POSIX_CONSEC_WRITES}\n";
close PATTERN;
# table of common access sizes
#
# Emit a LaTeX tabular listing up to four of the most frequently used
# access sizes per API (POSIX always; MPI-IO only when present).  The
# row count is computed first because it feeds the \multirow span.
open(ACCESS_TABLE, ">$tmp_dir/access-table.tex") || die("error opening output file:$!\n");
print ACCESS_TABLE "
\\begin{tabular}{r|r|r}
\\multicolumn{3}{c}{ } \\\\
\\multicolumn{3}{c}{Most Common Access Sizes} \\\\
\\hline
\& access size \& count \\\\
\\hline
\\hline
";
# number of nonzero POSIX access sizes, capped at the 4 rows we display
my $tmp_access_count = scalar grep { $posix_access_hash{$_} > 0 } keys %posix_access_hash;
$tmp_access_count = 4 if ($tmp_access_count > 4);
if ($tmp_access_count > 0)
{
    my $row = 0;
    # descending by count; stop after 4 rows or at the first zero count
    foreach my $size (sort {$posix_access_hash{$b} <=> $posix_access_hash{$a} } keys %posix_access_hash)
    {
        last if ($row == 4);
        last if ($posix_access_hash{$size} == 0);
        if ($row == 0) {
            # first row carries the API label spanning all rows
            print ACCESS_TABLE "
\\multirow{$tmp_access_count}{*}{POSIX} \& $size \& $posix_access_hash{$size} \\\\\n
";
        }
        else {
            print ACCESS_TABLE "
\& $size \& $posix_access_hash{$size} \\\\\n
";
        }
        $row++;
    }
}
# same treatment for MPI-IO access sizes
$tmp_access_count = scalar grep { $mpiio_access_hash{$_} > 0 } keys %mpiio_access_hash;
$tmp_access_count = 4 if ($tmp_access_count > 4);
if ($tmp_access_count > 0)
{
    my $row = 0;
    foreach my $size (sort {$mpiio_access_hash{$b} <=> $mpiio_access_hash{$a} } keys %mpiio_access_hash)
    {
        last if ($row == 4);
        last if ($mpiio_access_hash{$size} == 0);
        if ($row == 0) {
            print ACCESS_TABLE "
\\hline
\\multirow{$tmp_access_count}{*}{MPI-IO \\textdagger} \& $size \& $mpiio_access_hash{$size} \\\\\n
";
        }
        else {
            print ACCESS_TABLE "
\& $size \& $mpiio_access_hash{$size} \\\\\n
";
        }
        $row++;
    }
}
print ACCESS_TABLE "
\\hline
\\end{tabular}
";
close ACCESS_TABLE;
# file count table
#open(TABLES, ">$tmp_dir/file-count-table.tex") || die("error opening output file:$!\n");
#print TABLES "
#\\begin{tabular}{r|r|r|r}
#\\multicolumn{4}{c}{ } \\\\
#\\multicolumn{4}{c}{File Count Summary} \\\\
#";
#if($size_est_flag == 1)
#{
#print TABLES "
#\\multicolumn{4}{c}{(estimated by I/O access offsets)} \\\\
#";
#}
#print TABLES "
#\\hline
#type \& number of files \& avg. size \& max size \\\\
#\\hline
#\\hline
#";
#my $counter;
#my $sum;
#my $max;
#my $key;
#my $avg;
#
#$counter = 0;
#$sum = 0;
#$max = 0;
#foreach $key (keys %hash_files) {
# $counter++;
# if($hash_files{$key}{'min_open_size'} >
# $hash_files{$key}{'max_size'})
# {
# $sum += $hash_files{$key}{'min_open_size'};
# if($hash_files{$key}{'min_open_size'} > $max)
# {
# $max = $hash_files{$key}{'min_open_size'};
# }
# }
# else
# {
# $sum += $hash_files{$key}{'max_size'};
# if($hash_files{$key}{'max_size'} > $max)
# {
# $max = $hash_files{$key}{'max_size'};
# }
# }
#}
#if($counter > 0) { $avg = $sum / $counter; }
#else { $avg = 0; }
#$avg = format_bytes($avg);
#$max = format_bytes($max);
#print TABLES "total opened \& $counter \& $avg \& $max \\\\\n";
#
#$counter = 0;
#$sum = 0;
#$max = 0;
#foreach $key (keys %hash_files) {
# if($hash_files{$key}{'was_read'} && !($hash_files{$key}{'was_written'}))
# {
# $counter++;
# if($hash_files{$key}{'min_open_size'} >
# $hash_files{$key}{'max_size'})
# {
# $sum += $hash_files{$key}{'min_open_size'};
# if($hash_files{$key}{'min_open_size'} > $max)
# {
# $max = $hash_files{$key}{'min_open_size'};
# }
# }
# else
# {
# $sum += $hash_files{$key}{'max_size'};
# if($hash_files{$key}{'max_size'} > $max)
# {
# $max = $hash_files{$key}{'max_size'};
# }
# }
# }
#}
#if($counter > 0) { $avg = $sum / $counter; }
#else { $avg = 0; }
#$avg = format_bytes($avg);
#$max = format_bytes($max);
#print TABLES "read-only files \& $counter \& $avg \& $max \\\\\n";
#
#$counter = 0;
#$sum = 0;
#$max = 0;
#foreach $key (keys %hash_files) {
# if(!($hash_files{$key}{'was_read'}) && $hash_files{$key}{'was_written'})
# {
# $counter++;
# if($hash_files{$key}{'min_open_size'} >
# $hash_files{$key}{'max_size'})
# {
# $sum += $hash_files{$key}{'min_open_size'};
# if($hash_files{$key}{'min_open_size'} > $max)
# {
# $max = $hash_files{$key}{'min_open_size'};
# }
# }
# else
# {
# $sum += $hash_files{$key}{'max_size'};
# if($hash_files{$key}{'max_size'} > $max)