#!/usr/bin/perl -w
#
#  (C) 2009 by Argonne National Laboratory.
#      See COPYRIGHT in top-level directory.
#

# Set via configure
my $PREFIX="@prefix@";

use lib "@prefix@/lib";
use TeX::Encode;
use Encode;
use File::Temp qw/ tempdir /;
use Cwd;
use Getopt::Long;
use English;
use Number::Bytes::Human qw(format_bytes);
use POSIX qw(strftime);

#
# system commands used
#
my $darshan_parser = "$PREFIX/bin/darshan-parser";
my $pdflatex       = "pdflatex";
my $epstopdf       = "epstopdf";
my $cp             = "cp";
my $mv             = "mv";
my $gnuplot;
# Prefer gnuplot installed with darshan, if present.
if(-x "$PREFIX/bin/gnuplot")
{
    $gnuplot       = "$PREFIX/bin/gnuplot";
}
else
{
    $gnuplot       = "gnuplot";
}

my $orig_dir = getcwd;
my $output_file = "summary.pdf";
my $verbose_flag = 0;
my $input_file = "";
my %access_hash = ();
my @access_size = ();
my %hash_files = ();

# data structures for calculating performance
my %hash_unique_file_time = ();
my $shared_file_time = 0;
my $total_job_bytes = 0;

process_args();

check_prereqs();

my $tmp_dir = tempdir( CLEANUP => !$verbose_flag );
if ($verbose_flag)
{
    print "verbose: $tmp_dir\n";
}


open(TRACE, "$darshan_parser $input_file |") || die("Can't execute \"$darshan_parser $input_file\": $!\n");

open(FA_READ, ">$tmp_dir/file-access-read.dat") || die("error opening output file: $!\n");
open(FA_WRITE, ">$tmp_dir/file-access-write.dat") || die("error opening output file: $!\n");
open(FA_READ_SH, ">$tmp_dir/file-access-read-sh.dat") || die("error opening output file: $!\n");
open(FA_WRITE_SH, ">$tmp_dir/file-access-write-sh.dat") || die("error opening output file: $!\n");

my $last_read_start = 0;
my $last_write_start = 0;

my $cumul_read_indep = 0;
my $cumul_read_bytes_indep = 0;

my $cumul_write_indep = 0;
my $cumul_write_bytes_indep = 0;

my $cumul_read_shared = 0;
my $cumul_read_bytes_shared = 0;

my $cumul_write_shared = 0;
my $cumul_write_bytes_shared = 0;

my $cumul_meta_shared = 0;
my $cumul_meta_indep = 0;

my $first_data_line = 1;
my $current_rank = 0;
my $current_hash = 0;
my %file_record_hash = ();

my %fs_data = ();

while ($line = <TRACE>) {
    chomp($line);
    
    if ($line =~ /^\s*$/) {
        # ignore blank lines
    }
    elsif ($line =~ /^#/) {
	if ($line =~ /^# exe: /) {
	    ($junk, $cmdline) = split(':', $line, 2);
            # add escape characters if needed for special characters in
            # command line
            $cmdline = encode('latex', $cmdline);
	}
	if ($line =~ /^# nprocs: /) {
	    ($junk, $nprocs) = split(':', $line, 2);
	    $procreads[$nprocs] = 0;
	}
	if ($line =~ /^# run time: /) {
	    ($junk, $runtime) = split(':', $line, 2);
	}
	if ($line =~ /^# start_time: /) {
	    ($junk, $starttime) = split(':', $line, 2);
	}
	if ($line =~ /^# uid: /) {
	    ($junk, $uid) = split(':', $line, 2);
	}
        if ($line =~ /^# jobid: /) {
	    ($junk, $jobid) = split(':', $line, 2);
        }
        if ($line =~ /^# darshan log version: /) {
            ($junk, $version) = split(':', $line, 2);
            $version =~ s/^\s+//;
            ($major, $minor) = split(/\./, $version, 2);
        }
    }
    else {
        # parse line
	@fields = split(/[\t ]+/, $line);

        # is this our first piece of data?
        if($first_data_line)
        {
            $current_rank = $fields[0];
            $current_hash = $fields[1];
            $first_data_line = 0;
        }

        # is this a new file record?
        if($fields[0] != $current_rank || $fields[1] != $current_hash)
        {
            $file_record_hash{CP_NAME_SUFFIX} = $fields[4];

            # process previous record
            process_file_record($current_rank, $current_hash, \%file_record_hash);

            # reset variables for next record 
            $current_rank = $fields[0];
            $current_hash = $fields[1];
            %file_record_hash = ();
        }

        $file_record_hash{$fields[2]} = $fields[3];

	$summary{$fields[2]} += $fields[3];

	# record per-process POSIX read count
	if ($fields[2] eq "CP_POSIX_READS") {
	    if ($fields[0] == -1) {
		$procreads[$nprocs] += $fields[3];
	    }
	    else {
		$procreads[$fields[0]] += $fields[3];
	    }
	}

	# record per-process POSIX write count
	if ($fields[2] eq "CP_POSIX_WRITES") {
	    if ($fields[0] == -1) {
		$procwrites[$nprocs] += $fields[3];
	    }
	    else {
		$procwrites[$fields[0]] += $fields[3];
	    }
	}

        # separate accumulators for independent and shared reads and writes
        if ($fields[2] eq "CP_F_POSIX_READ_TIME" && $fields[0] == -1){
            $cumul_read_shared += $fields[3];
        }
        if ($fields[2] eq "CP_F_POSIX_READ_TIME" && $fields[0] != -1){
            $cumul_read_indep += $fields[3];
        }
        if ($fields[2] eq "CP_F_POSIX_WRITE_TIME" && $fields[0] == -1){
            $cumul_write_shared += $fields[3];
        }
        if ($fields[2] eq "CP_F_POSIX_WRITE_TIME" && $fields[0] != -1){
            $cumul_write_indep += $fields[3];
        }

        if ($fields[2] eq "CP_F_POSIX_META_TIME" && $fields[0] == -1){
            $cumul_meta_shared += $fields[3];
        }
        if ($fields[2] eq "CP_F_POSIX_META_TIME" && $fields[0] != -1){
            $cumul_meta_indep += $fields[3];
        }

        if ((($fields[2] eq "CP_BYTES_READ") or
             ($fields[2] eq "CP_BYTES_WRITTEN")) and
            not defined($fs_data{$fields[5]}))
        {
            $fs_data{$fields[5]} = [0,0];
        }

        if ($fields[2] eq "CP_BYTES_READ" && $fields[0] == -1){
            $cumul_read_bytes_shared += $fields[3];
            $fs_data{$fields[5]}->[0] += $fields[3];
        }
        if ($fields[2] eq "CP_BYTES_READ" && $fields[0] != -1){
            $cumul_read_bytes_indep += $fields[3];
            $fs_data{$fields[5]}->[0] += $fields[3];
        }
        if ($fields[2] eq "CP_BYTES_WRITTEN" && $fields[0] == -1){
            $cumul_write_bytes_shared += $fields[3];
            $fs_data{$fields[5]}->[1] += $fields[3];
        }
        if ($fields[2] eq "CP_BYTES_WRITTEN" && $fields[0] != -1){
            $cumul_write_bytes_indep += $fields[3];
            $fs_data{$fields[5]}->[1] += $fields[3];
        }

        # record start and end of reads and writes

        if ($fields[2] eq "CP_F_READ_START_TIMESTAMP") {
            # store until we find the end
            # adjust for systems that give absolute time stamps
            $last_read_start = $fields[3];
        }
        if ($fields[2] eq "CP_F_READ_END_TIMESTAMP" && $fields[3] != 0) {
            # assume we got the read start already 
            my $xdelta = $fields[3] - $last_read_start;
            # adjust for systems that have absolute time stamps 
            if($last_read_start > $starttime) {
                $last_read_start -= $starttime;
            }
            if($fields[0] == -1){
                print FA_READ_SH "$last_read_start\t0\t$xdelta\t0\n";
            }
            else{
                print FA_READ "$last_read_start\t$fields[0]\t$xdelta\t0\n";
            }
        }
        if ($fields[2] eq "CP_F_WRITE_START_TIMESTAMP") {
            # store until we find the end
            $last_write_start = $fields[3];
        }
        if ($fields[2] eq "CP_F_WRITE_END_TIMESTAMP" && $fields[3] != 0) {
            # assume we got the write start already 
            my $xdelta = $fields[3] - $last_write_start;
            # adjust for systems that have absolute time stamps 
            if($last_write_start > $starttime) {
                $last_write_start -= $starttime;
            }
            if($fields[0] == -1){
                print FA_WRITE_SH "$last_write_start\t0\t$xdelta\t0\n";
            }
            else{
                print FA_WRITE "$last_write_start\t$fields[0]\t$xdelta\t0\n";
            }
        }

        if ($fields[2] =~ /^CP_ACCESS(.)_ACCESS/) {
            $access_size[$1] = $fields[3];
        }
        if ($fields[2] =~ /^CP_ACCESS(.)_COUNT/) {
            my $tmp_access_size = $access_size[$1];
            if(defined $access_hash{$tmp_access_size}){
                $access_hash{$tmp_access_size} += $fields[3];
            }
            else{
                $access_hash{$tmp_access_size} = $fields[3];
            }
        }
    }
}

#
# Exit out if there are no actual file accesses
#
if ($first_data_line)
{
    $strtm = strftime("%a %b %e %H:%M:%S %Y", localtime($starttime));

    print "This darshan log has no file records. No summary was produced.\n";
    print "    jobid:$jobid\n";
    print "      uid:$uid\n";
    print "starttime: $strtm ($starttime )\n";
    print "  runtime:$runtime (seconds)\n";
    print "   nprocs:$nprocs\n";
    print "  version: $version\n";
    close(TRACE);
    exit(1);
}

# process last file record
$file_record_hash{CP_NAME_SUFFIX} = $fields[4];
process_file_record($current_rank, $current_hash, \%file_record_hash);
close(TRACE) || die "darshan-parser failure: $! $?";

# Fudge one point at the end to make xrange match in read and write plots.
# For some reason I can't get the xrange command to work.  -Phil
print FA_READ "$runtime\t-1\t0\t0\n";
print FA_WRITE "$runtime\t-1\t0\t0\n";
print FA_READ_SH "$runtime\t0\t0\t0\n";
print FA_WRITE_SH "$runtime\t0\t0\t0\n";
close(FA_READ);
close(FA_WRITE);
close(FA_READ_SH);
close(FA_WRITE_SH);

# counts of operations
open(COUNTS, ">$tmp_dir/counts.dat") || die("error opening output file: $!\n");
print COUNTS "# P=POSIX, MI=MPI-IO indep., MC=MPI-IO coll., R=read, W=write\n";
print COUNTS "# PR, MIR, MCR, PW, MIW, MCW, Popen, Pseek, Pstat\n";
my $total_posix_opens = $summary{CP_POSIX_OPENS} + $summary{CP_POSIX_FOPENS};
my $total_syncs = $summary{CP_POSIX_FSYNCS} + $summary{CP_POSIX_FDSYNCS};
print COUNTS "Read, ", $summary{CP_POSIX_READS}, ", ",
    $summary{CP_INDEP_READS}, ", ", $summary{CP_COLL_READS}, "\n",
    "Write, ", $summary{CP_POSIX_WRITES}, ", ",
    $summary{CP_INDEP_WRITES}, ", ", $summary{CP_COLL_WRITES}, "\n",
    "Open, ", $total_posix_opens, ", ", $summary{CP_INDEP_OPENS},", ",
    $summary{CP_COLL_OPENS}, "\n",
    "Stat, ", $summary{CP_POSIX_STATS}, ", 0, 0\n",
    "Seek, ", $summary{CP_POSIX_SEEKS}, ", 0, 0\n",
    "Mmap, ", $summary{CP_POSIX_MMAPS}, ", 0, 0\n",
    "Fsync, ", $total_syncs, ", 0, 0\n";
close COUNTS;

# histograms of reads and writes
open (HIST, ">$tmp_dir/hist.dat") || die("error opening output file: $!\n");
print HIST "# size_range read write\n";
print HIST "0-100, ", $summary{CP_SIZE_READ_0_100}, ", ",
                 $summary{CP_SIZE_WRITE_0_100}, "\n";
print HIST "101-1K, ", $summary{CP_SIZE_READ_100_1K}, ", ",
                 $summary{CP_SIZE_WRITE_100_1K}, "\n";
print HIST "1K-10K, ", $summary{CP_SIZE_READ_1K_10K}, ", ",
                 $summary{CP_SIZE_WRITE_1K_10K}, "\n";
print HIST "10K-100K, ", $summary{CP_SIZE_READ_10K_100K}, ", ",
                 $summary{CP_SIZE_WRITE_10K_100K}, "\n";
print HIST "100K-1M, ", $summary{CP_SIZE_READ_100K_1M}, ", ",
                 $summary{CP_SIZE_WRITE_100K_1M}, "\n";
print HIST "1M-4M, ", $summary{CP_SIZE_READ_1M_4M}, ", ",
                 $summary{CP_SIZE_WRITE_1M_4M}, "\n";
print HIST "4M-10M, ", $summary{CP_SIZE_READ_4M_10M}, ", ",
                 $summary{CP_SIZE_WRITE_4M_10M}, "\n";
print HIST "10M-100M, ", $summary{CP_SIZE_READ_10M_100M}, ", ",
                 $summary{CP_SIZE_WRITE_10M_100M}, "\n";
print HIST "100M-1G, ", $summary{CP_SIZE_READ_100M_1G}, ", ",
                 $summary{CP_SIZE_WRITE_100M_1G}, "\n";
print HIST "1G+, ", $summary{CP_SIZE_READ_1G_PLUS}, ", ",
                 $summary{CP_SIZE_WRITE_1G_PLUS}, "\n";
close HIST;

# sequential and consecutive accesses
open (PATTERN, ">$tmp_dir/pattern.dat") || die("error opening output file: $!\n");
print PATTERN "# op total sequential consecutive\n";
print PATTERN "Read, ", $summary{CP_POSIX_READS}, ", ",
    $summary{CP_SEQ_READS}, ", ", $summary{CP_CONSEC_READS}, "\n";
print PATTERN "Write, ", $summary{CP_POSIX_WRITES}, ", ",
    $summary{CP_SEQ_WRITES}, ", ", $summary{CP_CONSEC_WRITES}, "\n";
close PATTERN;

# aligned I/O
open (ALIGN, ">$tmp_dir/align.dat") || die("error opening output file: $!\n");
print ALIGN "# total unaligned_mem unaligned_file\n";
print ALIGN $summary{CP_POSIX_READS} + $summary{CP_POSIX_WRITES}, ", ",
    $summary{CP_MEM_NOT_ALIGNED}, ", ", $summary{CP_FILE_NOT_ALIGNED}, "\n";
close ALIGN;

# MPI types
open (TYPES, ">$tmp_dir/types.dat") || die("error opening output file: $!\n");
print TYPES "# type use_count\n";
print TYPES "Named, ", $summary{CP_COMBINER_NAMED}, "\n";
print TYPES "Dup, ", $summary{CP_COMBINER_DUP}, "\n";
print TYPES "Contig, ", $summary{CP_COMBINER_CONTIGUOUS}, "\n";
print TYPES "Vector, ", $summary{CP_COMBINER_VECTOR}, "\n";
print TYPES "HvecInt, ", $summary{CP_COMBINER_HVECTOR_INTEGER}, "\n";
print TYPES "Hvector, ", $summary{CP_COMBINER_HVECTOR}, "\n";
print TYPES "Indexed, ", $summary{CP_COMBINER_INDEXED}, "\n";
print TYPES "HindInt, ", $summary{CP_COMBINER_HINDEXED_INTEGER}, "\n";
print TYPES "Hindexed, ", $summary{CP_COMBINER_HINDEXED}, "\n";
print TYPES "IndBlk, ", $summary{CP_COMBINER_INDEXED_BLOCK}, "\n";
print TYPES "StructInt, ", $summary{CP_COMBINER_STRUCT_INTEGER}, "\n";
print TYPES "Struct, ", $summary{CP_COMBINER_STRUCT}, "\n";
print TYPES "Subarray, ", $summary{CP_COMBINER_SUBARRAY}, "\n";
print TYPES "Darray, ", $summary{CP_COMBINER_DARRAY}, "\n";
print TYPES "F90Real, ", $summary{CP_COMBINER_F90_REAL}, "\n";
print TYPES "F90Complex, ", $summary{CP_COMBINER_F90_COMPLEX}, "\n";
print TYPES "F90Int, ", $summary{CP_COMBINER_F90_INTEGER}, "\n";
print TYPES "Resized, ", $summary{CP_COMBINER_RESIZED}, "\n";
close TYPES;

# generate histogram of process I/O counts
#
# NOTE: NEED TO FILL IN ACTUAL WRITE DATA!!!
#
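# procreads[0..nprocs-1] hold per-rank POSIX read counts; procreads[nprocs]
# holds the shared-file (rank -1) contribution that applies to every rank.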
$minprocread = (defined $procreads[0]) ? $procreads[0] : 0;
$maxprocread = (defined $procreads[0]) ? $procreads[0] : 0;
for ($i=1; $i < $nprocs; $i++) {
    $rdi = (defined $procreads[$i]) ? $procreads[$i] : 0;
    $minprocread = ($rdi > $minprocread) ? $minprocread : $rdi;
    $maxprocread = ($rdi < $maxprocread) ? $maxprocread : $rdi;
}
$minprocread += $procreads[$nprocs];
$maxprocread += $procreads[$nprocs];
# print "$minprocread $maxprocread\n";

@bucket = ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 );

for ($i=0; $i < $nprocs; $i++) {
    $mysize = ((defined $procreads[$i]) ? $procreads[$i] : 0) +
	$procreads[$nprocs];
    $mysize -= $minprocread;
    $mybucket = ($mysize > 0) ?
	(($mysize * 10) / ($maxprocread - $minprocread)) : 0;
    # clamp the largest value into the last bucket
    $mybucket = 9 if ($mybucket > 9);
    $bucket[$mybucket]++;
}

open(IODIST, ">$tmp_dir/iodist.dat") || die("error opening output file: $!\n");
print IODIST "# bucket n_procs_rd n_procs_wr\n";
print IODIST "# NOTE: WRITE COLUMN IS A PLACEHOLDER (0) FOR NOW!!!\n";

$bucketsize = ($maxprocread - $minprocread) / 10;
# TODO: handle writes as well; a 0 placeholder is written for now
for ($i=0; $i < 10; $i++) {
    print IODIST $bucketsize * $i + $minprocread, "-",
    $bucketsize * ($i+1) + $minprocread, ", ", $bucket[$i], ", 0\n";
}
close IODIST;

# generate title for summary
($executable, $junk) = split(' ', $cmdline, 2);
@parts = split('/', $executable);
$cmd = $parts[$#parts];

@timearray = localtime($starttime);
$year = $timearray[5] + 1900;
$mon = $timearray[4] + 1;
$mday = $timearray[3];

open(TITLE, ">$tmp_dir/title.tex") || die("error opening output file:$!\n");
print TITLE "
\\rhead{\\thepage\\ of \\pageref{LastPage}}
\\chead[
\\large $cmd ($mon/$mday/$year)
]
{
\\large $cmd ($mon/$mday/$year)
}
\\cfoot[
\\scriptsize{$cmdline}
]
{
\\scriptsize{$cmdline}
}
";
close TITLE;

open(TABLES, ">$tmp_dir/job-table.tex") || die("error opening output file:$!\n");
print TABLES "
\\begin{tabular}{|p{.47\\columnwidth}|p{.35\\columnwidth}|p{.47\\columnwidth}|p{.6\\columnwidth}|}
\\hline
jobid: $jobid \& uid: $uid \& nprocs: $nprocs \& runtime: $runtime seconds\\\\
\\hline
\\end{tabular}
";
close TABLES;

open(TABLES, ">$tmp_dir/access-table.tex") || die("error opening output file:$!\n");
print TABLES "
\\begin{tabular}{r|r}
\\multicolumn{2}{c}{ } \\\\
\\multicolumn{2}{c}{Most Common Access Sizes} \\\\
\\hline
access size \& count \\\\
\\hline
\\hline
";

# sort access sizes (descending)
my $i = 0;
foreach $value (sort {$access_hash{$b} <=> $access_hash{$a} } keys %access_hash) {
    if($i == 4) {
        last;
    }
    if($access_hash{$value} == 0) {
        last;
    }
    print TABLES "$value \& $access_hash{$value} \\\\\n";
    $i++;
}

print TABLES "
\\hline
\\end{tabular}
";
close TABLES;

open(TABLES, ">$tmp_dir/file-count-table.tex") || die("error opening output file:$!\n");
print TABLES "
\\begin{tabular}{r|r|r|r}
\\multicolumn{4}{c}{ } \\\\
\\multicolumn{4}{c}{File Count Summary} \\\\
\\hline
type \& number of files \& avg. size \& max size \\\\
\\hline
\\hline
";
my $counter;
my $sum;
my $max;
my $key;
my $avg;

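# total opened: for each file use the larger of its size at open time and its
# maximum observed size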
$counter = 0;
$sum = 0;
$max = 0;
foreach $key (keys %hash_files) {
    $counter++;
    if($hash_files{$key}{'min_open_size'} >
        $hash_files{$key}{'max_size'})
    {
        $sum += $hash_files{$key}{'min_open_size'};
        if($hash_files{$key}{'min_open_size'} > $max)
        {
            $max = $hash_files{$key}{'min_open_size'};
        }
    }
    else
    {
        $sum += $hash_files{$key}{'max_size'};
        if($hash_files{$key}{'max_size'} > $max)
        {
            $max = $hash_files{$key}{'max_size'};
        }
    }
}
if($counter > 0) { $avg = $sum / $counter; }
else { $avg = 0; }
$avg = format_bytes($avg);
$max = format_bytes($max);
print TABLES "total opened \& $counter \& $avg \& $max \\\\\n";

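# files that were read but never written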
$counter = 0;
$sum = 0;
$max = 0;
foreach $key (keys %hash_files) {
    if($hash_files{$key}{'was_read'} && !($hash_files{$key}{'was_written'}))
    {
        $counter++;
        if($hash_files{$key}{'min_open_size'} >
            $hash_files{$key}{'max_size'})
        {
            $sum += $hash_files{$key}{'min_open_size'};
            if($hash_files{$key}{'min_open_size'} > $max)
            {
                $max = $hash_files{$key}{'min_open_size'};
            }
        }
        else
        {
            $sum += $hash_files{$key}{'max_size'};
            if($hash_files{$key}{'max_size'} > $max)
            {
                $max = $hash_files{$key}{'max_size'};
            }
        }
    }
}
if($counter > 0) { $avg = $sum / $counter; }
else { $avg = 0; }
$avg = format_bytes($avg);
$max = format_bytes($max);
print TABLES "read-only files \& $counter \& $avg \& $max \\\\\n";

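# files that were written but never read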
$counter = 0;
$sum = 0;
$max = 0;
foreach $key (keys %hash_files) {
    if(!($hash_files{$key}{'was_read'}) && $hash_files{$key}{'was_written'})
    {
        $counter++;
        if($hash_files{$key}{'min_open_size'} >
            $hash_files{$key}{'max_size'})
        {
            $sum += $hash_files{$key}{'min_open_size'};
            if($hash_files{$key}{'min_open_size'} > $max)
            {
                $max = $hash_files{$key}{'min_open_size'};
            }
        }
        else
        {
            $sum += $hash_files{$key}{'max_size'};
            if($hash_files{$key}{'max_size'} > $max)
            {
                $max = $hash_files{$key}{'max_size'};
            }
        }
    }
}
if($counter > 0) { $avg = $sum / $counter; }
else { $avg = 0; }
$avg = format_bytes($avg);
$max = format_bytes($max);
print TABLES "write-only files \& $counter \& $avg \& $max \\\\\n";

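# files that were both read and written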
$counter = 0;
$sum = 0;
$max = 0;
foreach $key (keys %hash_files) {
    if($hash_files{$key}{'was_read'} && $hash_files{$key}{'was_written'})
    {
        $counter++;
        if($hash_files{$key}{'min_open_size'} >
            $hash_files{$key}{'max_size'})
        {
            $sum += $hash_files{$key}{'min_open_size'};
            if($hash_files{$key}{'min_open_size'} > $max)
            {
                $max = $hash_files{$key}{'min_open_size'};
            }
        }
        else
        {
            $sum += $hash_files{$key}{'max_size'};
            if($hash_files{$key}{'max_size'} > $max)
            {
                $max = $hash_files{$key}{'max_size'};
            }
        }
    }
}
if($counter > 0) { $avg = $sum / $counter; }
else { $avg = 0; }
$avg = format_bytes($avg);
$max = format_bytes($max);
print TABLES "read/write files \& $counter \& $avg \& $max \\\\\n";

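# files that appear to have been created (written, zero size at open, nonzero
# maximum size)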
$counter = 0;
$sum = 0;
$max = 0;
foreach $key (keys %hash_files) {
    if($hash_files{$key}{'was_written'} &&
        $hash_files{$key}{'min_open_size'} == 0 &&
        $hash_files{$key}{'max_size'} > 0)
    {
        $counter++;
        if($hash_files{$key}{'min_open_size'} >
            $hash_files{$key}{'max_size'})
        {
            $sum += $hash_files{$key}{'min_open_size'};
            if($hash_files{$key}{'min_open_size'} > $max)
            {
                $max = $hash_files{$key}{'min_open_size'};
            }
        }
        else
        {
            $sum += $hash_files{$key}{'max_size'};
            if($hash_files{$key}{'max_size'} > $max)
            {
                $max = $hash_files{$key}{'max_size'};
            }
        }
    }
}
if($counter > 0) { $avg = $sum / $counter; }
else { $avg = 0; }
$avg = format_bytes($avg);
$max = format_bytes($max);
print TABLES "created files \& $counter \& $avg \& $max \\\\\n";

print TABLES "
\\hline
\\end{tabular}
";
close(TABLES);


#
# Generate Per Filesystem Data
#
open(TABLES, ">$tmp_dir/fs-data-table.tex") || die("error opening output files:$!\n");
if (($major > 1) or ($minor > 23))
{
    print TABLES "
    \\begin{tabular}{c|r|r|r|r}
    \\multicolumn{5}{c}{ } \\\\
    \\multicolumn{5}{c}{Data Transfer Per Filesystem} \\\\
    \\hline
    \\multirow{2}{*}{File System} \& \\multicolumn{2}{c}{Write} \\vline \& \\multicolumn{2}{c}{Read} \\\\
    \\cline{2-5}
    \& MiB \& Ratio \& MiB \& Ratio \\\\
    \\hline
    \\hline
    ";
    foreach $key (keys %fs_data)
    {
        my $wr_total_mb = ($fs_data{$key}->[1] / (1024*1024));
        my $rd_total_mb = ($fs_data{$key}->[0] / (1024*1024));
        my $wr_total_rt;

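        # express this file system's traffic as a fraction of the job's total
        # read/write volume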
        if ($cumul_write_bytes_shared+$cumul_write_bytes_indep)
        {
            $wr_total_rt = ($fs_data{$key}->[1] / ($cumul_write_bytes_shared + $cumul_write_bytes_indep));
        }
        else
        {
            $wr_total_rt = 0;
        }

        my $rd_total_rt;
        if ($cumul_read_bytes_shared+$cumul_read_bytes_indep)
        {
            $rd_total_rt = ($fs_data{$key}->[0] / ($cumul_read_bytes_shared + $cumul_read_bytes_indep));
        }
        else
        {
            $rd_total_rt = 0;
        }

        printf TABLES "%s \& %.5f \& %.5f \& %.5f \& %.5f \\\\\n",
            $key, $wr_total_mb, $wr_total_rt, $rd_total_mb, $rd_total_rt;
}
print TABLES "
\\hline
\\end{tabular}
";
}
else
{
    print TABLES "
\\rule{0in}{1in}
\\parbox{5in}{Log versions prior to 1.24 do not support per-filesystem data.}
";
}
close(TABLES);


open(TIME, ">$tmp_dir/time-summary.dat") || die("error opening output file:$!\n");
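# I/O time expressed as a percentage of aggregate wall time (runtime * nprocs)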
print TIME "# <type>, <app time>, <read>, <write>, <meta>\n";
print TIME "POSIX, ", ((($runtime * $nprocs - $summary{CP_F_POSIX_READ_TIME} -
    $summary{CP_F_POSIX_WRITE_TIME} -
    $summary{CP_F_POSIX_META_TIME})/($runtime * $nprocs)) * 100);
print TIME ", ", (($summary{CP_F_POSIX_READ_TIME}/($runtime * $nprocs))*100);
print TIME ", ", (($summary{CP_F_POSIX_WRITE_TIME}/($runtime * $nprocs))*100);
print TIME ", ", (($summary{CP_F_POSIX_META_TIME}/($runtime * $nprocs))*100), "\n";
print TIME "MPI-IO, ", ((($runtime * $nprocs - $summary{CP_F_MPI_READ_TIME} -
    $summary{CP_F_MPI_WRITE_TIME} -
    $summary{CP_F_MPI_META_TIME})/($runtime * $nprocs)) * 100);
print TIME ", ", (($summary{CP_F_MPI_READ_TIME}/($runtime * $nprocs))*100);
print TIME ", ", (($summary{CP_F_MPI_WRITE_TIME}/($runtime * $nprocs))*100);
print TIME ", ", (($summary{CP_F_MPI_META_TIME}/($runtime * $nprocs))*100), "\n";
close TIME;

# copy template files to the temporary directory
system "$cp $PREFIX/share/*.gplt $tmp_dir/";
system "$cp $PREFIX/share/*.tex $tmp_dir/";

# generate template for file access plot (we have to set range)
my $ymax = $nprocs + 1;
open(FILEACC, ">$tmp_dir/file-access-read-eps.gplt") || die("error opening output file:$!\n");
print FILEACC "#!/usr/bin/gnuplot -persist

set terminal postscript eps color solid font \"Helvetica\" 18 size 10in,2.5in
set output \"file-access-read.eps\"
set ylabel \"MPI rank\"
set xlabel \"hours:minutes:seconds\"
set xdata time
set timefmt \"%s\"
set format x \"%H:%M:%S\"
set yrange [-1:$ymax]
set title \"Timespan from first to last read access on independent files\"
# the xrange doesn't work for some reason
#set xrange [0:$runtime]
#set ytics -1,1
set lmargin 4

# color blindness workaround
set style line 2 lc 3
set style line 3 lc 4
set style line 4 lc 5
set style line 5 lc 2
set style increment user

# lw 3 to make lines thicker...
# note that writes are slightly offset for better visibility
plot \"file-access-read.dat\" using 1:2:3:4 with vectors nohead filled notitle
";
close FILEACC;

open(FILEACC, ">$tmp_dir/file-access-write-eps.gplt") || die("error opening output file:$!\n");
print FILEACC "#!/usr/bin/gnuplot -persist

set terminal postscript eps color solid font \"Helvetica\" 18 size 10in,2.5in
set output \"file-access-write.eps\"
set ylabel \"MPI rank\"
set xlabel \"hours:minutes:seconds\"
set xdata time
set timefmt \"%s\"
set format x \"%H:%M:%S\"
set title \"Timespan from first to last write access on independent files\"
set yrange [-1:$ymax]
# the xrange doesn't work for some reason
# set xrange [0:$runtime]
#set ytics -1,1
set lmargin 4

# color blindness workaround
set style line 2 lc 3
set style line 3 lc 4
set style line 4 lc 5
set style line 5 lc 2
set style increment user

# lw 3 to make lines thicker...
plot \"file-access-write.dat\" using 1:2:3:4 with vectors nohead filled lt 2 notitle
";
close FILEACC;

open(FILEACC, ">$tmp_dir/file-access-shared-eps.gplt") || die("error opening output file:$!\n");
print FILEACC "#!/usr/bin/gnuplot -persist

set terminal postscript eps color solid font \"Helvetica\" 18 size 10in,2.5in
set output \"file-access-shared.eps\"
set xlabel \"hours:minutes:seconds\"
set xdata time
set timefmt \"%s\"
set format x \"%H:%M:%S\"
unset ytics
set ylabel \"All processes\"
# the xrange doesn't work for some reason
# set xrange [0:$runtime]
set yrange [-1:1]
set title \"Timespan from first to last access on files shared by all processes\"
set lmargin 4

# color blindness workaround
set style line 2 lc 3
set style line 3 lc 4
set style line 4 lc 5
set style line 5 lc 2
set style increment user

plot \"file-access-read-sh.dat\" using 1:2:3:4 with vectors nohead filled lw 10 title \"read\", \\
\"file-access-write-sh.dat\" using 1:((\$2)-.2):3:4 with vectors nohead filled lw 10 title \"write\"
";
close FILEACC;

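# convert cumulative counters to per-process averages; convert bytes to MiB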
$cumul_read_indep /= $nprocs;
$cumul_read_bytes_indep /= $nprocs;
$cumul_read_bytes_indep /= 1048576.0;

$cumul_write_indep /= $nprocs;
$cumul_write_bytes_indep /= $nprocs;
$cumul_write_bytes_indep /= 1048576.0;

$cumul_read_shared /= $nprocs;
$cumul_read_bytes_shared /= $nprocs;
$cumul_read_bytes_shared /= 1048576.0;

$cumul_write_shared /= $nprocs;
$cumul_write_bytes_shared /= $nprocs;
$cumul_write_bytes_shared /= 1048576.0;

$cumul_meta_shared /= $nprocs;
$cumul_meta_indep /= $nprocs;

open(FILEACC, ">$tmp_dir/file-access-table.tex") || die("error opening output file:$!\n");
print FILEACC "
\\begin{tabular}{l|p{1.7in}r}
\\multicolumn{3}{c}{Average I/O per process} \\\\
\\hline
 \& Cumulative time spent in I/O functions (seconds) \& Amount of I/O (MiB) \\\\
\\hline
\\hline
";

# printf to get consistent precision in output
printf(FILEACC "Independent reads \& \\multicolumn{1}{r}{%f} \& \\multicolumn{1}{r}{%f} \\\\", 
    $cumul_read_indep, $cumul_read_bytes_indep);
printf(FILEACC "Independent writes \& \\multicolumn{1}{r}{%f} \& \\multicolumn{1}{r}{%f} \\\\", 
    $cumul_write_indep, $cumul_write_bytes_indep);
printf(FILEACC "Independent metadata \& \\multicolumn{1}{r}{%f} \& \\multicolumn{1}{r}{N/A} \\\\", 
    $cumul_meta_indep);
printf(FILEACC "Shared reads \& \\multicolumn{1}{r}{%f} \& \\multicolumn{1}{r}{%f} \\\\", 
    $cumul_read_shared, $cumul_read_bytes_shared);
printf(FILEACC "Shared writes \& \\multicolumn{1}{r}{%f} \& \\multicolumn{1}{r}{%f} \\\\", 
    $cumul_write_shared, $cumul_write_bytes_shared);
printf(FILEACC "Shared metadata \& \\multicolumn{1}{r}{%f} \& \\multicolumn{1}{r}{N/A} \\\\", 
    $cumul_meta_shared);

print FILEACC "
\\hline
\\end{tabular}
";
close(FILEACC);

#
# Variance Data
#
open(VARP, ">$tmp_dir/variance-table.tex") || die("error opening output file:$!\n");
print VARP "
\\begin{tabular}{c|r|r|r|r|r|r|r|r|r}
\\multicolumn{10}{c}{} \\\\
\\multicolumn{10}{c}{Variance in Shared Files} \\\\
\\hline
File \& Processes \& \\multicolumn{3}{c}{Fastest} \\vline \&
\\multicolumn{3}{c}{Slowest} \\vline \& \\multicolumn{2}{c}{\$\\sigma\$} \\\\
\\cline{3-10}
Suffix \&  \& Rank \& Time \& Bytes \& Rank \& Time \& Bytes \& Time \& Bytes \\\\
\\hline
\\hline
";

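# list up to 20 files accessed by more than one process, sorted by the slowest
# rank's I/O time (descending)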
my $curcount = 1;
foreach $key (sort { $hash_files{$b}{'slowest_time'} <=> $hash_files{$a}{'slowest_time'} } keys %hash_files) {

    if ($curcount > 20) { last; }

    if ($hash_files{$key}{'procs'} > 1)
    {
        my $vt = sprintf("%.3g", sqrt($hash_files{$key}{'variance_time'}));
        my $vb = sprintf("%.3g", sqrt($hash_files{$key}{'variance_bytes'}));
        my $fast_bytes = format_bytes($hash_files{$key}{'fastest_bytes'});
        my $slow_bytes = format_bytes($hash_files{$key}{'slowest_bytes'});
        my $name = encode('latex', $hash_files{$key}{'name'});

        print VARP "
               $name \&
               $hash_files{$key}{'procs'} \&
               $hash_files{$key}{'fastest_rank'} \&
               $hash_files{$key}{'fastest_time'} \&
               $fast_bytes \&
               $hash_files{$key}{'slowest_rank'} \&
               $hash_files{$key}{'slowest_time'} \& 
               $slow_bytes \&
               $vt \&
               $vb \\\\
         ";
        $curcount++;
    }
}

print VARP "
\\hline
\\end{tabular}
";
close(VARP);

# calculate performance
##########################################################################

# what was the slowest time by any proc for unique file access?
my $slowest_uniq_time = 0;
if(keys %hash_unique_file_time > 0)
{
    $slowest_uniq_time < $_ and $slowest_uniq_time = $_ for values %hash_unique_file_time;
}
print("Slowest unique file time: $slowest_uniq_time\n");
print("Slowest shared file time: $shared_file_time\n");
print("Total bytes read and written by app (may be incorrect): $total_job_bytes\n");
my $tmp_total_time = $slowest_uniq_time+$shared_file_time;
print("Total absolute I/O time: $tmp_total_time\n");

# move to tmp_dir
chdir $tmp_dir;

# execute gnuplot scripts
system "$gnuplot counts-eps.gplt";
system "$epstopdf counts.eps";
system "$gnuplot hist-eps.gplt";
system "$epstopdf hist.eps";
system "$gnuplot pattern-eps.gplt";
system "$epstopdf pattern.eps";
system "$gnuplot time-summary-eps.gplt";
system "$epstopdf time-summary.eps";
system "$gnuplot file-access-read-eps.gplt";
system "$epstopdf file-access-read.eps";
system "$gnuplot file-access-write-eps.gplt";
system "$epstopdf file-access-write.eps";
system "$gnuplot file-access-shared-eps.gplt";
system "$epstopdf file-access-shared.eps";

#system "gnuplot align-pdf.gplt";
#system "gnuplot iodist-pdf.gplt";
#system "gnuplot types-pdf.gplt";

# generate summary PDF
# NOTE: an autoconf test determines if -halt-on-error is available and sets
# __CP_PDFLATEX_HALT_ON_ERROR accordingly
$system_rc = system "$pdflatex @__CP_PDFLATEX_HALT_ON_ERROR@ summary.tex > latex.output";
if($system_rc)
{
    print("LaTeX generation (phase1) failed [$system_rc], aborting summary creation.\n");
    print("error log:\n");