Commit fab0224e
authored Aug 25, 2015 by Shane Snyder

stubbed out version of job summary script

parent e5172a88
Showing 1 changed file with 3 additions and 1347 deletions:

darshan-util/darshan-job-summary/bin/darshan-job-summary.pl.in (+3, -1347) @ fab0224e
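The deletion below is the bulk of the change: the script piped one log through darshan-parser, accumulated its counters into gnuplot data files and LaTeX table fragments in a temporary directory, and rendered summary.pdf. A sketch of a typical invocation (the log path here is illustrative):

    darshan-job-summary.pl /path/to/logfile.darshan.gz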
 #!/usr/bin/perl -w
 #
-# (C) 2009 by Argonne National Laboratory.
+# (C) 2015 by Argonne National Laboratory.
 # See COPYRIGHT in top-level directory.
 #

@@ -18,1315 +18,6 @@ use English;
use Number::Bytes::Human qw(format_bytes);
use POSIX qw(strftime);

#
# system commands used
#
my $darshan_parser = "$PREFIX/bin/darshan-parser";
my $pdflatex = "pdflatex";
my $epstopdf = "epstopdf";
my $cp = "cp";
my $mv = "mv";
my $gnuplot;
# Prefer gnuplot installed with darshan, if present.
if(-x "$PREFIX/bin/gnuplot")
{
    $gnuplot = "$PREFIX/bin/gnuplot";
}
else
{
    $gnuplot = "gnuplot";
}

my $orig_dir = getcwd;
my $output_file = "summary.pdf";
my $verbose_flag = 0;
my $input_file = "";
my %access_hash = ();
my @access_size = ();
my %hash_files = ();
my $size_est_flag = 0;
my $read_interval_overflow_flag = 0;
my $write_interval_overflow_flag = 0;

# data structures for calculating performance
my %hash_unique_file_time = ();
my $shared_file_time = 0;
my $total_job_bytes = 0;

process_args();

check_prereqs();

my $tmp_dir = tempdir(CLEANUP => !$verbose_flag);
if($verbose_flag)
{
    print "verbose: $tmp_dir\n";
}
open(TRACE, "$darshan_parser $input_file |") || die("Can't execute \"$darshan_parser $input_file\": $!\n");

open(FA_READ, ">$tmp_dir/file-access-read.dat") || die("error opening output file: $!\n");
open(FA_WRITE, ">$tmp_dir/file-access-write.dat") || die("error opening output file: $!\n");
open(FA_READ_SH, ">$tmp_dir/file-access-read-sh.dat") || die("error opening output file: $!\n");
open(FA_WRITE_SH, ">$tmp_dir/file-access-write-sh.dat") || die("error opening output file: $!\n");
my $last_read_start = 0;
my $last_write_start = 0;

my $cumul_read_indep = 0;
my $cumul_read_bytes_indep = 0;
my $cumul_write_indep = 0;
my $cumul_write_bytes_indep = 0;
my $cumul_read_shared = 0;
my $cumul_read_bytes_shared = 0;
my $cumul_write_shared = 0;
my $cumul_write_bytes_shared = 0;
my $cumul_meta_shared = 0;
my $cumul_meta_indep = 0;

my $first_data_line = 1;
my $current_rank = 0;
my $current_hash = 0;
my %file_record_hash = ();

my %fs_data = ();
while($line = <TRACE>)
{
    chomp($line);

    if($line =~ /^\s*$/)
    {
        # ignore blank lines
    }
    elsif($line =~ /^#/)
    {
        if($line =~ /^# exe: /)
        {
            ($junk, $cmdline) = split(':', $line, 2);

            # add escape characters if needed for special characters in
            # command line
            $cmdline = encode('latex', $cmdline);
        }
        if($line =~ /^# nprocs: /)
        {
            ($junk, $nprocs) = split(':', $line, 2);
            $procreads[$nprocs] = 0;
        }
        if($line =~ /^# run time: /)
        {
            ($junk, $runtime) = split(':', $line, 2);
        }
        if($line =~ /^# start_time: /)
        {
            ($junk, $starttime) = split(':', $line, 2);
        }
        if($line =~ /^# uid: /)
        {
            ($junk, $uid) = split(':', $line, 2);
        }
        if($line =~ /^# jobid: /)
        {
            ($junk, $jobid) = split(':', $line, 2);
        }
        if($line =~ /^# darshan log version: /)
        {
            ($junk, $version) = split(':', $line, 2);
            $version =~ s/^\s+//;
            ($major, $minor) = split(/\./, $version, 2);
        }
    }
    else
    {
        # parse line
        @fields = split(/[\t ]+/, $line);

        # encode the file system name to protect against special characters
        $fields[5] = encode('latex', $fields[5]);

        # is this our first piece of data?
        if($first_data_line)
        {
            $current_rank = $fields[0];
            $current_hash = $fields[1];
            $first_data_line = 0;
        }

        # is this a new file record?
        if($fields[0] != $current_rank || $fields[1] != $current_hash)
        {
            # process previous record
            process_file_record($current_rank, $current_hash, \%file_record_hash);

            # reset variables for next record
            $current_rank = $fields[0];
            $current_hash = $fields[1];
            %file_record_hash = ();
            $file_record_hash{CP_NAME_SUFFIX} = $fields[4];
        }

        $file_record_hash{$fields[2]} = $fields[3];

        $summary{$fields[2]} += $fields[3];

        # record per-process POSIX read count
        if($fields[2] eq "CP_POSIX_READS" || $fields[2] eq "CP_POSIX_FREADS")
        {
            if($fields[0] == -1)
            {
                $procreads[$nprocs] += $fields[3];
            }
            else
            {
                $procreads[$fields[0]] += $fields[3];
            }
        }

        # record per-process POSIX write count
        if($fields[2] eq "CP_POSIX_WRITES" || $fields[2] eq "CP_POSIX_FWRITES")
        {
            if($fields[0] == -1)
            {
                $procwrites[$nprocs] += $fields[3];
            }
            else
            {
                $procwrites[$fields[0]] += $fields[3];
            }
        }
        # separate accumulators for independent and shared reads and writes
        if($fields[2] eq "CP_F_POSIX_READ_TIME" && $fields[0] == -1){
            $cumul_read_shared += $fields[3];
        }
        if($fields[2] eq "CP_F_POSIX_READ_TIME" && $fields[0] != -1){
            $cumul_read_indep += $fields[3];
        }
        if($fields[2] eq "CP_F_POSIX_WRITE_TIME" && $fields[0] == -1){
            $cumul_write_shared += $fields[3];
        }
        if($fields[2] eq "CP_F_POSIX_WRITE_TIME" && $fields[0] != -1){
            $cumul_write_indep += $fields[3];
        }
        if($fields[2] eq "CP_F_POSIX_META_TIME" && $fields[0] == -1){
            $cumul_meta_shared += $fields[3];
        }
        if($fields[2] eq "CP_F_POSIX_META_TIME" && $fields[0] != -1){
            $cumul_meta_indep += $fields[3];
        }
        if((($fields[2] eq "CP_BYTES_READ") or ($fields[2] eq "CP_BYTES_WRITTEN"))
            and not defined($fs_data{$fields[5]}))
        {
            $fs_data{$fields[5]} = [0, 0];
        }

        if($fields[2] eq "CP_BYTES_READ" && $fields[0] == -1){
            $cumul_read_bytes_shared += $fields[3];
            $fs_data{$fields[5]}->[0] += $fields[3];
        }
        if($fields[2] eq "CP_BYTES_READ" && $fields[0] != -1){
            $cumul_read_bytes_indep += $fields[3];
            $fs_data{$fields[5]}->[0] += $fields[3];
        }
        if($fields[2] eq "CP_BYTES_WRITTEN" && $fields[0] == -1){
            $cumul_write_bytes_shared += $fields[3];
            $fs_data{$fields[5]}->[1] += $fields[3];
        }
        if($fields[2] eq "CP_BYTES_WRITTEN" && $fields[0] != -1){
            $cumul_write_bytes_indep += $fields[3];
            $fs_data{$fields[5]}->[1] += $fields[3];
        }
        # record start and end of reads and writes
        if($fields[2] eq "CP_F_READ_START_TIMESTAMP")
        {
            # store until we find the end
            # adjust for systems that give absolute time stamps
            $last_read_start = $fields[3];
        }
        if($fields[2] eq "CP_F_READ_END_TIMESTAMP" && $fields[3] != 0)
        {
            # assume we got the read start already
            my $xdelta = $fields[3] - $last_read_start;
            # adjust for systems that have absolute time stamps
            if($last_read_start > $starttime)
            {
                $last_read_start -= $starttime;
            }
            if($fields[3] > $runtime && !$read_interval_overflow_flag)
            {
                $read_interval_overflow_flag = 1;
                print "Warning: detected read access at time $fields[3] but runtime is only $runtime seconds.\n";
            }
            if($fields[0] == -1){
                print FA_READ_SH "$last_read_start\t0\t$xdelta\t0\n";
            }
            else{
                print FA_READ "$last_read_start\t$fields[0]\t$xdelta\t0\n";
            }
        }
        if($fields[2] eq "CP_F_WRITE_START_TIMESTAMP")
        {
            # store until we find the end
            $last_write_start = $fields[3];
        }
        if($fields[2] eq "CP_F_WRITE_END_TIMESTAMP" && $fields[3] != 0)
        {
            # assume we got the write start already
            my $xdelta = $fields[3] - $last_write_start;
            # adjust for systems that have absolute time stamps
            if($last_write_start > $starttime)
            {
                $last_write_start -= $starttime;
            }
            if($fields[3] > $runtime && !$write_interval_overflow_flag)
            {
                $write_interval_overflow_flag = 1;
                print "Warning: detected write access at time $fields[3] but runtime is only $runtime seconds.\n";
            }
            if($fields[0] == -1){
                print FA_WRITE_SH "$last_write_start\t0\t$xdelta\t0\n";
            }
            else{
                print FA_WRITE "$last_write_start\t$fields[0]\t$xdelta\t0\n";
            }
        }
        if($fields[2] =~ /^CP_ACCESS(.)_ACCESS/)
        {
            $access_size[$1] = $fields[3];
        }
        if($fields[2] =~ /^CP_ACCESS(.)_COUNT/)
        {
            my $tmp_access_size = $access_size[$1];
            if(defined $access_hash{$tmp_access_size}){
                $access_hash{$tmp_access_size} += $fields[3];
            }
            else{
                $access_hash{$tmp_access_size} = $fields[3];
            }
        }
    }
}
close(TRACE) || die "darshan-parser failure: $! $?";
#
# Exit out if there are no actual file accesses
#
if($first_data_line)
{
    $strtm = strftime("%a %b %e %H:%M:%S %Y", localtime($starttime));

    print "This darshan log has no file records. No summary was produced.\n";
    print "jobid: $jobid\n";
    print "uid: $uid\n";
    print "starttime: $strtm ($starttime)\n";
    print "runtime: $runtime (seconds)\n";
    print "nprocs: $nprocs\n";
    print "version: $version\n";
    exit(1);
}

# process last file record
$file_record_hash{CP_NAME_SUFFIX} = $fields[4];
process_file_record($current_rank, $current_hash, \%file_record_hash);
# Fudge one point at the end to make xrange match in read and write plots.
# For some reason I can't get the xrange command to work. -Phil
print FA_READ "$runtime\t-1\t0\t0\n";
print FA_WRITE "$runtime\t-1\t0\t0\n";
print FA_READ_SH "$runtime\t0\t0\t0\n";
print FA_WRITE_SH "$runtime\t0\t0\t0\n";
close(FA_READ);
close(FA_WRITE);
close(FA_READ_SH);
close(FA_WRITE_SH);
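
# Each row written to the file-access .dat files above has the form
# "start<TAB>rank<TAB>duration<TAB>0" (rank is pinned to 0 in the shared
# variants); an illustrative row: 12.75  3  0.42  0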
# counts of operations
open(COUNTS, ">$tmp_dir/counts.dat") || die("error opening output file: $!\n");
print COUNTS "# P=POSIX, MI=MPI-IO indep., MC=MPI-IO coll., R=read, W=write\n";
print COUNTS "# PR, MIR, MCR, PW, MIW, MCW, Popen, Pseek, Pstat\n";
my $total_posix_opens = $summary{CP_POSIX_OPENS} + $summary{CP_POSIX_FOPENS};
my $total_syncs = $summary{CP_POSIX_FSYNCS} + $summary{CP_POSIX_FDSYNCS};
print COUNTS
    "Read, ", $summary{CP_POSIX_READS} + $summary{CP_POSIX_FREADS},
    ", ", $summary{CP_INDEP_READS},
    ", ", $summary{CP_COLL_READS}, "\n",
    "Write, ", $summary{CP_POSIX_WRITES} + $summary{CP_POSIX_FWRITES},
    ", ", $summary{CP_INDEP_WRITES},
    ", ", $summary{CP_COLL_WRITES}, "\n",
    "Open, ", $total_posix_opens,
    ", ", $summary{CP_INDEP_OPENS},
    ", ", $summary{CP_COLL_OPENS}, "\n",
    "Stat, ", $summary{CP_POSIX_STATS}, ", 0, 0\n",
    "Seek, ", $summary{CP_POSIX_SEEKS}, ", 0, 0\n",
    "Mmap, ", $summary{CP_POSIX_MMAPS}, ", 0, 0\n",
    "Fsync, ", $total_syncs, ", 0, 0\n";
close COUNTS;
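
# counts.dat ends up with one "label, POSIX, MPI-IO indep., MPI-IO coll."
# row per operation class; illustrative contents:
#
#   Read, 1024, 256, 32
#   Write, 512, 128, 16
#   Open, 8, 4, 4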
# histograms of reads and writes
open(HIST, ">$tmp_dir/hist.dat") || die("error opening output file: $!\n");
print HIST "# size_range read write\n";
print HIST "0-100, ", $summary{CP_SIZE_READ_0_100},
    ", ", $summary{CP_SIZE_WRITE_0_100}, "\n";
print HIST "101-1K, ", $summary{CP_SIZE_READ_100_1K},
    ", ", $summary{CP_SIZE_WRITE_100_1K}, "\n";
print HIST "1K-10K, ", $summary{CP_SIZE_READ_1K_10K},
    ", ", $summary{CP_SIZE_WRITE_1K_10K}, "\n";
print HIST "10K-100K, ", $summary{CP_SIZE_READ_10K_100K},
    ", ", $summary{CP_SIZE_WRITE_10K_100K}, "\n";
print HIST "100K-1M, ", $summary{CP_SIZE_READ_100K_1M},
    ", ", $summary{CP_SIZE_WRITE_100K_1M}, "\n";
print HIST "1M-4M, ", $summary{CP_SIZE_READ_1M_4M},
    ", ", $summary{CP_SIZE_WRITE_1M_4M}, "\n";
print HIST "4M-10M, ", $summary{CP_SIZE_READ_4M_10M},
    ", ", $summary{CP_SIZE_WRITE_4M_10M}, "\n";
print HIST "10M-100M, ", $summary{CP_SIZE_READ_10M_100M},
    ", ", $summary{CP_SIZE_WRITE_10M_100M}, "\n";
print HIST "100M-1G, ", $summary{CP_SIZE_READ_100M_1G},
    ", ", $summary{CP_SIZE_WRITE_100M_1G}, "\n";
print HIST "1G+, ", $summary{CP_SIZE_READ_1G_PLUS},
    ", ", $summary{CP_SIZE_WRITE_1G_PLUS}, "\n";
close HIST;
# sequential and consecutive accesses
open(PATTERN, ">$tmp_dir/pattern.dat") || die("error opening output file: $!\n");
print PATTERN "# op total sequential consecutive\n";
print PATTERN "Read, ", $summary{CP_POSIX_READS} + $summary{CP_POSIX_FREADS},
    ", ", $summary{CP_SEQ_READS},
    ", ", $summary{CP_CONSEC_READS}, "\n";
print PATTERN "Write, ", $summary{CP_POSIX_WRITES} + $summary{CP_POSIX_FWRITES},
    ", ", $summary{CP_SEQ_WRITES},
    ", ", $summary{CP_CONSEC_WRITES}, "\n";
close PATTERN;
# aligned I/O
open(ALIGN, ">$tmp_dir/align.dat") || die("error opening output file: $!\n");
print ALIGN "# total unaligned_mem unaligned_file align_mem align_file\n";
print ALIGN $summary{CP_POSIX_READS} + $summary{CP_POSIX_WRITES} +
    $summary{CP_POSIX_FREADS} + $summary{CP_POSIX_FWRITES},
    ", ", $summary{CP_MEM_NOT_ALIGNED},
    ", ", $summary{CP_FILE_NOT_ALIGNED}, "\n";
close ALIGN;
# MPI types
open(TYPES, ">$tmp_dir/types.dat") || die("error opening output file: $!\n");
print TYPES "# type use_count\n";
print TYPES "Named, ", $summary{CP_COMBINER_NAMED}, "\n";
print TYPES "Dup, ", $summary{CP_COMBINER_DUP}, "\n";
print TYPES "Contig, ", $summary{CP_COMBINER_CONTIGUOUS}, "\n";
print TYPES "Vector, ", $summary{CP_COMBINER_VECTOR}, "\n";
print TYPES "HvecInt, ", $summary{CP_COMBINER_HVECTOR_INTEGER}, "\n";
print TYPES "Hvector, ", $summary{CP_COMBINER_HVECTOR}, "\n";
print TYPES "Indexed, ", $summary{CP_COMBINER_INDEXED}, "\n";
print TYPES "HindInt, ", $summary{CP_COMBINER_HINDEXED_INTEGER}, "\n";
print TYPES "Hindexed, ", $summary{CP_COMBINER_HINDEXED}, "\n";
print TYPES "IndBlk, ", $summary{CP_COMBINER_INDEXED_BLOCK}, "\n";
print TYPES "StructInt, ", $summary{CP_COMBINER_STRUCT_INTEGER}, "\n";
print TYPES "Struct, ", $summary{CP_COMBINER_STRUCT}, "\n";
print TYPES "Subarray, ", $summary{CP_COMBINER_SUBARRAY}, "\n";
print TYPES "Darray, ", $summary{CP_COMBINER_DARRAY}, "\n";
print TYPES "F90Real, ", $summary{CP_COMBINER_F90_REAL}, "\n";
print TYPES "F90Complex, ", $summary{CP_COMBINER_F90_COMPLEX}, "\n";
print TYPES "F90Int, ", $summary{CP_COMBINER_F90_INTEGER}, "\n";
print TYPES "Resized, ", $summary{CP_COMBINER_RESIZED}, "\n";
close TYPES;
# generate histogram of process I/O counts
#
# NOTE: NEED TO FILL IN ACTUAL WRITE DATA!!!
#
$minprocread = (defined $procreads[0]) ? $procreads[0] : 0;
$maxprocread = (defined $procreads[0]) ? $procreads[0] : 0;
for($i = 1; $i < $nprocs; $i++)
{
    $rdi = (defined $procreads[$i]) ? $procreads[$i] : 0;
    $minprocread = ($rdi > $minprocread) ? $minprocread : $rdi;
    $maxprocread = ($rdi < $maxprocread) ? $maxprocread : $rdi;
}
$minprocread += $procreads[$nprocs];
$maxprocread += $procreads[$nprocs];
# print "$minprocread $maxprocread\n";
@bucket = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0);

for($i = 0; $i < $nprocs; $i++)
{
    $mysize = ((defined $procreads[$i]) ? $procreads[$i] : 0) +
        $procreads[$nprocs];
    $mysize -= $minprocread;
    $mybucket = ($mysize > 0) ?
        (($mysize * 10) / ($maxprocread - $minprocread)) : 0;
    $bucket[$mybucket]++;
}
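
# Worked example of the bucketing above (illustrative numbers): with
# $minprocread = 0 and $maxprocread = 100, a rank whose adjusted read count
# is 55 gives $mybucket = (55 * 10) / 100 = 5.5, and Perl truncates the
# fractional array index, so $bucket[5] is incremented.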
open(IODIST, ">$tmp_dir/iodist.dat") || die("error opening output file: $!\n");
print IODIST "# bucket n_procs_rd n_procs_wr\n";
print IODIST "# NOTE: WRITES ARE A COPY OF READS FOR NOW!!!\n";

# each of the 10 buckets covers a tenth of the min..max range
$bucketsize = ($maxprocread - $minprocread) / 10;
# TODO: do writes also, is dropping a 0 in for now
for($i = 0; $i < 10; $i++)
{
    print IODIST $bucketsize * $i + $minprocread, "-",
        $bucketsize * ($i + 1) + $minprocread,
        ", ", $bucket[$i], ", 0\n";
}
close IODIST;
# generate title for summary
($executable, $junk) = split(' ', $cmdline, 2);
@parts = split('/', $executable);
$cmd = $parts[$#parts];

@timearray = localtime($starttime);
$year = $timearray[5] + 1900;
$mon = $timearray[4] + 1;
$mday = $timearray[3];

open(TITLE, ">$tmp_dir/title.tex") || die("error opening output file:$!\n");
print TITLE "
\\rhead{\\thepage\\ of \\pageref{LastPage}}
\\chead[\\large $cmd ($mon/$mday/$year)]
{\\large $cmd ($mon/$mday/$year)}
\\cfoot[\\scriptsize{$cmdline}]
{\\scriptsize{$cmdline}}
";
close TITLE;
open(TABLES, ">$tmp_dir/job-table.tex") || die("error opening output file:$!\n");
print TABLES "
\\begin{tabular}{|p{.47\\columnwidth}|p{.35\\columnwidth}|p{.47\\columnwidth}|p{.6\\columnwidth}|}
\\hline
jobid: $jobid \& uid: $uid \& nprocs: $nprocs \& runtime: $runtime seconds \\\\
\\hline
\\end{tabular}
";
close TABLES;
open(TABLES, ">$tmp_dir/access-table.tex") || die("error opening output file:$!\n");
print TABLES "
\\begin{tabular}{r|r}
\\multicolumn{2}{c}{ } \\\\
\\multicolumn{2}{c}{Most Common Access Sizes} \\\\
\\hline
access size \& count \\\\
\\hline
\\hline
";

# sort access sizes (descending)
my $i = 0;
foreach $value (sort { $access_hash{$b} <=> $access_hash{$a} } keys %access_hash)
{
    if($i == 4)
    {
        last;
    }
    if($access_hash{$value} == 0)
    {
        last;
    }
    print TABLES "$value \& $access_hash{$value} \\\\\n";
    $i++;
}

print TABLES "
\\hline
\\end{tabular}
";
close TABLES;
open(TABLES, ">$tmp_dir/file-count-table.tex") || die("error opening output file:$!\n");
print TABLES "
\\begin{tabular}{r|r|r|r}
\\multicolumn{4}{c}{ } \\\\
\\multicolumn{4}{c}{File Count Summary} \\\\
";
if($size_est_flag == 1)
{
    print TABLES "
\\multicolumn{4}{c}{(estimated by I/O access offsets)} \\\\
";
}
print TABLES "
\\hline
type \& number of files \& avg. size \& max size \\\\
\\hline
\\hline
";
my $counter;
my $sum;
my $max;
my $key;
my $avg;

$counter = 0;
$sum = 0;
$max = 0;
foreach $key (keys %hash_files)
{
    $counter++;
    if($hash_files{$key}{'min_open_size'} > $hash_files{$key}{'max_size'})
    {
        $sum += $hash_files{$key}{'min_open_size'};
        if($hash_files{$key}{'min_open_size'} > $max)
        {
            $max = $hash_files{$key}{'min_open_size'};
        }
    }
    else
    {
        $sum += $hash_files{$key}{'max_size'};
        if($hash_files{$key}{'max_size'} > $max)
        {
            $max = $hash_files{$key}{'max_size'};
        }
    }
}
if($counter > 0)
{
    $avg = $sum / $counter;
}
else
{
    $avg = 0;
}
$avg = format_bytes($avg);
$max = format_bytes($max);
print TABLES "total opened \& $counter \& $avg \& $max \\\\\n";
$counter = 0;
$sum = 0;
$max = 0;
foreach $key (keys %hash_files)
{
    if($hash_files{$key}{'was_read'} && !($hash_files{$key}{'was_written'}))
    {
        $counter++;
        if($hash_files{$key}{'min_open_size'} > $hash_files{$key}{'max_size'})
        {
            $sum += $hash_files{$key}{'min_open_size'};
            if($hash_files{$key}{'min_open_size'} > $max)
            {
                $max = $hash_files{$key}{'min_open_size'};
            }
        }
        else
        {
            $sum += $hash_files{$key}{'max_size'};
            if($hash_files{$key}{'max_size'} > $max)
            {
                $max = $hash_files{$key}{'max_size'};
            }
        }
    }
}
if($counter > 0)
{
    $avg = $sum / $counter;
}
else
{
    $avg = 0;
}
$avg = format_bytes($avg);
$max = format_bytes($max);
print TABLES "read-only files \& $counter \& $avg \& $max \\\\\n";
$counter = 0;
$sum = 0;
$max = 0;
foreach $key (keys %hash_files)
{
    if(!($hash_files{$key}{'was_read'}) && $hash_files{$key}{'was_written'})
    {
        $counter++;
        if($hash_files{$key}{'min_open_size'} > $hash_files{$key}{'max_size'})
        {
            $sum += $hash_files{$key}{'min_open_size'};
            if($hash_files{$key}{'min_open_size'} > $max)
            {
                $max = $hash_files{$key}{'min_open_size'};
            }
        }
        else
        {
            $sum += $hash_files{$key}{'max_size'};
            if($hash_files{$key}{'max_size'} > $max)
            {
                $max = $hash_files{$key}{'max_size'};
            }
        }
    }
}
if($counter > 0)
{
    $avg = $sum / $counter;
}
else
{
    $avg = 0;
}
$avg = format_bytes($avg);
$max = format_bytes($max);
print TABLES "write-only files \& $counter \& $avg \& $max \\\\\n";
$counter = 0;
$sum = 0;
$max = 0;
foreach $key (keys %hash_files)
{
    if($hash_files{$key}{'was_read'} && $hash_files{$key}{'was_written'})
    {
        $counter++;
        if($hash_files{$key}{'min_open_size'} > $hash_files{$key}{'max_size'})
        {
            $sum += $hash_files{$key}{'min_open_size'};
            if($hash_files{$key}{'min_open_size'} > $max)
            {
                $max = $hash_files{$key}{'min_open_size'};
            }
        }
        else
        {
            $sum += $hash_files{$key}{'max_size'};
            if($hash_files{$key}{'max_size'} > $max)
            {
                $max = $hash_files{$key}{'max_size'};
            }
        }
    }
}
if($counter > 0)
{
    $avg = $sum / $counter;
}
else
{
    $avg = 0;
}
$avg = format_bytes($avg);
$max = format_bytes($max);
print TABLES "read/write files \& $counter \& $avg \& $max \\\\\n";
$counter = 0;
$sum = 0;
$max = 0;
foreach $key (keys %hash_files)
{
    if($hash_files{$key}{'was_written'} &&
        $hash_files{$key}{'min_open_size'} == 0 &&
        $hash_files{$key}{'max_size'} > 0)
    {