Sudheer Chunduri / darshan / Commits / 48dbd2e8

Commit 48dbd2e8, authored Oct 13, 2017 by Shane Snyder
Merge branch 'snyder-wrap-mod'

Parents: c7b5ac90, ccddff36
Changes: 12 files
darshan-runtime/Makefile.in
...
@@ -199,6 +199,7 @@ ifdef BUILD_HDF5_MODULE
endif
	install -m 644 $(srcdir)/share/ld-opts/darshan-pnetcdf-ld-opts $(datarootdir)/ld-opts/darshan-pnetcdf-ld-opts
	install -m 644 $(srcdir)/share/ld-opts/darshan-stdio-ld-opts $(datarootdir)/ld-opts/darshan-stdio-ld-opts
	install -m 644 $(srcdir)/share/ld-opts/darshan-mpiio-ld-opts $(datarootdir)/ld-opts/darshan-mpiio-ld-opts
ifdef ENABLE_MMAP_LOGS
	install -m 755 share/darshan-mmap-epilog.sh $(datarootdir)/darshan-mmap-epilog.sh
endif
...
darshan-runtime/darshan.h
...
@@ -28,15 +28,27 @@
#define DARSHAN_DECL(__func) __func
#define DARSHAN_MPI_CALL(__func) __real_ ## __func

+/* creates P* variant of MPI symbols for LD_PRELOAD so that we can handle
+ * language bindings that map to MPI or PMPI symbols under the covers.
+ */
+#define DARSHAN_WRAPPER_MAP(__func,__ret,__args,__fcall) \
+    __ret __func __args { \
+        __ret i; \
+        i = __fcall; \
+        return i; \
+    }
+
/* Map the desired function call to a pointer called __real_NAME at run
 * time. Note that we fall back to looking for the same symbol with a P
 * prefix to handle MPI bindings that call directly to the PMPI layer.
 */
#define MAP_OR_FAIL(__func) \
    if (!(__real_ ## __func)) \
    { \
        __real_ ## __func = dlsym(RTLD_NEXT, #__func); \
        if(!(__real_ ## __func)) { \
            fprintf(stderr, "Darshan failed to map symbol: %s\n", #__func); \
            exit(1); \
        } \
    }
...
@@ -47,7 +59,15 @@
#define DARSHAN_DECL(__name) __wrap_ ## __name
#define DARSHAN_MPI_CALL(__func) __func

+/* creates P* variant of MPI symbols for static linking so that we can handle
+ * language bindings that map to MPI or PMPI symbols under the covers.
+ */
+#define DARSHAN_WRAPPER_MAP(__func,__ret,__args,__fcall) \
+    __ret __wrap_ ## __func __args { \
+        __ret i; \
+        i = __wrap_ ## __fcall; \
+        return i; \
+    }
+
#define MAP_OR_FAIL(__func)
...
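To make the intent of the two new DARSHAN_WRAPPER_MAP variants concrete, here is a hand-expanded sketch of the PMPI_Init mapping that darshan-core-init-finalize.c adds further down in this commit. The expansion follows mechanically from the macro definitions above; the function bodies are illustrative, not code copied from the repository.

/* LD_PRELOAD build (DARSHAN_DECL(__func) is just __func):
 * DARSHAN_WRAPPER_MAP(PMPI_Init, int, (int *argc, char ***argv), MPI_Init(argc, argv))
 * defines a PMPI_Init symbol that forwards into Darshan's MPI_Init wrapper, so
 * language bindings that call the PMPI layer directly are still intercepted. */
int PMPI_Init(int *argc, char ***argv) {
    int i;
    i = MPI_Init(argc, argv);
    return i;
}

/* Static (--wrap) build: the same invocation instead emits __wrap_PMPI_Init,
 * which forwards to __wrap_MPI_Init, the symbol the linker substitutes for MPI_Init. */
int __wrap_PMPI_Init(int *argc, char ***argv) {
    int i;
    i = __wrap_MPI_Init(argc, argv);
    return i;
}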
darshan-runtime/lib/darshan-bgq.c
...
@@ -196,13 +196,13 @@ static void bgq_shutdown(
    {
        bgq_runtime->record->base_rec.rank = -1;
-       DARSHAN_MPI_CALL(PMPI_Comm_size)(mod_comm, &nprocs);
+       PMPI_Comm_size(mod_comm, &nprocs);
        ion_ids = malloc(sizeof(*ion_ids) * nprocs);
        result = (ion_ids != NULL);
        if(!result)
            bgq_runtime->record->counters[BGQ_INODES] = -1;
    }
-   DARSHAN_MPI_CALL(PMPI_Bcast)(&result, 1, MPI_INT, 0, mod_comm);
+   PMPI_Bcast(&result, 1, MPI_INT, 0, mod_comm);

    /* caclulate the number of I/O nodes */
    if(result)
...
@@ -210,7 +210,7 @@ static void bgq_shutdown(
        int i, found;
        uint64_t val;

-       DARSHAN_MPI_CALL(PMPI_Gather)(&bgq_runtime->record->counters[BGQ_INODES],
+       PMPI_Gather(&bgq_runtime->record->counters[BGQ_INODES],
            1, MPI_LONG_LONG_INT, ion_ids,
...
darshan-runtime/lib/darshan-core-init-finalize.c
...
@@ -17,203 +17,17 @@
#include "darshan-core.h"
#include "darshan-dynamic.h"
-#ifdef DARSHAN_PRELOAD
-
-DARSHAN_FORWARD_DECL(PMPI_File_close, int, (MPI_File *fh));
-DARSHAN_FORWARD_DECL(PMPI_File_iread_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
-DARSHAN_FORWARD_DECL(PMPI_File_iread, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
-DARSHAN_FORWARD_DECL(PMPI_File_iread_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_File_iwrite_at, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
-#else
-DARSHAN_FORWARD_DECL(PMPI_File_iwrite_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
-#endif
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_File_iwrite, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
-#else
-DARSHAN_FORWARD_DECL(PMPI_File_iwrite, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
-#endif
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_File_iwrite_shared, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
-#else
-DARSHAN_FORWARD_DECL(PMPI_File_iwrite_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
-#endif
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_File_open, int, (MPI_Comm comm, const char *filename, int amode, MPI_Info info, MPI_File *fh));
-#else
-DARSHAN_FORWARD_DECL(PMPI_File_open, int, (MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_File *fh));
-#endif
-DARSHAN_FORWARD_DECL(PMPI_File_read_all_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
-DARSHAN_FORWARD_DECL(PMPI_File_read_all, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-DARSHAN_FORWARD_DECL(PMPI_File_read_at_all, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-DARSHAN_FORWARD_DECL(PMPI_File_read_at_all_begin, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype));
-DARSHAN_FORWARD_DECL(PMPI_File_read_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-DARSHAN_FORWARD_DECL(PMPI_File_read, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-DARSHAN_FORWARD_DECL(PMPI_File_read_ordered_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
-DARSHAN_FORWARD_DECL(PMPI_File_read_ordered, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-DARSHAN_FORWARD_DECL(PMPI_File_read_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_File_set_view, int, (MPI_File fh, MPI_Offset disp, MPI_Datatype etype, MPI_Datatype filetype, const char *datarep, MPI_Info info));
-#else
-DARSHAN_FORWARD_DECL(PMPI_File_set_view, int, (MPI_File fh, MPI_Offset disp, MPI_Datatype etype, MPI_Datatype filetype, char *datarep, MPI_Info info));
-#endif
-DARSHAN_FORWARD_DECL(PMPI_File_sync, int, (MPI_File fh));
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_File_write_all_begin, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype));
-#else
-DARSHAN_FORWARD_DECL(PMPI_File_write_all_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
-#endif
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_File_write_all, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-#else
-DARSHAN_FORWARD_DECL(PMPI_File_write_all, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-#endif
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_File_write_at_all_begin, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype));
-#else
-DARSHAN_FORWARD_DECL(PMPI_File_write_at_all_begin, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype));
-#endif
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_File_write_at_all, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-#else
-DARSHAN_FORWARD_DECL(PMPI_File_write_at_all, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-#endif
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_File_write_at, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-#else
-DARSHAN_FORWARD_DECL(PMPI_File_write_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-#endif
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_File_write, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-#else
-DARSHAN_FORWARD_DECL(PMPI_File_write, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-#endif
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_File_write_ordered_begin, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype));
-#else
-DARSHAN_FORWARD_DECL(PMPI_File_write_ordered_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
-#endif
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_File_write_ordered, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-#else
-DARSHAN_FORWARD_DECL(PMPI_File_write_ordered, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-#endif
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_File_write_shared, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-#else
-DARSHAN_FORWARD_DECL(PMPI_File_write_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-#endif
-DARSHAN_FORWARD_DECL(PMPI_Finalize, int, ());
-DARSHAN_FORWARD_DECL(PMPI_Init, int, (int *argc, char ***argv));
-DARSHAN_FORWARD_DECL(PMPI_Init_thread, int, (int *argc, char ***argv, int required, int *provided));
-DARSHAN_FORWARD_DECL(PMPI_Wtime, double, ());
-DARSHAN_FORWARD_DECL(PMPI_Allreduce, int, (void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm));
-DARSHAN_FORWARD_DECL(PMPI_Bcast, int, (void *buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm));
-DARSHAN_FORWARD_DECL(PMPI_Comm_rank, int, (MPI_Comm comm, int *rank));
-DARSHAN_FORWARD_DECL(PMPI_Comm_size, int, (MPI_Comm comm, int *size));
-DARSHAN_FORWARD_DECL(PMPI_Scan, int, (void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm));
-DARSHAN_FORWARD_DECL(PMPI_Type_commit, int, (MPI_Datatype *datatype));
-DARSHAN_FORWARD_DECL(PMPI_Type_contiguous, int, (int count, MPI_Datatype oldtype, MPI_Datatype *newtype));
-DARSHAN_FORWARD_DECL(PMPI_Type_extent, int, (MPI_Datatype datatype, MPI_Aint *extent));
-DARSHAN_FORWARD_DECL(PMPI_Type_free, int, (MPI_Datatype *datatype));
-DARSHAN_FORWARD_DECL(PMPI_Type_hindexed, int, (int count, int *array_of_blocklengths, MPI_Aint *array_of_displacements, MPI_Datatype oldtype, MPI_Datatype *newtype));
-DARSHAN_FORWARD_DECL(PMPI_Type_get_envelope, int, (MPI_Datatype datatype, int *num_integers, int *num_addresses, int *num_datatypes, int *combiner));
-DARSHAN_FORWARD_DECL(PMPI_Type_size, int, (MPI_Datatype datatype, int *size));
-DARSHAN_FORWARD_DECL(PMPI_Op_create, int, (MPI_User_function *function, int commute, MPI_Op *op));
-DARSHAN_FORWARD_DECL(PMPI_Op_free, int, (MPI_Op *op));
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_Reduce, int, (const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm));
-#else
-DARSHAN_FORWARD_DECL(PMPI_Reduce, int, (void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm));
-#endif
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_Send, int, (const void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm));
-#else
-DARSHAN_FORWARD_DECL(PMPI_Send, int, (void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm));
-#endif
-DARSHAN_FORWARD_DECL(PMPI_Recv, int, (void *buf, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, MPI_Status *status));
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_Gather, int, (const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm));
-#else
-DARSHAN_FORWARD_DECL(PMPI_Gather, int, (void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm));
-#endif
-DARSHAN_FORWARD_DECL(PMPI_Barrier, int, (MPI_Comm comm));
-void resolve_mpi_symbols (void)
-{
-    /*
-     * Overloaded functions
-     */
-    MAP_OR_FAIL(PMPI_File_close);
-    MAP_OR_FAIL(PMPI_File_iread_at);
-    MAP_OR_FAIL(PMPI_File_iread);
-    MAP_OR_FAIL(PMPI_File_iread_shared);
-    MAP_OR_FAIL(PMPI_File_iwrite_at);
-    MAP_OR_FAIL(PMPI_File_iwrite);
-    MAP_OR_FAIL(PMPI_File_iwrite_shared);
-    MAP_OR_FAIL(PMPI_File_open);
-    MAP_OR_FAIL(PMPI_File_read_all_begin);
-    MAP_OR_FAIL(PMPI_File_read_all);
-    MAP_OR_FAIL(PMPI_File_read_at_all_begin);
-    MAP_OR_FAIL(PMPI_File_read_at_all);
-    MAP_OR_FAIL(PMPI_File_read_at);
-    MAP_OR_FAIL(PMPI_File_read);
-    MAP_OR_FAIL(PMPI_File_read_ordered_begin);
-    MAP_OR_FAIL(PMPI_File_read_ordered);
-    MAP_OR_FAIL(PMPI_File_read_shared);
-    MAP_OR_FAIL(PMPI_File_set_view);
-    MAP_OR_FAIL(PMPI_File_sync);
-    MAP_OR_FAIL(PMPI_File_write_all_begin);
-    MAP_OR_FAIL(PMPI_File_write_all);
-    MAP_OR_FAIL(PMPI_File_write_at_all_begin);
-    MAP_OR_FAIL(PMPI_File_write_at_all);
-    MAP_OR_FAIL(PMPI_File_write_at);
-    MAP_OR_FAIL(PMPI_File_write);
-    MAP_OR_FAIL(PMPI_File_write_ordered_begin);
-    MAP_OR_FAIL(PMPI_File_write_ordered);
-    MAP_OR_FAIL(PMPI_File_write_shared);
-    MAP_OR_FAIL(PMPI_Finalize);
-    MAP_OR_FAIL(PMPI_Init);
-    MAP_OR_FAIL(PMPI_Init_thread);
-
-    /*
-     * These function are not intercepted but are used
-     * by darshan itself.
-     */
-    MAP_OR_FAIL(PMPI_Wtime);
-    MAP_OR_FAIL(PMPI_Allreduce);
-    MAP_OR_FAIL(PMPI_Bcast);
-    MAP_OR_FAIL(PMPI_Comm_rank);
-    MAP_OR_FAIL(PMPI_Comm_size);
-    MAP_OR_FAIL(PMPI_Scan);
-    MAP_OR_FAIL(PMPI_Type_commit);
-    MAP_OR_FAIL(PMPI_Type_contiguous);
-    MAP_OR_FAIL(PMPI_Type_extent);
-    MAP_OR_FAIL(PMPI_Type_free);
-    MAP_OR_FAIL(PMPI_Type_hindexed);
-    MAP_OR_FAIL(PMPI_Type_get_envelope);
-    MAP_OR_FAIL(PMPI_Type_size);
-    MAP_OR_FAIL(PMPI_Op_create);
-    MAP_OR_FAIL(PMPI_Op_free);
-    MAP_OR_FAIL(PMPI_Reduce);
-    MAP_OR_FAIL(PMPI_Send);
-    MAP_OR_FAIL(PMPI_Recv);
-    MAP_OR_FAIL(PMPI_Gather);
-    MAP_OR_FAIL(PMPI_Barrier);
-
-    return;
-}
-#endif
-int MPI_Init(int *argc, char ***argv)
+int DARSHAN_DECL(MPI_Init)(int *argc, char ***argv)
{
    int ret;

-#ifdef DARSHAN_PRELOAD
-    resolve_mpi_symbols();
-#endif
+    MAP_OR_FAIL(PMPI_Init);

-    ret = DARSHAN_MPI_CALL(PMPI_Init)(argc, argv);
+    ret = __real_PMPI_Init(argc, argv);
    if(ret != MPI_SUCCESS)
    {
        return(ret);
...
@@ -231,16 +45,15 @@ int MPI_Init(int *argc, char ***argv)
    return(ret);
}
+DARSHAN_WRAPPER_MAP(PMPI_Init, int, (int *argc, char ***argv), MPI_Init(argc, argv))

-int MPI_Init_thread(int *argc, char ***argv, int required, int *provided)
+int DARSHAN_DECL(MPI_Init_thread)(int *argc, char ***argv, int required, int *provided)
{
    int ret;

-#ifdef DARSHAN_PRELOAD
-    resolve_mpi_symbols();
-#endif
+    MAP_OR_FAIL(PMPI_Init_thread);

-    ret = DARSHAN_MPI_CALL(PMPI_Init_thread)(argc, argv, required, provided);
+    ret = __real_PMPI_Init_thread(argc, argv, required, provided);
    if(ret != MPI_SUCCESS)
    {
        return(ret);
...
@@ -258,16 +71,20 @@ int MPI_Init_thread(int *argc, char ***argv, int required, int *provided)
    return(ret);
}
+DARSHAN_WRAPPER_MAP(PMPI_Init_thread, int, (int *argc, char ***argv, int required, int *provided), MPI_Init_thread(argc, argv, required, provided))

-int MPI_Finalize(void)
+int DARSHAN_DECL(MPI_Finalize)(void)
{
    int ret;

+    MAP_OR_FAIL(PMPI_Finalize);

    darshan_core_shutdown();

-    ret = DARSHAN_MPI_CALL(PMPI_Finalize)();
+    ret = __real_PMPI_Finalize();

    return(ret);
}
+DARSHAN_WRAPPER_MAP(PMPI_Finalize, int, (void), MPI_Finalize())

/*
 * Local variables:
...
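Putting the pieces together, here is a rough preprocessed view of the rewritten MPI_Init wrapper in the LD_PRELOAD case, assembled by hand from the DARSHAN_DECL, MAP_OR_FAIL, and DARSHAN_FORWARD_DECL macros shown earlier in this commit. It is a sketch for orientation only; the declaration of __real_PMPI_Init and the elided tail of the wrapper are assumptions about code that lives outside the hunks above.

/* Assumed: DARSHAN_FORWARD_DECL(PMPI_Init, ...) elsewhere defines a function
 * pointer named __real_PMPI_Init that MAP_OR_FAIL fills in lazily. */
extern int (*__real_PMPI_Init)(int *argc, char ***argv);

int MPI_Init(int *argc, char ***argv)   /* DARSHAN_DECL(MPI_Init) under LD_PRELOAD */
{
    int ret;

    /* MAP_OR_FAIL(PMPI_Init): resolve the real PMPI_Init via dlsym on first use. */
    if (!(__real_PMPI_Init))
    {
        __real_PMPI_Init = dlsym(RTLD_NEXT, "PMPI_Init");
        if (!(__real_PMPI_Init)) {
            fprintf(stderr, "Darshan failed to map symbol: %s\n", "PMPI_Init");
            exit(1);
        }
    }

    ret = __real_PMPI_Init(argc, argv);   /* call into the underlying MPI library */
    if (ret != MPI_SUCCESS)
    {
        return(ret);
    }
    /* ... remainder of the wrapper elided, as in the hunk above ... */
    return(ret);
}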
darshan-runtime/lib/darshan-core.c
...
@@ -163,14 +163,14 @@ void darshan_core_initialize(int argc, char **argv)
    int tmpval;
    double tmpfloat;

-   DARSHAN_MPI_CALL(PMPI_Comm_size)(MPI_COMM_WORLD, &nprocs);
-   DARSHAN_MPI_CALL(PMPI_Comm_rank)(MPI_COMM_WORLD, &my_rank);
+   PMPI_Comm_size(MPI_COMM_WORLD, &nprocs);
+   PMPI_Comm_rank(MPI_COMM_WORLD, &my_rank);

    if(getenv("DARSHAN_INTERNAL_TIMING"))
        internal_timing_flag = 1;

    if(internal_timing_flag)
-       init_start = DARSHAN_MPI_CALL(PMPI_Wtime)();
+       init_start = PMPI_Wtime();

    /* setup darshan runtime if darshan is enabled and hasn't been initialized already */
    if(!getenv("DARSHAN_DISABLE") && !darshan_core)
...
@@ -236,7 +236,7 @@ void darshan_core_initialize(int argc, char **argv)
    if(init_core)
    {
        memset(init_core, 0, sizeof(*init_core));

-       init_core->wtime_offset = DARSHAN_MPI_CALL(PMPI_Wtime)();
+       init_core->wtime_offset = PMPI_Wtime();

        /* TODO: do we alloc new memory as we go or just do everything up front? */
...
@@ -325,8 +325,8 @@ void darshan_core_initialize(int argc, char **argv)
    if(internal_timing_flag)
    {
-       init_time = DARSHAN_MPI_CALL(PMPI_Wtime)() - init_start;
-       DARSHAN_MPI_CALL(PMPI_Reduce)(&init_time, &init_max, 1,
+       init_time = PMPI_Wtime() - init_start;
+       PMPI_Reduce(&init_time, &init_max, 1,
            MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);

        if(my_rank == 0)
        {
...
@@ -371,8 +371,8 @@ void darshan_core_shutdown()
        internal_timing_flag = 1;

    /* synchronize before getting start time */
-   DARSHAN_MPI_CALL(PMPI_Barrier)(MPI_COMM_WORLD);
-   start_log_time = DARSHAN_MPI_CALL(PMPI_Wtime)();
+   PMPI_Barrier(MPI_COMM_WORLD);
+   start_log_time = PMPI_Wtime();

    /* disable darhan-core while we shutdown */
    DARSHAN_CORE_LOCK();
...
@@ -398,9 +398,9 @@ void darshan_core_shutdown()
    final_core->log_job_p->end_time = time(NULL);

    /* reduce to report first start and last end time across all ranks at rank 0 */
-   DARSHAN_MPI_CALL(PMPI_Reduce)(&final_core->log_job_p->start_time, &first_start_time,
+   PMPI_Reduce(&final_core->log_job_p->start_time, &first_start_time,
        1, MPI_INT64_T, MPI_MIN, 0, MPI_COMM_WORLD);
-   DARSHAN_MPI_CALL(PMPI_Reduce)(&final_core->log_job_p->end_time, &last_end_time,
+   PMPI_Reduce(&final_core->log_job_p->end_time, &last_end_time,
        1, MPI_INT64_T, MPI_MAX, 0, MPI_COMM_WORLD);
    if(my_rank == 0)
    {
...
@@ -433,7 +433,7 @@ void darshan_core_shutdown()
    }

    /* broadcast log file name */
-   DARSHAN_MPI_CALL(PMPI_Bcast)(logfile_name, PATH_MAX, MPI_CHAR, 0,
+   PMPI_Bcast(logfile_name, PATH_MAX, MPI_CHAR, 0,
        MPI_COMM_WORLD);

    if(strlen(logfile_name) == 0)
...
@@ -456,21 +456,21 @@ void darshan_core_shutdown()
    }

    /* reduce the number of times a module was opened globally and bcast to everyone */
-   DARSHAN_MPI_CALL(PMPI_Allreduce)(local_mod_use, global_mod_use_count,
+   PMPI_Allreduce(local_mod_use, global_mod_use_count,
        DARSHAN_MAX_MODS, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

    /* get a list of records which are shared across all processes */
    darshan_get_shared_records(final_core, &shared_recs, &shared_rec_cnt);

    if(internal_timing_flag)
-       open1 = DARSHAN_MPI_CALL(PMPI_Wtime)();
+       open1 = PMPI_Wtime();
    /* collectively open the darshan log file */
    ret = darshan_log_open_all(logfile_name, &log_fh);
    if(internal_timing_flag)
-       open2 = DARSHAN_MPI_CALL(PMPI_Wtime)();
+       open2 = PMPI_Wtime();

    /* error out if unable to open log file */
-   DARSHAN_MPI_CALL(PMPI_Allreduce)(&ret, &all_ret, 1, MPI_INT,
+   PMPI_Allreduce(&ret, &all_ret, 1, MPI_INT,
        MPI_LOR, MPI_COMM_WORLD);
    if(all_ret != 0)
    {
...
@@ -485,7 +485,7 @@ void darshan_core_shutdown()
    }

    if(internal_timing_flag)
-       job1 = DARSHAN_MPI_CALL(PMPI_Wtime)();
+       job1 = PMPI_Wtime();
    /* rank 0 is responsible for writing the compressed darshan job information */
    if(my_rank == 0)
    {
...
@@ -505,7 +505,7 @@ void darshan_core_shutdown()
        {
            /* write the job information, preallocing space for the log header */
            gz_fp += sizeof(struct darshan_header);
-           all_ret = DARSHAN_MPI_CALL(PMPI_File_write_at)(log_fh, gz_fp,
+           all_ret = PMPI_File_write_at(log_fh, gz_fp,
                final_core->comp_buf, comp_buf_sz, MPI_BYTE, &status);
            if(all_ret != MPI_SUCCESS)
            {
...
@@ -520,7 +520,7 @@ void darshan_core_shutdown()
    }

    /* error out if unable to write job information */
-   DARSHAN_MPI_CALL(PMPI_Bcast)(&all_ret, 1, MPI_INT, 0, MPI_COMM_WORLD);
+   PMPI_Bcast(&all_ret, 1, MPI_INT, 0, MPI_COMM_WORLD);
    if(all_ret != 0)
    {
        free(logfile_name);
...
@@ -528,17 +528,17 @@ void darshan_core_shutdown()
        return;
    }
    if(internal_timing_flag)
-       job2 = DARSHAN_MPI_CALL(PMPI_Wtime)();
+       job2 = PMPI_Wtime();

    if(internal_timing_flag)
-       rec1 = DARSHAN_MPI_CALL(PMPI_Wtime)();
+       rec1 = PMPI_Wtime();
    /* write the record name->id hash to the log file */
    final_core->log_hdr_p->name_map.off = gz_fp;
    ret = darshan_log_write_name_record_hash(log_fh, final_core, &gz_fp);
    final_core->log_hdr_p->name_map.len = gz_fp - final_core->log_hdr_p->name_map.off;

    /* error out if unable to write the name record hash */
-   DARSHAN_MPI_CALL(PMPI_Allreduce)(&ret, &all_ret, 1, MPI_INT,
+   PMPI_Allreduce(&ret, &all_ret, 1, MPI_INT,
        MPI_LOR, MPI_COMM_WORLD);
    if(all_ret != 0)
    {
...
@@ -554,7 +554,7 @@ void darshan_core_shutdown()
        return;
    }
    if(internal_timing_flag)
-       rec2 = DARSHAN_MPI_CALL(PMPI_Wtime)();
+       rec2 = PMPI_Wtime();

    mod_shared_recs = malloc(shared_rec_cnt * sizeof(darshan_record_id));
    assert(mod_shared_recs);
...
@@ -586,7 +586,7 @@ void darshan_core_shutdown()
        }

        if(internal_timing_flag)
-           mod1[i] = DARSHAN_MPI_CALL(PMPI_Wtime)();
+           mod1[i] = PMPI_Wtime();

        /* set the shared record list for this module */
        for(j = 0; j < shared_rec_cnt; j++)
...
@@ -626,7 +626,7 @@ void darshan_core_shutdown()
        free(mod_buf);

        /* error out if the log append failed */
-       DARSHAN_MPI_CALL(PMPI_Allreduce)(&ret, &all_ret, 1, MPI_INT,
+       PMPI_Allreduce(&ret, &all_ret, 1, MPI_INT,
            MPI_LOR, MPI_COMM_WORLD);
        if(all_ret != 0)
        {
...
@@ -643,11 +643,11 @@ void darshan_core_shutdown()
        }

        if(internal_timing_flag)
-           mod2[i] = DARSHAN_MPI_CALL(PMPI_Wtime)();
+           mod2[i] = PMPI_Wtime();
    }

    if(internal_timing_flag)
-       header1 = DARSHAN_MPI_CALL(PMPI_Wtime)();
+       header1 = PMPI_Wtime();

    /* write out log header, after running 2 reductions on header variables:
     * 1) reduce 'partial_flag' variable to determine which modules ran out
     *    of memory for storing data
...
@@ -659,14 +659,14 @@ void darshan_core_shutdown()
        /* rank 0 is responsible for writing the log header */
        final_core->log_hdr_p->comp_type = DARSHAN_ZLIB_COMP;

-       DARSHAN_MPI_CALL(PMPI_Reduce)(
+       PMPI_Reduce(
            MPI_IN_PLACE, &(final_core->log_hdr_p->partial_flag),
            1, MPI_UINT32_T, MPI_BOR, 0, MPI_COMM_WORLD);
-       DARSHAN_MPI_CALL(PMPI_Reduce)(
+       PMPI_Reduce(
            MPI_IN_PLACE, &(final_core->log_hdr_p->mod_ver),
            DARSHAN_MAX_MODS, MPI_UINT32_T, MPI_MAX, 0, MPI_COMM_WORLD);

-       all_ret = DARSHAN_MPI_CALL(PMPI_File_write_at)(log_fh, 0, final_core->log_hdr_p,
+       all_ret = PMPI_File_write_at(log_fh, 0, final_core->log_hdr_p,
            sizeof(struct darshan_header), MPI_BYTE, &status);
        if(all_ret != MPI_SUCCESS)
        {
...
@@ -677,16 +677,16 @@ void darshan_core_shutdown()
    }
    else
    {
-       DARSHAN_MPI_CALL(PMPI_Reduce)(
+       PMPI_Reduce(
            &(final_core->log_hdr_p->partial_flag), &(final_core->log_hdr_p->partial_flag),
            1, MPI_UINT32_T, MPI_BOR, 0, MPI_COMM_WORLD);
-       DARSHAN_MPI_CALL(PMPI_Reduce)(
+       PMPI_Reduce(
            &(final_core->log_hdr_p->mod_ver), &(final_core->log_hdr_p->mod_ver),
            DARSHAN_MAX_MODS, MPI_UINT32_T, MPI_MAX, 0, MPI_COMM_WORLD);
    }

    /* error out if unable to write log header */
-   DARSHAN_MPI_CALL(PMPI_Bcast)(&all_ret, 1, MPI_INT, 0, MPI_COMM_WORLD);
+   PMPI_Bcast(&all_ret, 1, MPI_INT, 0, MPI_COMM_WORLD);
    if(all_ret != 0)
    {
        free(logfile_name);
...
@@ -694,9 +694,9 @@ void darshan_core_shutdown()
        return;
    }
    if(internal_timing_flag)
-       header2 = DARSHAN_MPI_CALL(PMPI_Wtime)();
+       header2 = PMPI_Wtime();

-   DARSHAN_MPI_CALL(PMPI_File_close)(&log_fh);
+   PMPI_File_close(&log_fh);

    /* if we got this far, there are no errors, so rename from *.darshan_partial
     * to *-<logwritetime>.darshan, which indicates that this log file is
...
@@ -723,7 +723,7 @@ void darshan_core_shutdown()
        if(new_logfile_name)
        {
            new_logfile_name[0] = '\0';
-           end_log_time = DARSHAN_MPI_CALL(PMPI_Wtime)();
+           end_log_time = PMPI_Wtime();
            strcat(new_logfile_name, logfile_name);
            tmp_index = strstr(new_logfile_name, ".darshan_partial");
            sprintf(tmp_index, "_%d.darshan", (int)(end_log_time - start_log_time + 1));
...
@@ -749,7 +749,7 @@ void darshan_core_shutdown()
        double mod_tm[DARSHAN_MAX_MODS], mod_slowest[DARSHAN_MAX_MODS];
        double all_tm, all_slowest;

-       tm_end = DARSHAN_MPI_CALL(PMPI_Wtime)();
+       tm_end = PMPI_Wtime();

        open_tm = open2 - open1;
        header_tm = header2 - header1;
...
@@ -761,17 +761,17 @@ void darshan_core_shutdown()
            mod_tm[i] = mod2[i] - mod1[i];
        }

-       DARSHAN_MPI_CALL(PMPI_Reduce)(&open_tm, &open_slowest, 1,
+       PMPI_Reduce(&open_tm, &open_slowest, 1,
            MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
-       DARSHAN_MPI_CALL(PMPI_Reduce)(&header_tm, &header_slowest, 1,
+       PMPI_Reduce(&header_tm, &header_slowest, 1,
            MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
-       DARSHAN_MPI_CALL(PMPI_Reduce)(&job_tm, &job_slowest, 1,
+       PMPI_Reduce(&job_tm, &job_slowest, 1,
            MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
-       DARSHAN_MPI_CALL(PMPI_Reduce)(&rec_tm, &rec_slowest, 1,
+       PMPI_Reduce(&rec_tm, &rec_slowest, 1,