Commit 48dbd2e8 authored Oct 13, 2017 by Shane Snyder
Merge branch 'snyder-wrap-mod'
Parents: c7b5ac90 ccddff36
Showing 12 changed files with 596 additions and 388 deletions (+596, -388)
darshan-runtime/Makefile.in (+1, -0)
darshan-runtime/darshan.h (+25, -5)
darshan-runtime/lib/darshan-bgq.c (+3, -3)
darshan-runtime/lib/darshan-core-init-finalize.c (+13, -196)
darshan-runtime/lib/darshan-core.c (+59, -59)
darshan-runtime/lib/darshan-hdf5.c (+6, -6)
darshan-runtime/lib/darshan-mpiio.c (+365, -86)
darshan-runtime/lib/darshan-pnetcdf.c (+7, -7)
darshan-runtime/lib/darshan-posix.c (+13, -13)
darshan-runtime/lib/darshan-stdio.c (+13, -13)
darshan-runtime/share/ld-opts/darshan-base-ld-opts.in (+7, -0)
darshan-runtime/share/ld-opts/darshan-mpiio-ld-opts (+84, -0)
darshan-runtime/Makefile.in
@@ -199,6 +199,7 @@ ifdef BUILD_HDF5_MODULE
 endif
 	install -m 644 $(srcdir)/share/ld-opts/darshan-pnetcdf-ld-opts $(datarootdir)/ld-opts/darshan-pnetcdf-ld-opts
 	install -m 644 $(srcdir)/share/ld-opts/darshan-stdio-ld-opts $(datarootdir)/ld-opts/darshan-stdio-ld-opts
+	install -m 644 $(srcdir)/share/ld-opts/darshan-mpiio-ld-opts $(datarootdir)/ld-opts/darshan-mpiio-ld-opts
 ifdef ENABLE_MMAP_LOGS
 	install -m 755 share/darshan-mmap-epilog.sh $(datarootdir)/darshan-mmap-epilog.sh
 endif
darshan-runtime/darshan.h
@@ -28,15 +28,27 @@
#define DARSHAN_DECL(__func) __func
#define DARSHAN_MPI_CALL(__func) __real_ ## __func

/* creates P* variant of MPI symbols for LD_PRELOAD so that we can handle
 * language bindings that map to MPI or PMPI symbols under the covers.
 */
#define DARSHAN_WRAPPER_MAP(__func,__ret,__args,__fcall) \
	__ret __func __args { \
		__ret i; \
		i = __fcall; \
		return i; \
	}

/* Map the desired function call to a pointer called __real_NAME at run
 * time. Note that we fall back to looking for the same symbol with a P
 * prefix to handle MPI bindings that call directly to the PMPI layer.
 */
#define MAP_OR_FAIL(__func) \
	if (!(__real_ ## __func)) \
	{ \
		__real_ ## __func = dlsym(RTLD_NEXT, #__func); \
		if(!(__real_ ## __func)) { \
			fprintf(stderr, "Darshan failed to map symbol: %s\n", #__func); \
			exit(1); \
		} \
	}
@@ -47,7 +59,15 @@
#define DARSHAN_DECL(__name) __wrap_ ## __name
#define DARSHAN_MPI_CALL(__func) __func

/* creates P* variant of MPI symbols for static linking so that we can handle
 * language bindings that map to MPI or PMPI symbols under the covers.
 */
#define DARSHAN_WRAPPER_MAP(__func,__ret,__args,__fcall) \
	__ret __wrap_ ## __func __args { \
		__ret i; \
		i = __wrap_ ## __fcall; \
		return i; \
	}

#define MAP_OR_FAIL(__func)
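For orientation, here is a hedged sketch of how a module wrapper can combine these macros. The function chosen (MPI_File_close) and the body are illustrative assumptions, not lines taken from the collapsed module diffs below, and the sketch assumes the existing DARSHAN_FORWARD_DECL helper from darshan.h.

/* Illustrative sketch only, assuming the darshan.h macros shown above. */
DARSHAN_FORWARD_DECL(PMPI_File_close, int, (MPI_File *fh));

int DARSHAN_DECL(MPI_File_close)(MPI_File *fh)
{
    int ret;

    MAP_OR_FAIL(PMPI_File_close);      /* LD_PRELOAD build: dlsym() the next symbol;
                                        * static build: no-op, linker supplies __real_ */
    ret = __real_PMPI_File_close(fh);
    /* ... module instrumentation would record counters for this handle here ... */
    return ret;
}

/* Emit the PMPI_File_close companion that forwards to the MPI_File_close
 * wrapper above, so language bindings that call the PMPI layer directly are
 * still intercepted in both the LD_PRELOAD and static (--wrap) builds. */
DARSHAN_WRAPPER_MAP(PMPI_File_close, int, (MPI_File *fh), MPI_File_close(fh))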
darshan-runtime/lib/darshan-bgq.c
@@ -196,13 +196,13 @@ static void bgq_shutdown(
     {
         bgq_runtime->record->base_rec.rank = -1;
-        DARSHAN_MPI_CALL(PMPI_Comm_size)(mod_comm, &nprocs);
+        PMPI_Comm_size(mod_comm, &nprocs);
         ion_ids = malloc(sizeof(*ion_ids) * nprocs);
         result = (ion_ids != NULL);
         if(!result)
             bgq_runtime->record->counters[BGQ_INODES] = -1;
     }
-    DARSHAN_MPI_CALL(PMPI_Bcast)(&result, 1, MPI_INT, 0, mod_comm);
+    PMPI_Bcast(&result, 1, MPI_INT, 0, mod_comm);

     /* caclulate the number of I/O nodes */
     if(result)
@@ -210,7 +210,7 @@ static void bgq_shutdown(
         int i, found;
         uint64_t val;

-        DARSHAN_MPI_CALL(PMPI_Gather)(&bgq_runtime->record->counters[BGQ_INODES],
+        PMPI_Gather(&bgq_runtime->record->counters[BGQ_INODES],
                 1, MPI_LONG_LONG_INT, ion_ids,
darshan-runtime/lib/darshan-core-init-finalize.c
This diff is collapsed.
darshan-runtime/lib/darshan-core.c
This diff is collapsed.
darshan-runtime/lib/darshan-hdf5.c
@@ -449,15 +449,15 @@ static void hdf5_shutdown(
        /* construct a datatype for a HDF5 file record. This is serving no purpose
         * except to make sure we can do a reduction on proper boundaries
         */
-        DARSHAN_MPI_CALL(PMPI_Type_contiguous)(sizeof(struct darshan_hdf5_file),
+        PMPI_Type_contiguous(sizeof(struct darshan_hdf5_file),
            MPI_BYTE, &red_type);
-        DARSHAN_MPI_CALL(PMPI_Type_commit)(&red_type);
+        PMPI_Type_commit(&red_type);

        /* register a HDF5 file record reduction operator */
-        DARSHAN_MPI_CALL(PMPI_Op_create)(hdf5_record_reduction_op, 1, &red_op);
+        PMPI_Op_create(hdf5_record_reduction_op, 1, &red_op);

        /* reduce shared HDF5 file records */
-        DARSHAN_MPI_CALL(PMPI_Reduce)(red_send_buf, red_recv_buf,
+        PMPI_Reduce(red_send_buf, red_recv_buf,
            shared_rec_count, red_type, red_op, 0, mod_comm);

        /* clean up reduction state */
@@ -473,8 +473,8 @@ static void hdf5_shutdown(
            hdf5_rec_count -= shared_rec_count;
        }

-        DARSHAN_MPI_CALL(PMPI_Type_free)(&red_type);
-        DARSHAN_MPI_CALL(PMPI_Op_free)(&red_op);
+        PMPI_Type_free(&red_type);
+        PMPI_Op_free(&red_op);
    }

    /* update output buffer size to account for shared file reduction */
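The Type_contiguous / Op_create / Reduce sequence above is the shared-record reduction pattern that every module shutdown routine in this commit follows. A standalone sketch of the same pattern, using made-up record and function names rather than Darshan's own and plain MPI calls:

/* Sketch: reduce one fixed-size record per shared file down to rank 0. */
#include <mpi.h>

struct my_record { long long bytes; double max_time; };   /* hypothetical record */

static void my_record_reduce(void *in, void *inout, int *len, MPI_Datatype *dt)
{
    struct my_record *a = in, *b = inout;
    for (int i = 0; i < *len; i++) {
        b[i].bytes += a[i].bytes;                          /* sum byte counts */
        if (a[i].max_time > b[i].max_time)                 /* keep slowest time */
            b[i].max_time = a[i].max_time;
    }
}

void reduce_shared_records(struct my_record *send, struct my_record *recv,
                           int count, MPI_Comm comm)
{
    MPI_Datatype red_type;
    MPI_Op red_op;

    /* treat each record as an opaque byte blob so reduction boundaries
     * always fall on whole records */
    MPI_Type_contiguous(sizeof(struct my_record), MPI_BYTE, &red_type);
    MPI_Type_commit(&red_type);
    MPI_Op_create(my_record_reduce, 1 /* commutative */, &red_op);

    MPI_Reduce(send, recv, count, red_type, red_op, 0, comm);

    MPI_Op_free(&red_op);
    MPI_Type_free(&red_type);
}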
darshan-runtime/lib/darshan-mpiio.c
This diff is collapsed.
darshan-runtime/lib/darshan-pnetcdf.c
@@ -94,7 +94,7 @@ static int my_rank = -1;
        if(newpath != __path) free(newpath); \
        break; \
    } \
-    DARSHAN_MPI_CALL(PMPI_Comm_size)(__comm, &comm_size); \
+    PMPI_Comm_size(__comm, &comm_size); \
    if(rec_ref->file_rec->fcounters[PNETCDF_F_OPEN_TIMESTAMP] == 0) \
        rec_ref->file_rec->fcounters[PNETCDF_F_OPEN_TIMESTAMP] = __tm1; \
    if(comm_size == 1) rec_ref->file_rec->counters[PNETCDF_INDEP_OPENS] += 1; \
@@ -406,15 +406,15 @@ static void pnetcdf_shutdown(
        /* construct a datatype for a PNETCDF file record. This is serving no purpose
         * except to make sure we can do a reduction on proper boundaries
         */
-        DARSHAN_MPI_CALL(PMPI_Type_contiguous)(sizeof(struct darshan_pnetcdf_file),
+        PMPI_Type_contiguous(sizeof(struct darshan_pnetcdf_file),
            MPI_BYTE, &red_type);
-        DARSHAN_MPI_CALL(PMPI_Type_commit)(&red_type);
+        PMPI_Type_commit(&red_type);

        /* register a PNETCDF file record reduction operator */
-        DARSHAN_MPI_CALL(PMPI_Op_create)(pnetcdf_record_reduction_op, 1, &red_op);
+        PMPI_Op_create(pnetcdf_record_reduction_op, 1, &red_op);

        /* reduce shared PNETCDF file records */
-        DARSHAN_MPI_CALL(PMPI_Reduce)(red_send_buf, red_recv_buf,
+        PMPI_Reduce(red_send_buf, red_recv_buf,
            shared_rec_count, red_type, red_op, 0, mod_comm);

        /* clean up reduction state */
@@ -430,8 +430,8 @@ static void pnetcdf_shutdown(
            pnetcdf_rec_count -= shared_rec_count;
        }

-        DARSHAN_MPI_CALL(PMPI_Type_free)(&red_type);
-        DARSHAN_MPI_CALL(PMPI_Op_free)(&red_op);
+        PMPI_Type_free(&red_type);
+        PMPI_Op_free(&red_op);
    }

    /* update output buffer size to account for shared file reduction */
darshan-runtime/lib/darshan-posix.c
@@ -1656,11 +1656,11 @@ static void posix_shared_record_variance(MPI_Comm mod_comm,
    struct darshan_variance_dt *var_send_buf = NULL;
    struct darshan_variance_dt *var_recv_buf = NULL;

-    DARSHAN_MPI_CALL(PMPI_Type_contiguous)(sizeof(struct darshan_variance_dt),
+    PMPI_Type_contiguous(sizeof(struct darshan_variance_dt),
        MPI_BYTE, &var_dt);
-    DARSHAN_MPI_CALL(PMPI_Type_commit)(&var_dt);
+    PMPI_Type_commit(&var_dt);

-    DARSHAN_MPI_CALL(PMPI_Op_create)(darshan_variance_reduce, 1, &var_op);
+    PMPI_Op_create(darshan_variance_reduce, 1, &var_op);

    var_send_buf = malloc(shared_rec_count * sizeof(struct darshan_variance_dt));
    if(!var_send_buf)
@@ -1685,7 +1685,7 @@ static void posix_shared_record_variance(MPI_Comm mod_comm,
            inrec_array[i].fcounters[POSIX_F_META_TIME];
    }

-    DARSHAN_MPI_CALL(PMPI_Reduce)(var_send_buf, var_recv_buf, shared_rec_count,
+    PMPI_Reduce(var_send_buf, var_recv_buf, shared_rec_count,
        var_dt, var_op, 0, mod_comm);

    if(my_rank == 0)
@@ -1708,7 +1708,7 @@ static void posix_shared_record_variance(MPI_Comm mod_comm,
            inrec_array[i].counters[POSIX_BYTES_WRITTEN];
    }

-    DARSHAN_MPI_CALL(PMPI_Reduce)(var_send_buf, var_recv_buf, shared_rec_count,
+    PMPI_Reduce(var_send_buf, var_recv_buf, shared_rec_count,
        var_dt, var_op, 0, mod_comm);

    if(my_rank == 0)
@@ -1720,8 +1720,8 @@ static void posix_shared_record_variance(MPI_Comm mod_comm,
        }
    }

-    DARSHAN_MPI_CALL(PMPI_Type_free)(&var_dt);
-    DARSHAN_MPI_CALL(PMPI_Op_free)(&var_op);
+    PMPI_Type_free(&var_dt);
+    PMPI_Op_free(&var_op);

    free(var_send_buf);
    free(var_recv_buf);
@@ -1906,15 +1906,15 @@ static void posix_shutdown(
        /* construct a datatype for a POSIX file record. This is serving no purpose
         * except to make sure we can do a reduction on proper boundaries
         */
-        DARSHAN_MPI_CALL(PMPI_Type_contiguous)(sizeof(struct darshan_posix_file),
+        PMPI_Type_contiguous(sizeof(struct darshan_posix_file),
            MPI_BYTE, &red_type);
-        DARSHAN_MPI_CALL(PMPI_Type_commit)(&red_type);
+        PMPI_Type_commit(&red_type);

        /* register a POSIX file record reduction operator */
-        DARSHAN_MPI_CALL(PMPI_Op_create)(posix_record_reduction_op, 1, &red_op);
+        PMPI_Op_create(posix_record_reduction_op, 1, &red_op);

        /* reduce shared POSIX file records */
-        DARSHAN_MPI_CALL(PMPI_Reduce)(red_send_buf, red_recv_buf,
+        PMPI_Reduce(red_send_buf, red_recv_buf,
            shared_rec_count, red_type, red_op, 0, mod_comm);

        /* get the time and byte variances for shared files */
@@ -1934,8 +1934,8 @@ static void posix_shutdown(
            posix_rec_count -= shared_rec_count;
        }

-        DARSHAN_MPI_CALL(PMPI_Type_free)(&red_type);
-        DARSHAN_MPI_CALL(PMPI_Op_free)(&red_op);
+        PMPI_Type_free(&red_type);
+        PMPI_Op_free(&red_op);
    }

    /* update output buffer size to account for shared file reduction */
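The POSIX (and, below, STDIO) variance hunks feed per-rank partial statistics into darshan_variance_reduce, which is defined elsewhere in the runtime and not shown in this diff. As a hedged, standalone sketch of that kind of combine step, here is the standard parallel variance merge, with assumed field names rather than Darshan's actual struct darshan_variance_dt layout:

/* Sketch only: merge (count n, sum T, sum of squared deviations S) triples. */
#include <mpi.h>

struct var_dt { double n, T, S; };   /* assumed layout for illustration */

static void var_reduce(void *in, void *inout, int *len, MPI_Datatype *dt)
{
    struct var_dt *a = in, *b = inout;
    for (int i = 0; i < *len; i++) {
        if (a[i].n == 0) continue;                   /* nothing to merge in */
        if (b[i].n == 0) { b[i] = a[i]; continue; }  /* take the other side whole */
        double delta = a[i].T / a[i].n - b[i].T / b[i].n;
        double n = a[i].n + b[i].n;
        b[i].S += a[i].S + delta * delta * a[i].n * b[i].n / n;
        b[i].T += a[i].T;
        b[i].n  = n;
    }
}
/* After MPI_Reduce with this op, rank 0 obtains the population variance of
 * record i as recv[i].S / recv[i].n. */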
darshan-runtime/lib/darshan-stdio.c
@@ -1184,15 +1184,15 @@ static void stdio_shutdown(
        /* construct a datatype for a STDIO file record. This is serving no purpose
         * except to make sure we can do a reduction on proper boundaries
         */
-        DARSHAN_MPI_CALL(PMPI_Type_contiguous)(sizeof(struct darshan_stdio_file),
+        PMPI_Type_contiguous(sizeof(struct darshan_stdio_file),
            MPI_BYTE, &red_type);
-        DARSHAN_MPI_CALL(PMPI_Type_commit)(&red_type);
+        PMPI_Type_commit(&red_type);

        /* register a STDIO file record reduction operator */
-        DARSHAN_MPI_CALL(PMPI_Op_create)(stdio_record_reduction_op, 1, &red_op);
+        PMPI_Op_create(stdio_record_reduction_op, 1, &red_op);

        /* reduce shared STDIO file records */
-        DARSHAN_MPI_CALL(PMPI_Reduce)(red_send_buf, red_recv_buf,
+        PMPI_Reduce(red_send_buf, red_recv_buf,
            shared_rec_count, red_type, red_op, 0, mod_comm);

        /* get the time and byte variances for shared files */
@@ -1212,8 +1212,8 @@ static void stdio_shutdown(
            stdio_rec_count -= shared_rec_count;
        }

-        DARSHAN_MPI_CALL(PMPI_Type_free)(&red_type);
-        DARSHAN_MPI_CALL(PMPI_Op_free)(&red_op);
+        PMPI_Type_free(&red_type);
+        PMPI_Op_free(&red_op);
    }

    /* filter out any records that have no activity on them; this is
@@ -1323,11 +1323,11 @@ static void stdio_shared_record_variance(MPI_Comm mod_comm,
    struct darshan_variance_dt *var_send_buf = NULL;
    struct darshan_variance_dt *var_recv_buf = NULL;

-    DARSHAN_MPI_CALL(PMPI_Type_contiguous)(sizeof(struct darshan_variance_dt),
+    PMPI_Type_contiguous(sizeof(struct darshan_variance_dt),
        MPI_BYTE, &var_dt);
-    DARSHAN_MPI_CALL(PMPI_Type_commit)(&var_dt);
+    PMPI_Type_commit(&var_dt);

-    DARSHAN_MPI_CALL(PMPI_Op_create)(darshan_variance_reduce, 1, &var_op);
+    PMPI_Op_create(darshan_variance_reduce, 1, &var_op);

    var_send_buf = malloc(shared_rec_count * sizeof(struct darshan_variance_dt));
    if(!var_send_buf)
@@ -1352,7 +1352,7 @@ static void stdio_shared_record_variance(MPI_Comm mod_comm,
            inrec_array[i].fcounters[STDIO_F_META_TIME];
    }

-    DARSHAN_MPI_CALL(PMPI_Reduce)(var_send_buf, var_recv_buf, shared_rec_count,
+    PMPI_Reduce(var_send_buf, var_recv_buf, shared_rec_count,
        var_dt, var_op, 0, mod_comm);

    if(my_rank == 0)
@@ -1375,7 +1375,7 @@ static void stdio_shared_record_variance(MPI_Comm mod_comm,
            inrec_array[i].counters[STDIO_BYTES_WRITTEN];
    }

-    DARSHAN_MPI_CALL(PMPI_Reduce)(var_send_buf, var_recv_buf, shared_rec_count,
+    PMPI_Reduce(var_send_buf, var_recv_buf, shared_rec_count,
        var_dt, var_op, 0, mod_comm);

    if(my_rank == 0)
@@ -1387,8 +1387,8 @@ static void stdio_shared_record_variance(MPI_Comm mod_comm,
        }
    }

-    DARSHAN_MPI_CALL(PMPI_Type_free)(&var_dt);
-    DARSHAN_MPI_CALL(PMPI_Op_free)(&var_op);
+    PMPI_Type_free(&var_dt);
+    PMPI_Op_free(&var_op);

    free(var_send_buf);
    free(var_recv_buf);
darshan-runtime/share/ld-opts/darshan-base-ld-opts.in
--undefined=MPI_Init
--undefined=MPI_Wtime
--wrap=MPI_Init
--wrap=MPI_Init_thread
--wrap=MPI_Finalize
--wrap=PMPI_Init
--wrap=PMPI_Init_thread
--wrap=PMPI_Finalize
@@darshan_share_path@/ld-opts/darshan-posix-ld-opts
@@darshan_share_path@/ld-opts/darshan-pnetcdf-ld-opts
@@darshan_share_path@/ld-opts/darshan-stdio-ld-opts
@@darshan_share_path@/ld-opts/darshan-mpiio-ld-opts
@DARSHAN_HDF5_LD_OPTS@
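These options rely on GNU ld's --wrap and @file features: --wrap=SYMBOL reroutes references to SYMBOL to __wrap_SYMBOL and exposes the original definition as __real_SYMBOL, while each @path line pulls in a further options file, which is how the new darshan-mpiio-ld-opts list below gets included. A minimal standalone illustration of the --wrap side (not Darshan code):

/* wrap_demo.c: link with  mpicc app.c wrap_demo.c -Wl,--wrap=MPI_Init  */
#include <mpi.h>
#include <stdio.h>

int __real_MPI_Init(int *argc, char ***argv);   /* resolved by the linker */

int __wrap_MPI_Init(int *argc, char ***argv)
{
    int ret = __real_MPI_Init(argc, argv);      /* call the real MPI_Init */
    fprintf(stderr, "MPI_Init intercepted\n");  /* instrumentation hook */
    return ret;
}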
darshan-runtime/share/ld-opts/darshan-mpiio-ld-opts
new file, mode 100644
--wrap=MPI_File_close
--wrap=MPI_File_iread_at
--wrap=MPI_File_iread
--wrap=MPI_File_iread_shared
--wrap=MPI_File_iwrite_at
--wrap=MPI_File_iwrite_at
--wrap=MPI_File_iwrite
--wrap=MPI_File_iwrite
--wrap=MPI_File_iwrite_shared
--wrap=MPI_File_iwrite_shared
--wrap=MPI_File_open
--wrap=MPI_File_open
--wrap=MPI_File_read_all_begin
--wrap=MPI_File_read_all
--wrap=MPI_File_read_at_all
--wrap=MPI_File_read_at_all_begin
--wrap=MPI_File_read_at
--wrap=MPI_File_read
--wrap=MPI_File_read_ordered_begin
--wrap=MPI_File_read_ordered
--wrap=MPI_File_read_shared
--wrap=MPI_File_set_view
--wrap=MPI_File_set_view
--wrap=MPI_File_sync
--wrap=MPI_File_write_all_begin
--wrap=MPI_File_write_all_begin
--wrap=MPI_File_write_all
--wrap=MPI_File_write_all
--wrap=MPI_File_write_at_all_begin
--wrap=MPI_File_write_at_all_begin
--wrap=MPI_File_write_at_all
--wrap=MPI_File_write_at_all
--wrap=MPI_File_write_at
--wrap=MPI_File_write_at
--wrap=MPI_File_write
--wrap=MPI_File_write
--wrap=MPI_File_write_ordered_begin
--wrap=MPI_File_write_ordered_begin
--wrap=MPI_File_write_ordered
--wrap=MPI_File_write_ordered
--wrap=MPI_File_write_shared
--wrap=MPI_File_write_shared
--wrap=PMPI_File_close
--wrap=PMPI_File_iread_at
--wrap=PMPI_File_iread
--wrap=PMPI_File_iread_shared
--wrap=PMPI_File_iwrite_at
--wrap=PMPI_File_iwrite_at
--wrap=PMPI_File_iwrite
--wrap=PMPI_File_iwrite
--wrap=PMPI_File_iwrite_shared
--wrap=PMPI_File_iwrite_shared
--wrap=PMPI_File_open
--wrap=PMPI_File_open
--wrap=PMPI_File_read_all_begin
--wrap=PMPI_File_read_all
--wrap=PMPI_File_read_at_all
--wrap=PMPI_File_read_at_all_begin
--wrap=PMPI_File_read_at
--wrap=PMPI_File_read
--wrap=PMPI_File_read_ordered_begin
--wrap=PMPI_File_read_ordered
--wrap=PMPI_File_read_shared
--wrap=PMPI_File_set_view
--wrap=PMPI_File_set_view
--wrap=PMPI_File_sync
--wrap=PMPI_File_write_all_begin
--wrap=PMPI_File_write_all_begin
--wrap=PMPI_File_write_all
--wrap=PMPI_File_write_all
--wrap=PMPI_File_write_at_all_begin
--wrap=PMPI_File_write_at_all_begin
--wrap=PMPI_File_write_at_all
--wrap=PMPI_File_write_at_all
--wrap=PMPI_File_write_at
--wrap=PMPI_File_write_at
--wrap=PMPI_File_write
--wrap=PMPI_File_write
--wrap=PMPI_File_write_ordered_begin
--wrap=PMPI_File_write_ordered_begin
--wrap=PMPI_File_write_ordered
--wrap=PMPI_File_write_ordered
--wrap=PMPI_File_write_shared
--wrap=PMPI_File_write_shared