Commit 1dabc539 authored by Philip Carns's avatar Philip Carns

refactor LD_PRELOAD MPI wrappers

- match convention of other modules where possible
- avoid trampoline for non-PMPI calls
- resolve underlying symbols at run time
- use symbol name directly in wrapper
parent 4727d0a1
......@@ -28,29 +28,30 @@
#define DARSHAN_DECL(__func) __func
#define DARSHAN_MPI_DECL(__func) __wrap_ ## __func
#define DARSHAN_MPI_MAP(__func,__ret,__args,__fcall) \
__ret __func __args { \
__ret i; \
i = __wrap_ ## __fcall; \
return i; \
} \
/* creates P* variant of MPI symbols for LD_PRELOAD so that we can handle
* language bindings that map to MPI or PMPI symbols under the covers.
*/
#define DARSHAN_PMPI_MAP(__func,__ret,__args,__fcall) \
__ret P ## __func __args { \
__ret i; \
i = __wrap_ ## __fcall; \
i = __fcall; \
return i; \
}
#define DARSHAN_MPI_CALL(__func) __real_ ## __func
/* Map the desired function call to a pointer called __real_NAME at run
* time. Note that we fall back to looking for the same symbol with a P
* prefix to handle MPI bindings that call directly to the PMPI layer.
*/
#define MAP_OR_FAIL(__func) \
if (!(__real_ ## __func)) \
{ \
__real_ ## __func = dlsym(RTLD_NEXT, #__func); \
if(!(__real_ ## __func)) { \
fprintf(stderr, "Darshan failed to map symbol: %s\n", #__func); \
exit(1); \
__real_ ## __func = dlsym(RTLD_NEXT, "P" #__func); \
if(!(__real_ ## __func)) { \
fprintf(stderr, "Darshan failed to map symbol: %s\n", #__func); \
exit(1); \
} \
} \
}
......@@ -61,11 +62,7 @@
#define DARSHAN_DECL(__name) __wrap_ ## __name
#define DARSHAN_MPI_DECL(__func) __func
#define DARSHAN_MPI_MAP(__func,__ret,__args,__fcall)
#define DARSHAN_MPI_CALL(__func) __func
#define DARSHAN_PMPI_MAP(__func,__ret,__args,__fcall)
#define MAP_OR_FAIL(__func)
......@@ -200,5 +197,4 @@ int darshan_core_excluded_path(
*/
int darshan_core_disabled_instrumentation(void);
#endif /* __DARSHAN_H */
......@@ -196,13 +196,13 @@ static void bgq_shutdown(
{
bgq_runtime->record->base_rec.rank = -1;
DARSHAN_MPI_CALL(PMPI_Comm_size)(mod_comm, &nprocs);
PMPI_Comm_size(mod_comm, &nprocs);
ion_ids = malloc(sizeof(*ion_ids)*nprocs);
result = (ion_ids != NULL);
if(!result)
bgq_runtime->record->counters[BGQ_INODES] = -1;
}
DARSHAN_MPI_CALL(PMPI_Bcast)(&result, 1, MPI_INT, 0, mod_comm);
PMPI_Bcast(&result, 1, MPI_INT, 0, mod_comm);
/* calculate the number of I/O nodes */
if (result)
......@@ -210,7 +210,7 @@ static void bgq_shutdown(
int i, found;
uint64_t val;
DARSHAN_MPI_CALL(PMPI_Gather)(&bgq_runtime->record->counters[BGQ_INODES],
PMPI_Gather(&bgq_runtime->record->counters[BGQ_INODES],
1,
MPI_LONG_LONG_INT,
ion_ids,
......
This diff is collapsed.
......@@ -449,15 +449,15 @@ static void hdf5_shutdown(
/* construct a datatype for a HDF5 file record. This is serving no purpose
* except to make sure we can do a reduction on proper boundaries
*/
DARSHAN_MPI_CALL(PMPI_Type_contiguous)(sizeof(struct darshan_hdf5_file),
PMPI_Type_contiguous(sizeof(struct darshan_hdf5_file),
MPI_BYTE, &red_type);
DARSHAN_MPI_CALL(PMPI_Type_commit)(&red_type);
PMPI_Type_commit(&red_type);
/* register a HDF5 file record reduction operator */
DARSHAN_MPI_CALL(PMPI_Op_create)(hdf5_record_reduction_op, 1, &red_op);
PMPI_Op_create(hdf5_record_reduction_op, 1, &red_op);
/* reduce shared HDF5 file records */
DARSHAN_MPI_CALL(PMPI_Reduce)(red_send_buf, red_recv_buf,
PMPI_Reduce(red_send_buf, red_recv_buf,
shared_rec_count, red_type, red_op, 0, mod_comm);
/* clean up reduction state */
......@@ -473,8 +473,8 @@ static void hdf5_shutdown(
hdf5_rec_count -= shared_rec_count;
}
DARSHAN_MPI_CALL(PMPI_Type_free)(&red_type);
DARSHAN_MPI_CALL(PMPI_Op_free)(&red_op);
PMPI_Type_free(&red_type);
PMPI_Op_free(&red_op);
}
/* update output buffer size to account for shared file reduction */
......
This diff is collapsed.
......@@ -94,7 +94,7 @@ static int my_rank = -1;
if(newpath != __path) free(newpath); \
break; \
} \
DARSHAN_MPI_CALL(PMPI_Comm_size)(__comm, &comm_size); \
PMPI_Comm_size(__comm, &comm_size); \
if(rec_ref->file_rec->fcounters[PNETCDF_F_OPEN_TIMESTAMP] == 0) \
rec_ref->file_rec->fcounters[PNETCDF_F_OPEN_TIMESTAMP] = __tm1; \
if(comm_size == 1) rec_ref->file_rec->counters[PNETCDF_INDEP_OPENS] += 1; \
......@@ -406,15 +406,15 @@ static void pnetcdf_shutdown(
/* construct a datatype for a PNETCDF file record. This is serving no purpose
* except to make sure we can do a reduction on proper boundaries
*/
DARSHAN_MPI_CALL(PMPI_Type_contiguous)(sizeof(struct darshan_pnetcdf_file),
PMPI_Type_contiguous(sizeof(struct darshan_pnetcdf_file),
MPI_BYTE, &red_type);
DARSHAN_MPI_CALL(PMPI_Type_commit)(&red_type);
PMPI_Type_commit(&red_type);
/* register a PNETCDF file record reduction operator */
DARSHAN_MPI_CALL(PMPI_Op_create)(pnetcdf_record_reduction_op, 1, &red_op);
PMPI_Op_create(pnetcdf_record_reduction_op, 1, &red_op);
/* reduce shared PNETCDF file records */
DARSHAN_MPI_CALL(PMPI_Reduce)(red_send_buf, red_recv_buf,
PMPI_Reduce(red_send_buf, red_recv_buf,
shared_rec_count, red_type, red_op, 0, mod_comm);
/* clean up reduction state */
......@@ -430,8 +430,8 @@ static void pnetcdf_shutdown(
pnetcdf_rec_count -= shared_rec_count;
}
DARSHAN_MPI_CALL(PMPI_Type_free)(&red_type);
DARSHAN_MPI_CALL(PMPI_Op_free)(&red_op);
PMPI_Type_free(&red_type);
PMPI_Op_free(&red_op);
}
/* update output buffer size to account for shared file reduction */
......
......@@ -1656,11 +1656,11 @@ static void posix_shared_record_variance(MPI_Comm mod_comm,
struct darshan_variance_dt *var_send_buf = NULL;
struct darshan_variance_dt *var_recv_buf = NULL;
DARSHAN_MPI_CALL(PMPI_Type_contiguous)(sizeof(struct darshan_variance_dt),
PMPI_Type_contiguous(sizeof(struct darshan_variance_dt),
MPI_BYTE, &var_dt);
DARSHAN_MPI_CALL(PMPI_Type_commit)(&var_dt);
PMPI_Type_commit(&var_dt);
DARSHAN_MPI_CALL(PMPI_Op_create)(darshan_variance_reduce, 1, &var_op);
PMPI_Op_create(darshan_variance_reduce, 1, &var_op);
var_send_buf = malloc(shared_rec_count * sizeof(struct darshan_variance_dt));
if(!var_send_buf)
......@@ -1685,7 +1685,7 @@ static void posix_shared_record_variance(MPI_Comm mod_comm,
inrec_array[i].fcounters[POSIX_F_META_TIME];
}
DARSHAN_MPI_CALL(PMPI_Reduce)(var_send_buf, var_recv_buf, shared_rec_count,
PMPI_Reduce(var_send_buf, var_recv_buf, shared_rec_count,
var_dt, var_op, 0, mod_comm);
if(my_rank == 0)
......@@ -1708,7 +1708,7 @@ static void posix_shared_record_variance(MPI_Comm mod_comm,
inrec_array[i].counters[POSIX_BYTES_WRITTEN];
}
DARSHAN_MPI_CALL(PMPI_Reduce)(var_send_buf, var_recv_buf, shared_rec_count,
PMPI_Reduce(var_send_buf, var_recv_buf, shared_rec_count,
var_dt, var_op, 0, mod_comm);
if(my_rank == 0)
......@@ -1720,8 +1720,8 @@ static void posix_shared_record_variance(MPI_Comm mod_comm,
}
}
DARSHAN_MPI_CALL(PMPI_Type_free)(&var_dt);
DARSHAN_MPI_CALL(PMPI_Op_free)(&var_op);
PMPI_Type_free(&var_dt);
PMPI_Op_free(&var_op);
free(var_send_buf);
free(var_recv_buf);
......@@ -1906,15 +1906,15 @@ static void posix_shutdown(
/* construct a datatype for a POSIX file record. This is serving no purpose
* except to make sure we can do a reduction on proper boundaries
*/
DARSHAN_MPI_CALL(PMPI_Type_contiguous)(sizeof(struct darshan_posix_file),
PMPI_Type_contiguous(sizeof(struct darshan_posix_file),
MPI_BYTE, &red_type);
DARSHAN_MPI_CALL(PMPI_Type_commit)(&red_type);
PMPI_Type_commit(&red_type);
/* register a POSIX file record reduction operator */
DARSHAN_MPI_CALL(PMPI_Op_create)(posix_record_reduction_op, 1, &red_op);
PMPI_Op_create(posix_record_reduction_op, 1, &red_op);
/* reduce shared POSIX file records */
DARSHAN_MPI_CALL(PMPI_Reduce)(red_send_buf, red_recv_buf,
PMPI_Reduce(red_send_buf, red_recv_buf,
shared_rec_count, red_type, red_op, 0, mod_comm);
/* get the time and byte variances for shared files */
......@@ -1934,8 +1934,8 @@ static void posix_shutdown(
posix_rec_count -= shared_rec_count;
}
DARSHAN_MPI_CALL(PMPI_Type_free)(&red_type);
DARSHAN_MPI_CALL(PMPI_Op_free)(&red_op);
PMPI_Type_free(&red_type);
PMPI_Op_free(&red_op);
}
/* update output buffer size to account for shared file reduction */
......
......@@ -1184,15 +1184,15 @@ static void stdio_shutdown(
/* construct a datatype for a STDIO file record. This is serving no purpose
* except to make sure we can do a reduction on proper boundaries
*/
DARSHAN_MPI_CALL(PMPI_Type_contiguous)(sizeof(struct darshan_stdio_file),
PMPI_Type_contiguous(sizeof(struct darshan_stdio_file),
MPI_BYTE, &red_type);
DARSHAN_MPI_CALL(PMPI_Type_commit)(&red_type);
PMPI_Type_commit(&red_type);
/* register a STDIO file record reduction operator */
DARSHAN_MPI_CALL(PMPI_Op_create)(stdio_record_reduction_op, 1, &red_op);
PMPI_Op_create(stdio_record_reduction_op, 1, &red_op);
/* reduce shared STDIO file records */
DARSHAN_MPI_CALL(PMPI_Reduce)(red_send_buf, red_recv_buf,
PMPI_Reduce(red_send_buf, red_recv_buf,
shared_rec_count, red_type, red_op, 0, mod_comm);
/* get the time and byte variances for shared files */
......@@ -1212,8 +1212,8 @@ static void stdio_shutdown(
stdio_rec_count -= shared_rec_count;
}
DARSHAN_MPI_CALL(PMPI_Type_free)(&red_type);
DARSHAN_MPI_CALL(PMPI_Op_free)(&red_op);
PMPI_Type_free(&red_type);
PMPI_Op_free(&red_op);
}
/* filter out any records that have no activity on them; this is
......@@ -1323,11 +1323,11 @@ static void stdio_shared_record_variance(MPI_Comm mod_comm,
struct darshan_variance_dt *var_send_buf = NULL;
struct darshan_variance_dt *var_recv_buf = NULL;
DARSHAN_MPI_CALL(PMPI_Type_contiguous)(sizeof(struct darshan_variance_dt),
PMPI_Type_contiguous(sizeof(struct darshan_variance_dt),
MPI_BYTE, &var_dt);
DARSHAN_MPI_CALL(PMPI_Type_commit)(&var_dt);
PMPI_Type_commit(&var_dt);
DARSHAN_MPI_CALL(PMPI_Op_create)(darshan_variance_reduce, 1, &var_op);
PMPI_Op_create(darshan_variance_reduce, 1, &var_op);
var_send_buf = malloc(shared_rec_count * sizeof(struct darshan_variance_dt));
if(!var_send_buf)
......@@ -1352,7 +1352,7 @@ static void stdio_shared_record_variance(MPI_Comm mod_comm,
inrec_array[i].fcounters[STDIO_F_META_TIME];
}
DARSHAN_MPI_CALL(PMPI_Reduce)(var_send_buf, var_recv_buf, shared_rec_count,
PMPI_Reduce(var_send_buf, var_recv_buf, shared_rec_count,
var_dt, var_op, 0, mod_comm);
if(my_rank == 0)
......@@ -1375,7 +1375,7 @@ static void stdio_shared_record_variance(MPI_Comm mod_comm,
inrec_array[i].counters[STDIO_BYTES_WRITTEN];
}
DARSHAN_MPI_CALL(PMPI_Reduce)(var_send_buf, var_recv_buf, shared_rec_count,
PMPI_Reduce(var_send_buf, var_recv_buf, shared_rec_count,
var_dt, var_op, 0, mod_comm);
if(my_rank == 0)
......@@ -1387,8 +1387,8 @@ static void stdio_shared_record_variance(MPI_Comm mod_comm,
}
}
DARSHAN_MPI_CALL(PMPI_Type_free)(&var_dt);
DARSHAN_MPI_CALL(PMPI_Op_free)(&var_op);
PMPI_Type_free(&var_dt);
PMPI_Op_free(&var_op);
free(var_send_buf);
free(var_recv_buf);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment