darshan-mpiio.c 60.3 KB
Newer Older
Philip Carns's avatar
Philip Carns committed
1
/*
Shane Snyder's avatar
Shane Snyder committed
2 3 4
 * Copyright (C) 2015 University of Chicago.
 * See COPYRIGHT notice in top-level directory.
 *
Philip Carns's avatar
Philip Carns committed
5 6
 */

7 8 9
#define _XOPEN_SOURCE 500
#define _GNU_SOURCE

Philip Carns's avatar
Philip Carns committed
10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25
#include "darshan-runtime-config.h"
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdarg.h>
#include <string.h>
#include <time.h>
#include <stdlib.h>
#include <errno.h>
#include <search.h>
#include <assert.h>
#include <pthread.h>

#include "darshan.h"
26
#include "darshan-dynamic.h"
Philip Carns's avatar
Philip Carns committed
27

/* Forward declarations for the underlying PMPI_File_* routines that this
 * module intercepts.  DARSHAN_FORWARD_DECL expands to either a dlsym-resolved
 * function pointer (dynamic builds) or a "__real_" symbol (static/ld-wrap
 * builds).  The HAVE_MPIIO_CONST conditionals account for MPI
 * implementations that differ in whether buffer/filename arguments are
 * const-qualified (const was added to these signatures in MPI 3.0).
 */
DARSHAN_FORWARD_DECL(PMPI_File_close, int, (MPI_File *fh));
DARSHAN_FORWARD_DECL(PMPI_File_iread_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
DARSHAN_FORWARD_DECL(PMPI_File_iread, int, (MPI_File fh, void  *buf, int  count, MPI_Datatype  datatype, __D_MPI_REQUEST  *request));
DARSHAN_FORWARD_DECL(PMPI_File_iread_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_iwrite_at, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#else
DARSHAN_FORWARD_DECL(PMPI_File_iwrite_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_iwrite, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#else
DARSHAN_FORWARD_DECL(PMPI_File_iwrite, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_iwrite_shared, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#else
DARSHAN_FORWARD_DECL(PMPI_File_iwrite_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_open, int, (MPI_Comm comm, const char *filename, int amode, MPI_Info info, MPI_File *fh));
#else
DARSHAN_FORWARD_DECL(PMPI_File_open, int, (MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_File *fh));
#endif
DARSHAN_FORWARD_DECL(PMPI_File_read_all_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
DARSHAN_FORWARD_DECL(PMPI_File_read_all, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
DARSHAN_FORWARD_DECL(PMPI_File_read_at_all, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
DARSHAN_FORWARD_DECL(PMPI_File_read_at_all_begin, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype));
DARSHAN_FORWARD_DECL(PMPI_File_read_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
DARSHAN_FORWARD_DECL(PMPI_File_read, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
DARSHAN_FORWARD_DECL(PMPI_File_read_ordered_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
DARSHAN_FORWARD_DECL(PMPI_File_read_ordered, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
DARSHAN_FORWARD_DECL(PMPI_File_read_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_set_view, int, (MPI_File fh, MPI_Offset disp, MPI_Datatype etype, MPI_Datatype filetype, const char *datarep, MPI_Info info));
#else
DARSHAN_FORWARD_DECL(PMPI_File_set_view, int, (MPI_File fh, MPI_Offset disp, MPI_Datatype etype, MPI_Datatype filetype, char *datarep, MPI_Info info));
#endif
DARSHAN_FORWARD_DECL(PMPI_File_sync, int, (MPI_File fh));
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_all_begin, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_all_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_all, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_all, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_at_all_begin, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_at_all_begin, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_at_all, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_at_all, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_at, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_ordered_begin, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_ordered_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_ordered, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_ordered, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_shared, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#endif

/* The mpiio_file_record_ref structure maintains necessary runtime metadata
 * for the MPIIO file record (darshan_mpiio_file structure, defined in
 * darshan-mpiio-log-format.h) pointed to by 'file_rec'. This metadata
 * assists with the instrumenting of specific statistics in the file record.
 *
 * RATIONALE: the MPIIO module needs to track some stateful, volatile 
 * information about each open file (like the current file offset, most recent 
 * access time, etc.) to aid in instrumentation, but this information can't be
 * stored in the darshan_mpiio_file struct because we don't want it to appear in
 * the final darshan log file.  We therefore associate a mpiio_file_record_ref
 * struct with each darshan_mpiio_file struct in order to track this information
 * (i.e., the mapping between mpiio_file_record_ref structs to darshan_mpiio_file
 * structs is one-to-one).
 *
 * NOTE: we use the 'darshan_record_ref' interface (in darshan-common) to
 * associate different types of handles with this mpiio_file_record_ref struct.
 * This allows us to index this struct (and the underlying file record) by using
 * either the corresponding Darshan record identifier (derived from the filename)
 * or by a generated MPI file handle, for instance. So, while there should only
 * be a single Darshan record identifier that indexes a mpiio_file_record_ref,
 * there could be multiple open file handles that index it.
 */
struct mpiio_file_record_ref
{
    /* the persistent record that is written to the darshan log */
    struct darshan_mpiio_file *file_rec;
    /* whether the most recent access was a read or a write; used to
     * count read/write switches (MPIIO_RW_SWITCHES) */
    enum darshan_io_type last_io_type;
    /* end timestamps of the most recent metadata/read/write operations,
     * used by DARSHAN_TIMER_INC_NO_OVERLAP to avoid double-counting
     * overlapping time intervals */
    double last_meta_end;
    double last_read_end;
    double last_write_end;
    /* tsearch()-style tree root and entry count used by
     * darshan_common_val_counter() to track common access sizes */
    void *access_root;
    int access_count;
};

/* The mpiio_runtime structure maintains necessary state for storing
 * MPI-IO file records and for coordinating with darshan-core at 
 * shutdown time.
 */
struct mpiio_runtime
{
    /* hash of record references keyed by darshan record id */
    void *rec_id_hash;
    /* hash of record references keyed by MPI_File handle */
    void *fh_hash;
    /* number of file records tracked by this module */
    int file_rec_count;
};

/* internal helper prototypes; definitions appear later in this file */
static void mpiio_runtime_initialize(
    void);
static struct mpiio_file_record_ref *mpiio_track_new_file_record(
    darshan_record_id rec_id, const char *path);
static void mpiio_finalize_file_records(
    void *rec_ref_p);
static void mpiio_record_reduction_op(
    void* infile_v, void* inoutfile_v, int *len, MPI_Datatype *datatype);
static void mpiio_shared_record_variance(
    MPI_Comm mod_comm, struct darshan_mpiio_file *inrec_array,
    struct darshan_mpiio_file *outrec_array, int shared_rec_count);
static void mpiio_cleanup_runtime(
    void);

/* module shutdown hook registered with darshan-core; reduces shared
 * records across ranks and serializes this module's output buffer */
static void mpiio_shutdown(
    MPI_Comm mod_comm, darshan_record_id *shared_recs,
    int shared_rec_count, void **mpiio_buf, int *mpiio_buf_sz);

/* extern DXT function defs */
extern void dxt_mpiio_write(darshan_record_id rec_id, int64_t length,
    double start_time, double end_time);
extern void dxt_mpiio_read(darshan_record_id rec_id, int64_t length,
    double start_time, double end_time);

/* module-global state, guarded by mpiio_runtime_mutex (recursive so that
 * wrapper macros may nest lock acquisitions safely) */
static struct mpiio_runtime *mpiio_runtime = NULL;
static pthread_mutex_t mpiio_runtime_mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
static int my_rank = -1;
/* nonzero when detailed DXT tracing of each I/O operation is enabled */
static int enable_dxt_io_trace = 0;

#define MPIIO_LOCK() pthread_mutex_lock(&mpiio_runtime_mutex)
#define MPIIO_UNLOCK() pthread_mutex_unlock(&mpiio_runtime_mutex)

/* MPIIO_PRE_RECORD: acquire the module lock and lazily initialize the
 * module.  NOTE: this macro is NOT a plain statement -- on success it
 * 'break's out of its own do/while with the lock HELD (the caller must
 * eventually invoke MPIIO_POST_RECORD to release it); if instrumentation
 * is disabled or initialization fails it unlocks and executes
 * 'return(ret)' from the *enclosing wrapper function*, so it relies on a
 * local variable named 'ret' being in scope. */
#define MPIIO_PRE_RECORD() do { \
    MPIIO_LOCK(); \
    if(!darshan_core_disabled_instrumentation()) { \
        if(!mpiio_runtime) { \
            mpiio_runtime_initialize(); \
        } \
        if(mpiio_runtime) break; \
    } \
    MPIIO_UNLOCK(); \
    return(ret); \
} while(0)

/* MPIIO_POST_RECORD: release the module lock taken by MPIIO_PRE_RECORD */
#define MPIIO_POST_RECORD() do { \
    MPIIO_UNLOCK(); \
} while(0)

/* MPIIO_RECORD_OPEN: instrument a successful MPI_File_open.
 * - skips excluded paths and failed opens (__ret != MPI_SUCCESS);
 * - maps the cleaned path to a darshan record id and creates/looks up the
 *   corresponding record reference;
 * - classifies the open as independent (communicator size 1) vs collective,
 *   counts hint usage, records the earliest open timestamp, accumulates
 *   metadata time, and registers __fh in the fh_hash so later read/write
 *   wrappers can find this record by file handle.
 * Caller must hold the module lock (i.e. be between MPIIO_PRE_RECORD and
 * MPIIO_POST_RECORD). */
#define MPIIO_RECORD_OPEN(__ret, __path, __fh, __comm, __mode, __info, __tm1, __tm2) do { \
    darshan_record_id rec_id; \
    struct mpiio_file_record_ref *rec_ref; \
    char *newpath; \
    int comm_size; \
    if(__ret != MPI_SUCCESS) break; \
    newpath = darshan_clean_file_path(__path); \
    if(!newpath) newpath = (char *)__path; \
    if(darshan_core_excluded_path(newpath)) { \
        if(newpath != __path) free(newpath); \
        break; \
    } \
    rec_id = darshan_core_gen_record_id(newpath); \
    rec_ref = darshan_lookup_record_ref(mpiio_runtime->rec_id_hash, &rec_id, sizeof(darshan_record_id)); \
    if(!rec_ref) rec_ref = mpiio_track_new_file_record(rec_id, newpath); \
    if(!rec_ref) { \
        if(newpath != __path) free(newpath); \
        break; \
    } \
    rec_ref->file_rec->counters[MPIIO_MODE] = __mode; \
    PMPI_Comm_size(__comm, &comm_size); \
    if(comm_size == 1) \
        rec_ref->file_rec->counters[MPIIO_INDEP_OPENS] += 1; \
    else \
        rec_ref->file_rec->counters[MPIIO_COLL_OPENS] += 1; \
    if(__info != MPI_INFO_NULL) \
        rec_ref->file_rec->counters[MPIIO_HINTS] += 1; \
    if(rec_ref->file_rec->fcounters[MPIIO_F_OPEN_TIMESTAMP] == 0 || \
     rec_ref->file_rec->fcounters[MPIIO_F_OPEN_TIMESTAMP] > __tm1) \
        rec_ref->file_rec->fcounters[MPIIO_F_OPEN_TIMESTAMP] = __tm1; \
    DARSHAN_TIMER_INC_NO_OVERLAP(rec_ref->file_rec->fcounters[MPIIO_F_META_TIME], \
        __tm1, __tm2, rec_ref->last_meta_end); \
    darshan_add_record_ref(&(mpiio_runtime->fh_hash), &__fh, sizeof(MPI_File), rec_ref); \
    if(newpath != __path) free(newpath); \
} while(0)

/* MPIIO_RECORD_READ: instrument a successful read operation on __fh.
 * Computes the access size as PMPI_Type_size(__datatype) * __count, feeds
 * DXT tracing when enabled, updates size histograms, common access-size
 * counters, byte and operation counts (__counter selects e.g.
 * MPIIO_INDEP_READS vs MPIIO_COLL_READS), read/write switch detection,
 * start/end timestamps, slowest-access tracking, and cumulative read time.
 * Caller must hold the module lock. */
#define MPIIO_RECORD_READ(__ret, __fh, __count, __datatype, __counter, __tm1, __tm2) do { \
    struct mpiio_file_record_ref *rec_ref; \
    int size = 0; \
    double __elapsed = __tm2-__tm1; \
    if(__ret != MPI_SUCCESS) break; \
    rec_ref = darshan_lookup_record_ref(mpiio_runtime->fh_hash, &(__fh), sizeof(MPI_File)); \
    if(!rec_ref) break; \
    PMPI_Type_size(__datatype, &size);  \
    size = size * __count; \
    /* DXT to record detailed read tracing information */ \
    if(enable_dxt_io_trace) { \
        dxt_mpiio_read(rec_ref->file_rec->base_rec.id, size, __tm1, __tm2); \
    } \
    DARSHAN_BUCKET_INC(&(rec_ref->file_rec->counters[MPIIO_SIZE_READ_AGG_0_100]), size); \
    darshan_common_val_counter(&rec_ref->access_root, &rec_ref->access_count, size, \
        &(rec_ref->file_rec->counters[MPIIO_ACCESS1_ACCESS]), \
        &(rec_ref->file_rec->counters[MPIIO_ACCESS1_COUNT])); \
    rec_ref->file_rec->counters[MPIIO_BYTES_READ] += size; \
    rec_ref->file_rec->counters[__counter] += 1; \
    if(rec_ref->last_io_type == DARSHAN_IO_WRITE) \
        rec_ref->file_rec->counters[MPIIO_RW_SWITCHES] += 1; \
    rec_ref->last_io_type = DARSHAN_IO_READ; \
    if(rec_ref->file_rec->fcounters[MPIIO_F_READ_START_TIMESTAMP] == 0 || \
     rec_ref->file_rec->fcounters[MPIIO_F_READ_START_TIMESTAMP] > __tm1) \
        rec_ref->file_rec->fcounters[MPIIO_F_READ_START_TIMESTAMP] = __tm1; \
    rec_ref->file_rec->fcounters[MPIIO_F_READ_END_TIMESTAMP] = __tm2; \
    if(rec_ref->file_rec->fcounters[MPIIO_F_MAX_READ_TIME] < __elapsed) { \
        rec_ref->file_rec->fcounters[MPIIO_F_MAX_READ_TIME] = __elapsed; \
        rec_ref->file_rec->counters[MPIIO_MAX_READ_TIME_SIZE] = size; } \
    DARSHAN_TIMER_INC_NO_OVERLAP(rec_ref->file_rec->fcounters[MPIIO_F_READ_TIME], \
        __tm1, __tm2, rec_ref->last_read_end); \
} while(0)

/* MPIIO_RECORD_WRITE: instrument a successful write operation on __fh.
 * Mirror image of MPIIO_RECORD_READ: computes the access size, feeds DXT
 * tracing when enabled, updates write size histograms, common access-size
 * counters, bytes written, the operation counter selected by __counter,
 * read/write switch detection, timestamps, slowest-access tracking, and
 * cumulative write time.  Caller must hold the module lock. */
#define MPIIO_RECORD_WRITE(__ret, __fh, __count, __datatype, __counter, __tm1, __tm2) do { \
    struct mpiio_file_record_ref *rec_ref; \
    int size = 0; \
    double __elapsed = __tm2-__tm1; \
    if(__ret != MPI_SUCCESS) break; \
    rec_ref = darshan_lookup_record_ref(mpiio_runtime->fh_hash, &(__fh), sizeof(MPI_File)); \
    if(!rec_ref) break; \
    PMPI_Type_size(__datatype, &size);  \
    size = size * __count; \
     /* DXT to record detailed write tracing information */ \
    if(enable_dxt_io_trace) { \
        dxt_mpiio_write(rec_ref->file_rec->base_rec.id, size, __tm1, __tm2); \
    } \
    DARSHAN_BUCKET_INC(&(rec_ref->file_rec->counters[MPIIO_SIZE_WRITE_AGG_0_100]), size); \
    darshan_common_val_counter(&rec_ref->access_root, &rec_ref->access_count, size, \
        &(rec_ref->file_rec->counters[MPIIO_ACCESS1_ACCESS]), \
        &(rec_ref->file_rec->counters[MPIIO_ACCESS1_COUNT])); \
    rec_ref->file_rec->counters[MPIIO_BYTES_WRITTEN] += size; \
    rec_ref->file_rec->counters[__counter] += 1; \
    if(rec_ref->last_io_type == DARSHAN_IO_READ) \
        rec_ref->file_rec->counters[MPIIO_RW_SWITCHES] += 1; \
    rec_ref->last_io_type = DARSHAN_IO_WRITE; \
    if(rec_ref->file_rec->fcounters[MPIIO_F_WRITE_START_TIMESTAMP] == 0 || \
     rec_ref->file_rec->fcounters[MPIIO_F_WRITE_START_TIMESTAMP] > __tm1) \
        rec_ref->file_rec->fcounters[MPIIO_F_WRITE_START_TIMESTAMP] = __tm1; \
    rec_ref->file_rec->fcounters[MPIIO_F_WRITE_END_TIMESTAMP] = __tm2; \
    if(rec_ref->file_rec->fcounters[MPIIO_F_MAX_WRITE_TIME] < __elapsed) { \
        rec_ref->file_rec->fcounters[MPIIO_F_MAX_WRITE_TIME] = __elapsed; \
        rec_ref->file_rec->counters[MPIIO_MAX_WRITE_TIME_SIZE] = size; } \
    DARSHAN_TIMER_INC_NO_OVERLAP(rec_ref->file_rec->fcounters[MPIIO_F_WRITE_TIME], \
        __tm1, __tm2, rec_ref->last_write_end); \
} while(0)

/**********************************************************
 *        Wrappers for MPI-IO functions of interest       * 
 **********************************************************/

/* Wrapper for MPI_File_open(): times the real open, strips any ROMIO
 * "fstype:" prefix from the filename, then records the open (mode, hints,
 * indep/collective classification, metadata time) and associates the new
 * file handle with the darshan record. */
#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_open)(MPI_Comm comm, const char *filename, int amode, MPI_Info info, MPI_File *fh) 
#else
int DARSHAN_DECL(MPI_File_open)(MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_File *fh) 
#endif
{
    int ret;
    MPI_File tmp_fh;
    char* tmp;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_open);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_open(comm, filename, amode, info, fh);
    tm2 = darshan_core_wtime();

    /* use ROMIO approach to strip prefix if present */
    /* strip off prefix if there is one, but only skip prefixes
     * if they are greater than length one to allow for windows
     * drive specifications (e.g. c:\...) 
     */
    tmp = strchr(filename, ':');
    if (tmp > filename + 1) {
        filename = tmp + 1;
    }

    MPIIO_PRE_RECORD();
    /* copy the handle to a local so fh_hash keys a plain MPI_File value */
    tmp_fh = *fh;
    MPIIO_RECORD_OPEN(ret, filename, tmp_fh, comm, amode, info, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_open, int,  (MPI_Comm comm, const char *filename, int amode, MPI_Info info, MPI_File *fh), MPI_File_open(comm,filename,amode,info,fh))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_open, int,  (MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_File *fh), MPI_File_open(comm,filename,amode,info,fh)) 
#endif
350

351
int DARSHAN_DECL(MPI_File_read)(MPI_File fh, void *buf, int count,
352 353 354 355 356
    MPI_Datatype datatype, MPI_Status *status)
{
    int ret;
    double tm1, tm2;

357
    MAP_OR_FAIL(PMPI_File_read);
358

359
    tm1 = darshan_core_wtime();
360
    ret = __real_PMPI_File_read(fh, buf, count, datatype, status);
361 362
    tm2 = darshan_core_wtime();

363
    MPIIO_PRE_RECORD();
364
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_INDEP_READS, tm1, tm2);
365 366
    MPIIO_POST_RECORD();

367 368
    return(ret);
}
369 370
DARSHAN_WRAPPER_MAP(PMPI_File_read, int, (MPI_File fh, void *buf, int count,
    MPI_Datatype datatype, MPI_Status *status), MPI_File_read(fh,buf,count,datatype,status))
371 372

#ifdef HAVE_MPIIO_CONST
373
int DARSHAN_DECL(MPI_File_write)(MPI_File fh, const void *buf, int count,
374 375
    MPI_Datatype datatype, MPI_Status *status)
#else
376
int DARSHAN_DECL(MPI_File_write)(MPI_File fh, void *buf, int count,
377 378 379 380 381 382
    MPI_Datatype datatype, MPI_Status *status)
#endif
{
    int ret;
    double tm1, tm2;

383
    MAP_OR_FAIL(PMPI_File_write);
384

385
    tm1 = darshan_core_wtime();
386
    ret = __real_PMPI_File_write(fh, buf, count, datatype, status);
387 388
    tm2 = darshan_core_wtime();

389
    MPIIO_PRE_RECORD();
390
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_INDEP_WRITES, tm1, tm2);
391 392
    MPIIO_POST_RECORD();

393 394
    return(ret);
}
395 396 397 398 399 400 401
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write, int, (MPI_File fh, const void *buf, int count,
    MPI_Datatype datatype, MPI_Status *status), MPI_File_write(fh,buf,count,datatype,status))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write, int, (MPI_File fh, void *buf, int count,
    MPI_Datatype datatype, MPI_Status *status), MPI_File_write(fh,buf,count,datatype,status))
#endif
402

403
int DARSHAN_DECL(MPI_File_read_at)(MPI_File fh, MPI_Offset offset, void *buf,
404 405 406 407 408
    int count, MPI_Datatype datatype, MPI_Status *status)
{
    int ret;
    double tm1, tm2;

409
    MAP_OR_FAIL(PMPI_File_read_at);
410

411
    tm1 = darshan_core_wtime();
412
    ret = __real_PMPI_File_read_at(fh, offset, buf,
413 414 415
        count, datatype, status);
    tm2 = darshan_core_wtime();

416
    MPIIO_PRE_RECORD();
417
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_INDEP_READS, tm1, tm2);
418 419
    MPIIO_POST_RECORD();

420 421
    return(ret);
}
422 423
DARSHAN_WRAPPER_MAP(PMPI_File_read_at, int, (MPI_File fh, MPI_Offset offset, void *buf,
    int count, MPI_Datatype datatype, MPI_Status *status), MPI_File_read_at(fh, offset, buf, count, datatype, status))
424 425

#ifdef HAVE_MPIIO_CONST
426
int DARSHAN_DECL(MPI_File_write_at)(MPI_File fh, MPI_Offset offset, const void *buf,
427 428
    int count, MPI_Datatype datatype, MPI_Status *status)
#else
429
int DARSHAN_DECL(MPI_File_write_at)(MPI_File fh, MPI_Offset offset, void *buf,
430 431 432 433 434 435
    int count, MPI_Datatype datatype, MPI_Status *status)
#endif
{
    int ret;
    double tm1, tm2;

436
    MAP_OR_FAIL(PMPI_File_write_at);
437

438
    tm1 = darshan_core_wtime();
439
    ret = __real_PMPI_File_write_at(fh, offset, buf,
440 441 442
        count, datatype, status);
    tm2 = darshan_core_wtime();

443
    MPIIO_PRE_RECORD();
444
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_INDEP_WRITES, tm1, tm2);
445 446
    MPIIO_POST_RECORD();

447 448
    return(ret);
}
449 450 451 452 453 454 455
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_at, int, (MPI_File fh, MPI_Offset offset, const void *buf,
    int count, MPI_Datatype datatype, MPI_Status *status), MPI_File_write_at(fh, offset, buf, count, datatype, status))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_at, int, (MPI_File fh, MPI_Offset offset, void *buf,
    int count, MPI_Datatype datatype, MPI_Status *status), MPI_File_write_at(fh, offset, buf, count, datatype, status))
#endif
456

457
int DARSHAN_DECL(MPI_File_read_all)(MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
458 459 460 461
{
    int ret;
    double tm1, tm2;

462
    MAP_OR_FAIL(PMPI_File_write_at);
463

464
    tm1 = darshan_core_wtime();
465
    ret = __real_PMPI_File_read_all(fh, buf, count,
466 467 468
        datatype, status);
    tm2 = darshan_core_wtime();

469
    MPIIO_PRE_RECORD();
470
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_COLL_READS, tm1, tm2);
471 472
    MPIIO_POST_RECORD();

473 474
    return(ret);
}
475 476
DARSHAN_WRAPPER_MAP(PMPI_File_read_all, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
        MPI_File_read_all(fh,buf,count,datatype,status))
477 478

#ifdef HAVE_MPIIO_CONST
479
int DARSHAN_DECL(MPI_File_write_all)(MPI_File fh, const void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
480
#else
481
int DARSHAN_DECL(MPI_File_write_all)(MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
482 483 484 485 486
#endif
{
    int ret;
    double tm1, tm2;

487
    MAP_OR_FAIL(PMPI_File_write_all);
488

489
    tm1 = darshan_core_wtime();
490
    ret = __real_PMPI_File_write_all(fh, buf, count,
491 492 493
        datatype, status);
    tm2 = darshan_core_wtime();

494
    MPIIO_PRE_RECORD();
495
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_COLL_WRITES, tm1, tm2);
496 497
    MPIIO_POST_RECORD();

498 499
    return(ret);
}
500 501 502 503 504 505 506
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_all, int, (MPI_File fh, const void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
        MPI_File_write_all(fh, buf, count, datatype, status))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_all, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
        MPI_File_write_all(fh, buf, count, datatype, status))
#endif
507

/* Wrapper for MPI_File_read_at_all(): collective read at an explicit offset;
 * recorded under MPIIO_COLL_READS. */
int DARSHAN_DECL(MPI_File_read_at_all)(MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype, MPI_Status * status)
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_read_at_all);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_read_at_all(fh, offset, buf,
        count, datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_COLL_READS, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
DARSHAN_WRAPPER_MAP(PMPI_File_read_at_all, int, (MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype, MPI_Status * status),
        MPI_File_read_at_all(fh,offset,buf,count,datatype,status))
530

/* Wrapper for MPI_File_write_at_all(): collective write at an explicit
 * offset; recorded under MPIIO_COLL_WRITES. */
#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_write_at_all)(MPI_File fh, MPI_Offset offset, const void * buf,
    int count, MPI_Datatype datatype, MPI_Status * status)
#else
int DARSHAN_DECL(MPI_File_write_at_all)(MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype, MPI_Status * status)
#endif
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_write_at_all);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_write_at_all(fh, offset, buf,
        count, datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_COLL_WRITES, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_at_all, int, (MPI_File fh, MPI_Offset offset, const void * buf,
    int count, MPI_Datatype datatype, MPI_Status * status),
        MPI_File_write_at_all(fh, offset, buf, count, datatype, status))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_at_all, int, (MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype, MPI_Status * status),
        MPI_File_write_at_all(fh, offset, buf, count, datatype, status))
#endif
564

/* Wrapper for MPI_File_read_shared(): read using the shared file pointer;
 * not collective, so recorded under MPIIO_INDEP_READS. */
int DARSHAN_DECL(MPI_File_read_shared)(MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_read_shared);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_read_shared(fh, buf, count,
        datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_INDEP_READS, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
DARSHAN_WRAPPER_MAP(PMPI_File_read_shared, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
        MPI_File_read_shared(fh, buf, count, datatype, status))
585 586

#ifdef HAVE_MPIIO_CONST
587
int DARSHAN_DECL(MPI_File_write_shared)(MPI_File fh, const void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
588
#else
589
int DARSHAN_DECL(MPI_File_write_shared)(MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
590 591 592 593 594
#endif
{
    int ret;
    double tm1, tm2;

595
    MAP_OR_FAIL(PMPI_File_write_shared);
596

597
    tm1 = darshan_core_wtime();
598
    ret = __real_PMPI_File_write_shared(fh, buf, count,
599 600 601
        datatype, status);
    tm2 = darshan_core_wtime();

602
    MPIIO_PRE_RECORD();
603
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_INDEP_WRITES, tm1, tm2);
604 605
    MPIIO_POST_RECORD();

606 607
    return(ret);
}
608 609 610 611 612 613 614
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_shared, int, (MPI_File fh, const void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
        MPI_File_write_shared(fh, buf, count, datatype, status))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_shared, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
        MPI_File_write_shared(fh, buf, count, datatype, status))
#endif
615

616

/* Wrapper for MPI_File_read_ordered(): collective shared-pointer read;
 * recorded under MPIIO_COLL_READS. */
int DARSHAN_DECL(MPI_File_read_ordered)(MPI_File fh, void * buf, int count,
    MPI_Datatype datatype, MPI_Status * status)
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_read_ordered);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_read_ordered(fh, buf, count,
        datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_COLL_READS, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
DARSHAN_WRAPPER_MAP(PMPI_File_read_ordered, int, (MPI_File fh, void * buf, int count,
    MPI_Datatype datatype, MPI_Status * status),
        MPI_File_read_ordered(fh, buf, count, datatype, status))
639

/* Wrapper for MPI_File_write_ordered(): collective shared-pointer write;
 * recorded under MPIIO_COLL_WRITES. */
#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_write_ordered)(MPI_File fh, const void * buf, int count,
    MPI_Datatype datatype, MPI_Status * status)
#else
int DARSHAN_DECL(MPI_File_write_ordered)(MPI_File fh, void * buf, int count,
    MPI_Datatype datatype, MPI_Status * status)
#endif
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_write_ordered);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_write_ordered(fh, buf, count,
         datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_COLL_WRITES, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_ordered, int, (MPI_File fh, const void * buf, int count,
    MPI_Datatype datatype, MPI_Status * status),
        MPI_File_write_ordered(fh, buf, count, datatype, status))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_ordered, int, (MPI_File fh, void * buf, int count,
    MPI_Datatype datatype, MPI_Status * status),
        MPI_File_write_ordered(fh, buf, count, datatype, status))
#endif
673

/* Wrapper for MPI_File_read_all_begin(): start of a split-collective read;
 * recorded under MPIIO_SPLIT_READS.  NOTE: only the begin call is timed;
 * the matching _end call is not instrumented here. */
int DARSHAN_DECL(MPI_File_read_all_begin)(MPI_File fh, void * buf, int count, MPI_Datatype datatype)
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_read_all_begin);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_read_all_begin(fh, buf, count, datatype);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_SPLIT_READS, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
DARSHAN_WRAPPER_MAP(PMPI_File_read_all_begin, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype),
        MPI_File_read_all_begin(fh, buf, count, datatype))
693 694

#ifdef HAVE_MPIIO_CONST
695
int DARSHAN_DECL(MPI_File_write_all_begin)(MPI_File fh, const void * buf, int count, MPI_Datatype datatype)
696
#else
697
int DARSHAN_DECL(MPI_File_write_all_begin)(MPI_File fh, void * buf, int count, MPI_Datatype datatype)
698 699 700 701 702
#endif
{
    int ret;
    double tm1, tm2;

703
    MAP_OR_FAIL(PMPI_File_write_all_begin);
704

705
    tm1 = darshan_core_wtime();
706
    ret = __real_PMPI_File_write_all_begin(fh, buf, count, datatype);
707 708
    tm2 = darshan_core_wtime();

709
    MPIIO_PRE_RECORD();
710
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_SPLIT_WRITES, tm1, tm2);
711 712
    MPIIO_POST_RECORD();

713 714
    return(ret);
}
715 716 717 718 719 720 721
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_all_begin, int, (MPI_File fh, const void * buf, int count, MPI_Datatype datatype),
        MPI_File_write_all_begin(fh, buf, count, datatype))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_all_begin, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype),
        MPI_File_write_all_begin(fh, buf, count, datatype))
#endif
722

723
int DARSHAN_DECL(MPI_File_read_at_all_begin)(MPI_File fh, MPI_Offset offset, void * buf,
724 725 726 727 728
    int count, MPI_Datatype datatype)
{
    int ret;
    double tm1, tm2;

729
    MAP_OR_FAIL(PMPI_File_read_at_all_begin);
730

731
    tm1 = darshan_core_wtime();
732
    ret = __real_PMPI_File_read_at_all_begin(fh, offset, buf,
733 734 735
        count, datatype);
    tm2 = darshan_core_wtime();
    
736
    MPIIO_PRE_RECORD();
737
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_SPLIT_READS, tm1, tm2);
738 739
    MPIIO_POST_RECORD();

740 741
    return(ret);
}
742 743 744
DARSHAN_WRAPPER_MAP(PMPI_File_read_at_all_begin, int, (MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype), MPI_File_read_at_all_begin(fh, offset, buf, count,
        datatype))
745 746

#ifdef HAVE_MPIIO_CONST
747
int DARSHAN_DECL(MPI_File_write_at_all_begin)(MPI_File fh, MPI_Offset offset, const void * buf,
748 749
    int count, MPI_Datatype datatype)
#else
750
int DARSHAN_DECL(MPI_File_write_at_all_begin)(MPI_File fh, MPI_Offset offset, void * buf,
751 752 753 754 755 756
    int count, MPI_Datatype datatype)
#endif
{
    int ret;
    double tm1, tm2;

757
    MAP_OR_FAIL(PMPI_File_write_at_all_begin);
758

759
    tm1 = darshan_core_wtime();
760
    ret = __real_PMPI_File_write_at_all_begin(fh, offset,
761 762 763
        buf, count, datatype);
    tm2 = darshan_core_wtime();

764
    MPIIO_PRE_RECORD();
765
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_SPLIT_WRITES, tm1, tm2);
766 767
    MPIIO_POST_RECORD();

768 769
    return(ret);
}
770 771 772 773 774 775 776
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_at_all_begin, int, (MPI_File fh, MPI_Offset offset, const void * buf,
    int count, MPI_Datatype datatype), MPI_File_write_at_all_begin( fh, offset, buf, count, datatype))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_at_all_begin, int, (MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype), MPI_File_write_at_all_begin( fh, offset, buf, count, datatype))
#endif
777

778
int DARSHAN_DECL(MPI_File_read_ordered_begin)(MPI_File fh, void * buf, int count, MPI_Datatype datatype)
779 780 781 782
{
    int ret;
    double tm1, tm2;

783
    MAP_OR_FAIL(PMPI_File_read_ordered_begin);
784

785
    tm1 = darshan_core_wtime();
786
    ret = __real_PMPI_File_read_ordered_begin(fh, buf, count,
787 788 789
        datatype);
    tm2 = darshan_core_wtime();

790
    MPIIO_PRE_RECORD();
791
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_SPLIT_READS, tm1, tm2);
792 793
    MPIIO_POST_RECORD();

794 795
    return(ret);
}
796 797
DARSHAN_WRAPPER_MAP(PMPI_File_read_ordered_begin, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype),
        MPI_File_read_ordered_begin(fh, buf, count, datatype))
798 799

#ifdef HAVE_MPIIO_CONST
800
int DARSHAN_DECL(MPI_File_write_ordered_begin)(MPI_File fh, const void * buf, int count, MPI_Datatype datatype)
801
#else
802
int DARSHAN_DECL(MPI_File_write_ordered_begin)(MPI_File fh, void * buf, int count, MPI_Datatype datatype)
803 804 805 806 807
#endif
{
    int ret;
    double tm1, tm2;

808
    MAP_OR_FAIL(PMPI_File_write_ordered_begin);
809

810
    tm1 = darshan_core_wtime();
811
    ret = __real_PMPI_File_write_ordered_begin(fh, buf, count,
812 813 814
        datatype);
    tm2 = darshan_core_wtime();

815
    MPIIO_PRE_RECORD();
816
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_SPLIT_WRITES, tm1, tm2);
817 818
    MPIIO_POST_RECORD();

819 820
    return(ret);
}
821 822 823 824 825 826 827
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_ordered_begin, int, (MPI_File fh, const void * buf, int count, MPI_Datatype datatype),
        MPI_File_write_ordered_begin(fh, buf, count, datatype))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_ordered_begin, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype),
        MPI_File_write_ordered_begin(fh, buf, count, datatype))
#endif
828

829
int DARSHAN_DECL(MPI_File_iread)(MPI_File fh, void * buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST * request)
830 831 832 833
{
    int ret;
    double tm1, tm2;

834
    MAP_OR_FAIL(PMPI_File_iread);
835

836
    tm1 = darshan_core_wtime();
837
    ret = __real_PMPI_File_iread(fh, buf, count, datatype, request);
838 839
    tm2 = darshan_core_wtime();

840
    MPIIO_PRE_RECORD();
841
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_NB_READS, tm1, tm2);
842 843
    MPIIO_POST_RECORD();

844 845
    return(ret);
}
846 847
DARSHAN_WRAPPER_MAP(PMPI_File_iread, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST * request),
        MPI_File_iread(fh, buf, count, datatype, request))
848 849

#ifdef HAVE_MPIIO_CONST
850
int DARSHAN_DECL(MPI_File_iwrite)(MPI_File fh, const void * buf, int count,
851 852
    MPI_Datatype datatype, __D_MPI_REQUEST * request)
#else
853
int DARSHAN_DECL(MPI_File_iwrite)(MPI_File fh, void * buf, int count,
854 855 856 857 858 859
    MPI_Datatype datatype, __D_MPI_REQUEST * request)
#endif
{
    int ret;
    double tm1, tm2;

860
    MAP_OR_FAIL(PMPI_File_iwrite);
861

862
    tm1 = darshan_core_wtime();
863
    ret = __real_PMPI_File_iwrite(fh, buf, count, datatype, request);
864 865
    tm2 = darshan_core_wtime();

866
    MPIIO_PRE_RECORD();
867
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_NB_WRITES, tm1, tm2);
868 869
    MPIIO_POST_RECORD();

870 871
    return(ret);
}
872 873 874 875 876 877 878 879 880
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_iwrite, int, (MPI_File fh, const void * buf, int count,
    MPI_Datatype datatype, __D_MPI_REQUEST * request),
        MPI_File_iwrite(fh, buf, count, datatype, request))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_iwrite, int, (MPI_File fh, void * buf, int count,
    MPI_Datatype datatype, __D_MPI_REQUEST * request),
        MPI_File_iwrite(fh, buf, count, datatype, request))
#endif
881

882
int DARSHAN_DECL(MPI_File_iread_at)(MPI_File fh, MPI_Offset offset, void * buf,
883 884 885 886 887
    int count, MPI_Datatype datatype, __D_MPI_REQUEST *request)
{
    int ret;
    double tm1, tm2;

888
    MAP_OR_FAIL(PMPI_File_iread_at);
889

890
    tm1 = darshan_core_wtime();
891
    ret = __real_PMPI_File_iread_at(fh, offset, buf, count,
892 893 894
        datatype, request);
    tm2 = darshan_core_wtime();

895
    MPIIO_PRE_RECORD();
896
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_NB_READS, tm1, tm2);
897 898
    MPIIO_POST_RECORD();

899 900
    return(ret);
}
901
DARSHAN_WRAPPER_MAP(PMPI_File_iread_at, int, (MPI_File fh, MPI_Offset offset, void * buf,
902
    int count, MPI_Datatype datatype, __D_MPI_REQUEST *request),
903
        MPI_File_iread_at(fh, offset,buf,count,datatype,request))
904

905
#ifdef HAVE_MPIIO_CONST
906
int DARSHAN_DECL(MPI_File_iwrite_at)(MPI_File fh, MPI_Offset offset, const void * buf,
907 908
    int count, MPI_Datatype datatype, __D_MPI_REQUEST *request)
#else
909
int DARSHAN_DECL(MPI_File_iwrite_at)(MPI_File fh, MPI_Offset offset, void * buf,
910 911 912 913 914 915
    int count, MPI_Datatype datatype, __D_MPI_REQUEST *request)
#endif
{
    int ret;
    double tm1, tm2;

916
    MAP_OR_FAIL(PMPI_File_iwrite_at);
917

918
    tm1 = darshan_core_wtime();
919
    ret = __real_PMPI_File_iwrite_at(fh, offset, buf,
920 921 922
        count, datatype, request);
    tm2 = darshan_core_wtime();

923
    MPIIO_PRE_RECORD();
924
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_NB_WRITES, tm1, tm2);
925 926
    MPIIO_POST_RECORD();

927 928
    return(ret);
}
929 930 931 932 933 934 935 936 937
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_iwrite_at, int, (MPI_File fh, MPI_Offset offset, const void * buf,
    int count, MPI_Datatype datatype, __D_MPI_REQUEST *request),
        MPI_File_iwrite_at(fh, offset, buf, count, datatype, request))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_iwrite_at, int, (MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype, __D_MPI_REQUEST *request),
        MPI_File_iwrite_at(fh, offset, buf, count, datatype, request))
#endif
938

939
int DARSHAN_DECL(MPI_File_iread_shared)(MPI_File fh, void * buf, int count,
940 941 942 943 944
    MPI_Datatype datatype, __D_MPI_REQUEST * request)
{
    int ret;
    double tm1, tm2;

945
    MAP_OR_FAIL(PMPI_File_iread_shared);
946

947
    tm1 = darshan_core_wtime();
948
    ret = __real_PMPI_File_iread_shared(fh, buf, count,
949 950 951
        datatype, request);
    tm2 = darshan_core_wtime();

952
    MPIIO_PRE_RECORD();
953
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_NB_READS, tm1, tm2);
954 955
    MPIIO_POST_RECORD();

956 957
    return(ret);
}
958
DARSHAN_WRAPPER_MAP(PMPI_File_iread_shared, int, (MPI_File fh, void * buf, int count,
959
    MPI_Datatype datatype, __D_MPI_REQUEST * request),
960
        MPI_File_iread_shared(fh, buf, count, datatype, request))
961

962
#ifdef HAVE_MPIIO_CONST
963
int DARSHAN_DECL(MPI_File_iwrite_shared)(MPI_File fh, const void * buf, int count,
964 965
    MPI_Datatype datatype, __D_MPI_REQUEST * request)
#else
966
int DARSHAN_DECL(MPI_File_iwrite_shared)(MPI_File fh, void * buf, int count,
967 968 969 970 971 972
    MPI_Datatype datatype, __D_MPI_REQUEST * request)
#endif
{
    int ret;
    double tm1, tm2;

973
    MAP_OR_FAIL(PMPI_File_iwrite_shared);
974

975
    tm1 = darshan_core_wtime();
976
    ret = __real_PMPI_File_iwrite_shared(fh, buf, count,
977 978 979
        datatype, request);
    tm2 = darshan_core_wtime();

980
    MPIIO_PRE_RECORD();
981
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_NB_WRITES, tm1, tm2);
982 983
    MPIIO_POST_RECORD();

984 985
    return(ret);
}
986 987 988 989 990 991 992 993 994
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_iwrite_shared, int, (MPI_File fh, const void * buf, int count,
    MPI_Datatype datatype, __D_MPI_REQUEST * request),
        MPI_File_iwrite_shared(fh, buf, count, datatype, request))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_iwrite_shared, int, (MPI_File fh, void * buf, int count,
    MPI_Datatype datatype, __D_MPI_REQUEST * request),
        MPI_File_iwrite_shared(fh, buf, count, datatype, request))
#endif
995

996
int DARSHAN_DECL(MPI_File_sync)(MPI_File fh)
997 998
{
    int ret;
999
    struct mpiio_file_record_ref *rec_ref;
1000 1001
    double tm1, tm2;

1002
    MAP_OR_FAIL(PMPI_File_sync);
1003

1004
    tm1 = darshan_core_wtime();
1005
    ret = __real_PMPI_File_sync(fh);
1006 1007 1008 1009
    tm2 = darshan_core_wtime();

    if(ret == MPI_SUCCESS)
    {
1010 1011 1012 1013
        MPIIO_PRE_RECORD();
        rec_ref = darshan_lookup_record_ref(mpiio_runtime->fh_hash,
            &fh, sizeof(MPI_File));
        if(rec_ref)
1014
        {
1015
            rec_ref->file_rec->counters[MPIIO_SYNCS] += 1;
1016
            DARSHAN_TIMER_INC_NO_OVERLAP(
1017 1018
                rec_ref->file_rec->fcounters[MPIIO_F_WRITE_TIME],
                tm1, tm2, rec_ref->last_write_end);
1019
        }
1020
        MPIIO_POST_RECORD();
1021 1022 1023 1024
    }

    return(ret);
}
1025
DARSHAN_WRAPPER_MAP(PMPI_File_sync, int, (MPI_File fh), MPI_File_sync(fh))
1026 1027

#ifdef HAVE_MPIIO_CONST
1028
int DARSHAN_DECL(MPI_File_set_view)(MPI_File fh, MPI_Offset disp, MPI_Datatype etype,
1029 1030
    MPI_Datatype filetype, const char *datarep, MPI_Info info)
#else
1031
int DARSHAN_DECL(MPI_File_set_view)(MPI_File fh, MPI_Offset disp, MPI_Datatype etype,
1032 1033 1034 1035
    MPI_Datatype filetype, char *datarep, MPI_Info info)
#endif
{
    int ret;
1036
    struct mpiio_file_record_ref *rec_ref;
1037 1038
    double tm1, tm2;

1039
    MAP_OR_FAIL(PMPI_File_set_view);
1040

1041
    tm1 = darshan_core_wtime();
1042
    ret = __real_PMPI_File_set_view(fh, disp, etype, filetype,
1043 1044 1045 1046 1047
        datarep, info);
    tm2 = darshan_core_wtime();

    if(ret == MPI_SUCCESS)
    {
1048 1049 1050 1051
        MPIIO_PRE_RECORD();
        rec_ref = darshan_lookup_record_ref(mpiio_runtime->fh_hash,
            &fh, sizeof(MPI_File));
        if(rec_ref)
1052
        {
1053
            rec_ref->file_rec->counters[MPIIO_VIEWS] += 1;
1054 1055
            if(info != MPI_INFO_NULL)
            {
1056
                rec_ref->file_rec->counters[MPIIO_HINTS] += 1;
1057
                DARSHAN_TIMER_INC_NO_OVERLAP(
1058 1059
                    rec_ref->file_rec->fcounters[MPIIO_F_META_TIME],
                    tm1, tm2, rec_ref->last_meta_end);
1060 1061
           }
        }
1062
        MPIIO_POST_RECORD();
1063 1064 1065 1066
    }

    return(ret);
}
1067 1068 1069 1070 1071 1072 1073
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_set_view, int, (MPI_File fh, MPI_Offset disp, MPI_Datatype etype,
    MPI_Datatype filetype, const char *datarep, MPI_Info info), MPI_File_set_view(fh, disp, etype, filetype, datarep, info))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_set_view, int, (MPI_File fh, MPI_Offset disp, MPI_Datatype etype,
    MPI_Datatype filetype, char *datarep, MPI_Info info), MPI_File_set_view(fh, disp, etype, filetype, datarep, info))
#endif
1074

1075
int DARSHAN_DECL(MPI_File_close)(MPI_File *fh)
1076 1077
{
    int ret;
1078
    struct mpiio_file_record_ref *rec_ref;
1079
    MPI_File tmp_fh = *fh;
1080 1081
    double tm1, tm2;

1082
    MAP_OR_FAIL(PMPI_File_close);
1083

1084
    tm1 = darshan_core_wtime();
1085
    ret = __real_PMPI_File_close(fh);
1086 1087
    tm2 = darshan_core_wtime();

1088 1089 1090 1091
    MPIIO_PRE_RECORD();
    rec_ref = darshan_lookup_record_ref(mpiio_runtime->fh_hash,
        &tmp_fh, sizeof(MPI_File));<