/*
 * Copyright (C) 2015 University of Chicago.
 * See COPYRIGHT notice in top-level directory.
 *
 */

#define _XOPEN_SOURCE 500
#define _GNU_SOURCE

#include "darshan-runtime-config.h"
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdarg.h>
#include <string.h>
#include <time.h>
#include <stdlib.h>
#include <errno.h>
#include <search.h>
#include <assert.h>
#include <pthread.h>

#include "darshan.h"
#include "darshan-dynamic.h"

DARSHAN_FORWARD_DECL(PMPI_File_close, int, (MPI_File *fh));
DARSHAN_FORWARD_DECL(PMPI_File_iread_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
DARSHAN_FORWARD_DECL(PMPI_File_iread, int, (MPI_File fh, void  *buf, int  count, MPI_Datatype  datatype, __D_MPI_REQUEST  *request));
DARSHAN_FORWARD_DECL(PMPI_File_iread_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_iwrite_at, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#else
DARSHAN_FORWARD_DECL(PMPI_File_iwrite_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_iwrite, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#else
DARSHAN_FORWARD_DECL(PMPI_File_iwrite, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_iwrite_shared, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#else
DARSHAN_FORWARD_DECL(PMPI_File_iwrite_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_open, int, (MPI_Comm comm, const char *filename, int amode, MPI_Info info, MPI_File *fh));
#else
DARSHAN_FORWARD_DECL(PMPI_File_open, int, (MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_File *fh));
#endif
DARSHAN_FORWARD_DECL(PMPI_File_read_all_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
DARSHAN_FORWARD_DECL(PMPI_File_read_all, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
DARSHAN_FORWARD_DECL(PMPI_File_read_at_all, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
DARSHAN_FORWARD_DECL(PMPI_File_read_at_all_begin, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype));
DARSHAN_FORWARD_DECL(PMPI_File_read_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
DARSHAN_FORWARD_DECL(PMPI_File_read, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
DARSHAN_FORWARD_DECL(PMPI_File_read_ordered_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
DARSHAN_FORWARD_DECL(PMPI_File_read_ordered, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
DARSHAN_FORWARD_DECL(PMPI_File_read_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_set_view, int, (MPI_File fh, MPI_Offset disp, MPI_Datatype etype, MPI_Datatype filetype, const char *datarep, MPI_Info info));
#else
DARSHAN_FORWARD_DECL(PMPI_File_set_view, int, (MPI_File fh, MPI_Offset disp, MPI_Datatype etype, MPI_Datatype filetype, char *datarep, MPI_Info info));
#endif
DARSHAN_FORWARD_DECL(PMPI_File_sync, int, (MPI_File fh));
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_all_begin, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_all_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_all, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_all, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_at_all_begin, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_at_all_begin, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_at_all, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_at_all, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_at, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_ordered_begin, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_ordered_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_ordered, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_ordered, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_shared, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#endif

/* The mpiio_file_record_ref structure maintains necessary runtime metadata
 * for the MPIIO file record (darshan_mpiio_file structure, defined in
 * darshan-mpiio-log-format.h) pointed to by 'file_rec'. This metadata
 * assists with the instrumenting of specific statistics in the file record.
 *
 * RATIONALE: the MPIIO module needs to track some stateful, volatile
 * information about each open file (like the current file offset, most recent
 * access time, etc.) to aid in instrumentation, but this information can't be
 * stored in the darshan_mpiio_file struct because we don't want it to appear in
 * the final darshan log file.  We therefore associate a mpiio_file_record_ref
 * struct with each darshan_mpiio_file struct in order to track this information
 * (i.e., the mapping between mpiio_file_record_ref structs and darshan_mpiio_file
 * structs is one-to-one).
 *
 * NOTE: we use the 'darshan_record_ref' interface (in darshan-common) to
 * associate different types of handles with this mpiio_file_record_ref struct.
 * This allows us to index this struct (and the underlying file record) by using
 * either the corresponding Darshan record identifier (derived from the filename)
 * or by a generated MPI file handle, for instance. So, while there should only
 * be a single Darshan record identifier that indexes a mpiio_file_record_ref,
 * there could be multiple open file handles that index it.
 */
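
/* For illustration only, a hedged sketch (not part of this module) of the
 * dual indexing described above, using the darshan-common calls that appear
 * later in this file; 'rt', 'rec_id', 'fh', and 'ref' are hypothetical:
 *
 *   struct mpiio_file_record_ref *ref = ...; // one ref per file record
 *   darshan_add_record_ref(&(rt->rec_id_hash), &rec_id,
 *       sizeof(darshan_record_id), ref);     // index by Darshan record id
 *   darshan_add_record_ref(&(rt->fh_hash), &fh,
 *       sizeof(MPI_File), ref);              // also index by open MPI handle
 *   // both lookups now resolve to the same underlying file record:
 *   assert(darshan_lookup_record_ref(rt->fh_hash, &fh, sizeof(MPI_File)) == ref);
 */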
struct mpiio_file_record_ref
{
    struct darshan_mpiio_file *file_rec;
    enum darshan_io_type last_io_type;
    double last_meta_end;
    double last_read_end;
    double last_write_end;
    void *access_root;
    int access_count;
};

/* The mpiio_runtime structure maintains necessary state for storing
 * MPI-IO file records and for coordinating with darshan-core at 
 * shutdown time.
 */
struct mpiio_runtime
{
    void *rec_id_hash;
    void *fh_hash;
    int file_rec_count;
};

static void mpiio_runtime_initialize(
    void);
static struct mpiio_file_record_ref *mpiio_track_new_file_record(
    darshan_record_id rec_id, const char *path);
static void mpiio_finalize_file_records(
    void *rec_ref_p);
static void mpiio_record_reduction_op(
    void* infile_v, void* inoutfile_v, int *len, MPI_Datatype *datatype);
static void mpiio_shared_record_variance(
    MPI_Comm mod_comm, struct darshan_mpiio_file *inrec_array,
    struct darshan_mpiio_file *outrec_array, int shared_rec_count);
static void mpiio_cleanup_runtime(
    void);

static void mpiio_shutdown(
    MPI_Comm mod_comm, darshan_record_id *shared_recs,
    int shared_rec_count, void **mpiio_buf, int *mpiio_buf_sz);

/* extern DXT function defs; these hooks record detailed per-operation
 * traces when DXT tracing is enabled (see enable_dxt_io_trace below)
 */
extern void dxt_mpiio_write(darshan_record_id rec_id, int64_t length,
    double start_time, double end_time);
extern void dxt_mpiio_read(darshan_record_id rec_id, int64_t length,
    double start_time, double end_time);

static struct mpiio_runtime *mpiio_runtime = NULL;
static pthread_mutex_t mpiio_runtime_mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
static int my_rank = -1;
static int enable_dxt_io_trace = 0;

#define MPIIO_LOCK() pthread_mutex_lock(&mpiio_runtime_mutex)
#define MPIIO_UNLOCK() pthread_mutex_unlock(&mpiio_runtime_mutex)

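/* MPIIO_PRE_RECORD/MPIIO_POST_RECORD bracket each instrumentation block:
 * PRE takes the module lock and lazily initializes the runtime state,
 * returning 'ret' from the enclosing wrapper if instrumentation is
 * disabled or initialization fails; POST simply drops the lock.
 */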
#define MPIIO_PRE_RECORD() do { \
    MPIIO_LOCK(); \
    if(!darshan_core_disabled_instrumentation()) { \
        if(!mpiio_runtime) { \
            mpiio_runtime_initialize(); \
        } \
        if(mpiio_runtime) break; \
    } \
    MPIIO_UNLOCK(); \
    return(ret); \
} while(0)

#define MPIIO_POST_RECORD() do { \
    MPIIO_UNLOCK(); \
} while(0)

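/* Record a successful open: map the cleaned path to a Darshan record id,
 * find or create the file record, classify the open as independent or
 * collective from the communicator size, count any hints, accumulate
 * metadata time, and index the record by the new MPI file handle.
 */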
#define MPIIO_RECORD_OPEN(__ret, __path, __fh, __comm, __mode, __info, __tm1, __tm2) do { \
    darshan_record_id rec_id; \
    struct mpiio_file_record_ref *rec_ref; \
    char *newpath; \
    int comm_size; \
    if(__ret != MPI_SUCCESS) break; \
    newpath = darshan_clean_file_path(__path); \
    if(!newpath) newpath = (char *)__path; \
    if(darshan_core_excluded_path(newpath)) { \
        if(newpath != __path) free(newpath); \
        break; \
    } \
    rec_id = darshan_core_gen_record_id(newpath); \
    rec_ref = darshan_lookup_record_ref(mpiio_runtime->rec_id_hash, &rec_id, sizeof(darshan_record_id)); \
    if(!rec_ref) rec_ref = mpiio_track_new_file_record(rec_id, newpath); \
    if(!rec_ref) { \
        if(newpath != __path) free(newpath); \
        break; \
    } \
    rec_ref->file_rec->counters[MPIIO_MODE] = __mode; \
    PMPI_Comm_size(__comm, &comm_size); \
    if(comm_size == 1) \
        rec_ref->file_rec->counters[MPIIO_INDEP_OPENS] += 1; \
    else \
        rec_ref->file_rec->counters[MPIIO_COLL_OPENS] += 1; \
    if(__info != MPI_INFO_NULL) \
        rec_ref->file_rec->counters[MPIIO_HINTS] += 1; \
    if(rec_ref->file_rec->fcounters[MPIIO_F_OPEN_TIMESTAMP] == 0 || \
     rec_ref->file_rec->fcounters[MPIIO_F_OPEN_TIMESTAMP] > __tm1) \
        rec_ref->file_rec->fcounters[MPIIO_F_OPEN_TIMESTAMP] = __tm1; \
    DARSHAN_TIMER_INC_NO_OVERLAP(rec_ref->file_rec->fcounters[MPIIO_F_META_TIME], \
        __tm1, __tm2, rec_ref->last_meta_end); \
    darshan_add_record_ref(&(mpiio_runtime->fh_hash), &__fh, sizeof(MPI_File), rec_ref); \
    if(newpath != __path) free(newpath); \
} while(0)

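/* Record a completed read: convert count x datatype size into bytes, hand
 * the access to DXT when enabled, update the size histogram and common
 * access counters, track read/write switches, and fold the elapsed time
 * into the cumulative and maximum read timers.
 */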
#define MPIIO_RECORD_READ(__ret, __fh, __count, __datatype, __counter, __tm1, __tm2) do { \
    struct mpiio_file_record_ref *rec_ref; \
    int size = 0; \
    double __elapsed = __tm2-__tm1; \
    if(__ret != MPI_SUCCESS) break; \
    rec_ref = darshan_lookup_record_ref(mpiio_runtime->fh_hash, &(__fh), sizeof(MPI_File)); \
    if(!rec_ref) break; \
    PMPI_Type_size(__datatype, &size); \
    size = size * __count; \
    /* DXT to record detailed read tracing information */ \
    if(enable_dxt_io_trace) { \
        dxt_mpiio_read(rec_ref->file_rec->base_rec.id, size, __tm1, __tm2); \
    } \
    DARSHAN_BUCKET_INC(&(rec_ref->file_rec->counters[MPIIO_SIZE_READ_AGG_0_100]), size); \
    darshan_common_val_counter(&rec_ref->access_root, &rec_ref->access_count, size, \
        &(rec_ref->file_rec->counters[MPIIO_ACCESS1_ACCESS]), \
        &(rec_ref->file_rec->counters[MPIIO_ACCESS1_COUNT])); \
    rec_ref->file_rec->counters[MPIIO_BYTES_READ] += size; \
    rec_ref->file_rec->counters[__counter] += 1; \
    if(rec_ref->last_io_type == DARSHAN_IO_WRITE) \
        rec_ref->file_rec->counters[MPIIO_RW_SWITCHES] += 1; \
    rec_ref->last_io_type = DARSHAN_IO_READ; \
    if(rec_ref->file_rec->fcounters[MPIIO_F_READ_START_TIMESTAMP] == 0 || \
     rec_ref->file_rec->fcounters[MPIIO_F_READ_START_TIMESTAMP] > __tm1) \
        rec_ref->file_rec->fcounters[MPIIO_F_READ_START_TIMESTAMP] = __tm1; \
    rec_ref->file_rec->fcounters[MPIIO_F_READ_END_TIMESTAMP] = __tm2; \
    if(rec_ref->file_rec->fcounters[MPIIO_F_MAX_READ_TIME] < __elapsed) { \
        rec_ref->file_rec->fcounters[MPIIO_F_MAX_READ_TIME] = __elapsed; \
        rec_ref->file_rec->counters[MPIIO_MAX_READ_TIME_SIZE] = size; } \
    DARSHAN_TIMER_INC_NO_OVERLAP(rec_ref->file_rec->fcounters[MPIIO_F_READ_TIME], \
        __tm1, __tm2, rec_ref->last_read_end); \
} while(0)

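/* Record a completed write; mirrors MPIIO_RECORD_READ above, but updates
 * the write-side histogram, byte and operation counts, timestamps, and
 * timers instead.
 */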
#define MPIIO_RECORD_WRITE(__ret, __fh, __count, __datatype, __counter, __tm1, __tm2) do { \
    struct mpiio_file_record_ref *rec_ref; \
    int size = 0; \
    double __elapsed = __tm2-__tm1; \
    if(__ret != MPI_SUCCESS) break; \
    rec_ref = darshan_lookup_record_ref(mpiio_runtime->fh_hash, &(__fh), sizeof(MPI_File)); \
    if(!rec_ref) break; \
    PMPI_Type_size(__datatype, &size); \
    size = size * __count; \
    /* DXT to record detailed write tracing information */ \
    if(enable_dxt_io_trace) { \
        dxt_mpiio_write(rec_ref->file_rec->base_rec.id, size, __tm1, __tm2); \
    } \
    DARSHAN_BUCKET_INC(&(rec_ref->file_rec->counters[MPIIO_SIZE_WRITE_AGG_0_100]), size); \
    darshan_common_val_counter(&rec_ref->access_root, &rec_ref->access_count, size, \
        &(rec_ref->file_rec->counters[MPIIO_ACCESS1_ACCESS]), \
        &(rec_ref->file_rec->counters[MPIIO_ACCESS1_COUNT])); \
    rec_ref->file_rec->counters[MPIIO_BYTES_WRITTEN] += size; \
    rec_ref->file_rec->counters[__counter] += 1; \
    if(rec_ref->last_io_type == DARSHAN_IO_READ) \
        rec_ref->file_rec->counters[MPIIO_RW_SWITCHES] += 1; \
    rec_ref->last_io_type = DARSHAN_IO_WRITE; \
    if(rec_ref->file_rec->fcounters[MPIIO_F_WRITE_START_TIMESTAMP] == 0 || \
     rec_ref->file_rec->fcounters[MPIIO_F_WRITE_START_TIMESTAMP] > __tm1) \
        rec_ref->file_rec->fcounters[MPIIO_F_WRITE_START_TIMESTAMP] = __tm1; \
    rec_ref->file_rec->fcounters[MPIIO_F_WRITE_END_TIMESTAMP] = __tm2; \
    if(rec_ref->file_rec->fcounters[MPIIO_F_MAX_WRITE_TIME] < __elapsed) { \
        rec_ref->file_rec->fcounters[MPIIO_F_MAX_WRITE_TIME] = __elapsed; \
        rec_ref->file_rec->counters[MPIIO_MAX_WRITE_TIME_SIZE] = size; } \
    DARSHAN_TIMER_INC_NO_OVERLAP(rec_ref->file_rec->fcounters[MPIIO_F_WRITE_TIME], \
        __tm1, __tm2, rec_ref->last_write_end); \
} while(0)

/**********************************************************
 *        Wrappers for MPI-IO functions of interest       * 
 **********************************************************/

#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_open)(MPI_Comm comm, const char *filename, int amode, MPI_Info info, MPI_File *fh)
#else
int DARSHAN_DECL(MPI_File_open)(MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_File *fh)
#endif
{
    int ret;
    MPI_File tmp_fh;
    char* tmp;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_open);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_open(comm, filename, amode, info, fh);
    tm2 = darshan_core_wtime();

    /* use the ROMIO approach to strip off a file system prefix if present,
     * but only skip prefixes longer than one character so that windows
     * drive specifications (e.g. c:\...) are left intact
     */
    tmp = strchr(filename, ':');
    if (tmp > filename + 1) {
        filename = tmp + 1;
    }

    MPIIO_PRE_RECORD();
    tmp_fh = *fh;
    MPIIO_RECORD_OPEN(ret, filename, tmp_fh, comm, amode, info, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_open, int,  (MPI_Comm comm, const char *filename, int amode, MPI_Info info, MPI_File *fh), MPI_File_open(comm,filename,amode,info,fh))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_open, int,  (MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_File *fh), MPI_File_open(comm,filename,amode,info,fh)) 
#endif
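
/* A hedged usage sketch (hypothetical application code, not part of this
 * module): when the darshan library is linked in, standard MPI-IO calls
 * resolve to the wrappers in this file, e.g.
 *
 *   MPI_File fh;
 *   MPI_File_open(MPI_COMM_WORLD, "out.dat",
 *       MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
 *   // counted as MPIIO_COLL_OPENS (comm size > 1) or MPIIO_INDEP_OPENS
 *   MPI_File_close(&fh);
 */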

int DARSHAN_DECL(MPI_File_read)(MPI_File fh, void *buf, int count,
    MPI_Datatype datatype, MPI_Status *status)
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_read);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_read(fh, buf, count, datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_INDEP_READS, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
DARSHAN_WRAPPER_MAP(PMPI_File_read, int, (MPI_File fh, void *buf, int count,
    MPI_Datatype datatype, MPI_Status *status), MPI_File_read(fh,buf,count,datatype,status))

#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_write)(MPI_File fh, const void *buf, int count,
    MPI_Datatype datatype, MPI_Status *status)
#else
int DARSHAN_DECL(MPI_File_write)(MPI_File fh, void *buf, int count,
    MPI_Datatype datatype, MPI_Status *status)
#endif
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_write);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_write(fh, buf, count, datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_INDEP_WRITES, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write, int, (MPI_File fh, const void *buf, int count,
    MPI_Datatype datatype, MPI_Status *status), MPI_File_write(fh,buf,count,datatype,status))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write, int, (MPI_File fh, void *buf, int count,
    MPI_Datatype datatype, MPI_Status *status), MPI_File_write(fh,buf,count,datatype,status))
#endif

int DARSHAN_DECL(MPI_File_read_at)(MPI_File fh, MPI_Offset offset, void *buf,
    int count, MPI_Datatype datatype, MPI_Status *status)
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_read_at);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_read_at(fh, offset, buf,
        count, datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_INDEP_READS, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
DARSHAN_WRAPPER_MAP(PMPI_File_read_at, int, (MPI_File fh, MPI_Offset offset, void *buf,
    int count, MPI_Datatype datatype, MPI_Status *status), MPI_File_read_at(fh, offset, buf, count, datatype, status))

#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_write_at)(MPI_File fh, MPI_Offset offset, const void *buf,
    int count, MPI_Datatype datatype, MPI_Status *status)
#else
int DARSHAN_DECL(MPI_File_write_at)(MPI_File fh, MPI_Offset offset, void *buf,
    int count, MPI_Datatype datatype, MPI_Status *status)
#endif
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_write_at);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_write_at(fh, offset, buf,
        count, datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_INDEP_WRITES, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_at, int, (MPI_File fh, MPI_Offset offset, const void *buf,
    int count, MPI_Datatype datatype, MPI_Status *status), MPI_File_write_at(fh, offset, buf, count, datatype, status))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_at, int, (MPI_File fh, MPI_Offset offset, void *buf,
    int count, MPI_Datatype datatype, MPI_Status *status), MPI_File_write_at(fh, offset, buf, count, datatype, status))
#endif

int DARSHAN_DECL(MPI_File_read_all)(MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_read_all);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_read_all(fh, buf, count,
        datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_COLL_READS, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
DARSHAN_WRAPPER_MAP(PMPI_File_read_all, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
        MPI_File_read_all(fh,buf,count,datatype,status))

#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_write_all)(MPI_File fh, const void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
#else
int DARSHAN_DECL(MPI_File_write_all)(MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
#endif
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_write_all);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_write_all(fh, buf, count,
        datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_COLL_WRITES, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_all, int, (MPI_File fh, const void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
        MPI_File_write_all(fh, buf, count, datatype, status))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_all, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
        MPI_File_write_all(fh, buf, count, datatype, status))
#endif

int DARSHAN_DECL(MPI_File_read_at_all)(MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype, MPI_Status * status)
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_read_at_all);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_read_at_all(fh, offset, buf,
        count, datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_COLL_READS, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
DARSHAN_WRAPPER_MAP(PMPI_File_read_at_all, int, (MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype, MPI_Status * status),
        MPI_File_read_at_all(fh,offset,buf,count,datatype,status))

#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_write_at_all)(MPI_File fh, MPI_Offset offset, const void * buf,
    int count, MPI_Datatype datatype, MPI_Status * status)
#else
int DARSHAN_DECL(MPI_File_write_at_all)(MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype, MPI_Status * status)
#endif
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_write_at_all);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_write_at_all(fh, offset, buf,
        count, datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_COLL_WRITES, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_at_all, int, (MPI_File fh, MPI_Offset offset, const void * buf,
    int count, MPI_Datatype datatype, MPI_Status * status),
        MPI_File_write_at_all(fh, offset, buf, count, datatype, status))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_at_all, int, (MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype, MPI_Status * status),
        MPI_File_write_at_all(fh, offset, buf, count, datatype, status))
#endif

int DARSHAN_DECL(MPI_File_read_shared)(MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_read_shared);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_read_shared(fh, buf, count,
        datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_INDEP_READS, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
DARSHAN_WRAPPER_MAP(PMPI_File_read_shared, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
        MPI_File_read_shared(fh, buf, count, datatype, status))

#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_write_shared)(MPI_File fh, const void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
#else
int DARSHAN_DECL(MPI_File_write_shared)(MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
#endif
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_write_shared);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_write_shared(fh, buf, count,
        datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_INDEP_WRITES, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_shared, int, (MPI_File fh, const void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
        MPI_File_write_shared(fh, buf, count, datatype, status))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_shared, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
        MPI_File_write_shared(fh, buf, count, datatype, status))
#endif


int DARSHAN_DECL(MPI_File_read_ordered)(MPI_File fh, void * buf, int count,
    MPI_Datatype datatype, MPI_Status * status)
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_read_ordered);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_read_ordered(fh, buf, count,
        datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_COLL_READS, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
DARSHAN_WRAPPER_MAP(PMPI_File_read_ordered, int, (MPI_File fh, void * buf, int count,
    MPI_Datatype datatype, MPI_Status * status),
        MPI_File_read_ordered(fh, buf, count, datatype, status))

#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_write_ordered)(MPI_File fh, const void * buf, int count,
    MPI_Datatype datatype, MPI_Status * status)
#else
int DARSHAN_DECL(MPI_File_write_ordered)(MPI_File fh, void * buf, int count,
    MPI_Datatype datatype, MPI_Status * status)
#endif
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_write_ordered);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_write_ordered(fh, buf, count,
        datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_COLL_WRITES, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_ordered, int, (MPI_File fh, const void * buf, int count,
    MPI_Datatype datatype, MPI_Status * status),
        MPI_File_write_ordered(fh, buf, count, datatype, status))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_ordered, int, (MPI_File fh, void * buf, int count,
    MPI_Datatype datatype, MPI_Status * status),
        MPI_File_write_ordered(fh, buf, count, datatype, status))
#endif

int DARSHAN_DECL(MPI_File_read_all_begin)(MPI_File fh, void * buf, int count, MPI_Datatype datatype)
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_read_all_begin);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_read_all_begin(fh, buf, count, datatype);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_SPLIT_READS, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
DARSHAN_WRAPPER_MAP(PMPI_File_read_all_begin, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype),
        MPI_File_read_all_begin(fh, buf, count, datatype))

#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_write_all_begin)(MPI_File fh, const void * buf, int count, MPI_Datatype datatype)
#else
int DARSHAN_DECL(MPI_File_write_all_begin)(MPI_File fh, void * buf, int count, MPI_Datatype datatype)
#endif
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_write_all_begin);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_write_all_begin(fh, buf, count, datatype);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_SPLIT_WRITES, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_all_begin, int, (MPI_File fh, const void * buf, int count, MPI_Datatype datatype),
        MPI_File_write_all_begin(fh, buf, count, datatype))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_all_begin, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype),
        MPI_File_write_all_begin(fh, buf, count, datatype))
#endif

int DARSHAN_DECL(MPI_File_read_at_all_begin)(MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype)
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_read_at_all_begin);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_read_at_all_begin(fh, offset, buf,
        count, datatype);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_SPLIT_READS, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
DARSHAN_WRAPPER_MAP(PMPI_File_read_at_all_begin, int, (MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype), MPI_File_read_at_all_begin(fh, offset, buf, count,
        datatype))

#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_write_at_all_begin)(MPI_File fh, MPI_Offset offset, const void * buf,
    int count, MPI_Datatype datatype)
#else
int DARSHAN_DECL(MPI_File_write_at_all_begin)(MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype)
#endif
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_write_at_all_begin);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_write_at_all_begin(fh, offset,
        buf, count, datatype);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_SPLIT_WRITES, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_at_all_begin, int, (MPI_File fh, MPI_Offset offset, const void * buf,
    int count, MPI_Datatype datatype), MPI_File_write_at_all_begin(fh, offset, buf, count, datatype))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_at_all_begin, int, (MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype), MPI_File_write_at_all_begin(fh, offset, buf, count, datatype))
#endif

int DARSHAN_DECL(MPI_File_read_ordered_begin)(MPI_File fh, void * buf, int count, MPI_Datatype datatype)
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_read_ordered_begin);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_read_ordered_begin(fh, buf, count,
        datatype);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_SPLIT_READS, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
DARSHAN_WRAPPER_MAP(PMPI_File_read_ordered_begin, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype),
        MPI_File_read_ordered_begin(fh, buf, count, datatype))

#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_write_ordered_begin)(MPI_File fh, const void * buf, int count, MPI_Datatype datatype)
#else
int DARSHAN_DECL(MPI_File_write_ordered_begin)(MPI_File fh, void * buf, int count, MPI_Datatype datatype)
#endif
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_write_ordered_begin);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_write_ordered_begin(fh, buf, count,
        datatype);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_SPLIT_WRITES, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_ordered_begin, int, (MPI_File fh, const void * buf, int count, MPI_Datatype datatype),
        MPI_File_write_ordered_begin(fh, buf, count, datatype))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_ordered_begin, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype),
        MPI_File_write_ordered_begin(fh, buf, count, datatype))
#endif

int DARSHAN_DECL(MPI_File_iread)(MPI_File fh, void * buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST * request)
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_iread);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_iread(fh, buf, count, datatype, request);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_NB_READS, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
DARSHAN_WRAPPER_MAP(PMPI_File_iread, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST * request),
        MPI_File_iread(fh, buf, count, datatype, request))

#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_iwrite)(MPI_File fh, const void * buf, int count,
    MPI_Datatype datatype, __D_MPI_REQUEST * request)
#else
int DARSHAN_DECL(MPI_File_iwrite)(MPI_File fh, void * buf, int count,
    MPI_Datatype datatype, __D_MPI_REQUEST * request)
#endif
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_iwrite);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_iwrite(fh, buf, count, datatype, request);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_NB_WRITES, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_iwrite, int, (MPI_File fh, const void * buf, int count,
    MPI_Datatype datatype, __D_MPI_REQUEST * request),
        MPI_File_iwrite(fh, buf, count, datatype, request))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_iwrite, int, (MPI_File fh, void * buf, int count,
    MPI_Datatype datatype, __D_MPI_REQUEST * request),
        MPI_File_iwrite(fh, buf, count, datatype, request))
#endif

int DARSHAN_DECL(MPI_File_iread_at)(MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype, __D_MPI_REQUEST *request)
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_iread_at);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_iread_at(fh, offset, buf, count,
        datatype, request);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_NB_READS, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
DARSHAN_WRAPPER_MAP(PMPI_File_iread_at, int, (MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype, __D_MPI_REQUEST *request),
        MPI_File_iread_at(fh, offset, buf, count, datatype, request))

#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_iwrite_at)(MPI_File fh, MPI_Offset offset, const void * buf,
    int count, MPI_Datatype datatype, __D_MPI_REQUEST *request)
#else
int DARSHAN_DECL(MPI_File_iwrite_at)(MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype, __D_MPI_REQUEST *request)
#endif
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_iwrite_at);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_iwrite_at(fh, offset, buf,
        count, datatype, request);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_NB_WRITES, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_iwrite_at, int, (MPI_File fh, MPI_Offset offset, const void * buf,
    int count, MPI_Datatype datatype, __D_MPI_REQUEST *request),
        MPI_File_iwrite_at(fh, offset, buf, count, datatype, request))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_iwrite_at, int, (MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype, __D_MPI_REQUEST *request),
        MPI_File_iwrite_at(fh, offset, buf, count, datatype, request))
#endif

int DARSHAN_DECL(MPI_File_iread_shared)(MPI_File fh, void * buf, int count,
    MPI_Datatype datatype, __D_MPI_REQUEST * request)
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_iread_shared);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_iread_shared(fh, buf, count,
        datatype, request);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_NB_READS, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
DARSHAN_WRAPPER_MAP(PMPI_File_iread_shared, int, (MPI_File fh, void * buf, int count,
    MPI_Datatype datatype, __D_MPI_REQUEST * request),
        MPI_File_iread_shared(fh, buf, count, datatype, request))

#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_iwrite_shared)(MPI_File fh, const void * buf, int count,
    MPI_Datatype datatype, __D_MPI_REQUEST * request)
#else
int DARSHAN_DECL(MPI_File_iwrite_shared)(MPI_File fh, void * buf, int count,
    MPI_Datatype datatype, __D_MPI_REQUEST * request)
#endif
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_iwrite_shared);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_iwrite_shared(fh, buf, count,
        datatype, request);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_NB_WRITES, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_iwrite_shared, int, (MPI_File fh, const void * buf, int count,
    MPI_Datatype datatype, __D_MPI_REQUEST * request),
        MPI_File_iwrite_shared(fh, buf, count, datatype, request))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_iwrite_shared, int, (MPI_File fh, void * buf, int count,
    MPI_Datatype datatype, __D_MPI_REQUEST * request),
        MPI_File_iwrite_shared(fh, buf, count, datatype, request))
#endif

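/* note: MPI_File_sync time is charged to MPIIO_F_WRITE_TIME below,
 * presumably because a sync is normally the tail end of write activity
 */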
int DARSHAN_DECL(MPI_File_sync)(MPI_File fh)
{
    int ret;
    struct mpiio_file_record_ref *rec_ref;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_sync);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_sync(fh);
    tm2 = darshan_core_wtime();

    if(ret == MPI_SUCCESS)
    {
        MPIIO_PRE_RECORD();
        rec_ref = darshan_lookup_record_ref(mpiio_runtime->fh_hash,
            &fh, sizeof(MPI_File));
        if(rec_ref)
        {
            rec_ref->file_rec->counters[MPIIO_SYNCS] += 1;
            DARSHAN_TIMER_INC_NO_OVERLAP(
                rec_ref->file_rec->fcounters[MPIIO_F_WRITE_TIME],
                tm1, tm2, rec_ref->last_write_end);
        }
        MPIIO_POST_RECORD();
    }

    return(ret);
}
DARSHAN_WRAPPER_MAP(PMPI_File_sync, int, (MPI_File fh), MPI_File_sync(fh))

#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_set_view)(MPI_File fh, MPI_Offset disp, MPI_Datatype etype,
    MPI_Datatype filetype, const char *datarep, MPI_Info info)
#else
int DARSHAN_DECL(MPI_File_set_view)(MPI_File fh, MPI_Offset disp, MPI_Datatype etype,
    MPI_Datatype filetype, char *datarep, MPI_Info info)
#endif
{
    int ret;
    struct mpiio_file_record_ref *rec_ref;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_set_view);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_set_view(fh, disp, etype, filetype,
        datarep, info);
    tm2 = darshan_core_wtime();

    if(ret == MPI_SUCCESS)
    {
        MPIIO_PRE_RECORD();
        rec_ref = darshan_lookup_record_ref(mpiio_runtime->fh_hash,
            &fh, sizeof(MPI_File));
        if(rec_ref)
        {
            rec_ref->file_rec->counters[MPIIO_VIEWS] += 1;
            if(info != MPI_INFO_NULL)
            {
                rec_ref->file_rec->counters[MPIIO_HINTS] += 1;
                DARSHAN_TIMER_INC_NO_OVERLAP(
                    rec_ref->file_rec->fcounters[MPIIO_F_META_TIME],
                    tm1, tm2, rec_ref->last_meta_end);
            }
        }
        MPIIO_POST_RECORD();
    }

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_set_view, int, (MPI_File fh, MPI_Offset disp, MPI_Datatype etype,
    MPI_Datatype filetype, const char *datarep, MPI_Info info), MPI_File_set_view(fh, disp, etype, filetype, datarep, info))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_set_view, int, (MPI_File fh, MPI_Offset disp, MPI_Datatype etype,
    MPI_Datatype filetype, char *datarep, MPI_Info info), MPI_File_set_view(fh, disp, etype, filetype, datarep, info))
#endif

int DARSHAN_DECL(MPI_File_close)(MPI_File *fh)
{
    int ret;
    struct mpiio_file_record_ref *rec_ref;
    MPI_File tmp_fh = *fh;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_close);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_close(fh);
    tm2 = darshan_core_wtime();
