/*
 * Copyright (C) 2015 University of Chicago.
 * See COPYRIGHT notice in top-level directory.
 *
 */

#define _XOPEN_SOURCE 500
#define _GNU_SOURCE

#include "darshan-runtime-config.h"
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdarg.h>
#include <string.h>
#include <time.h>
#include <stdlib.h>
#include <errno.h>
#include <search.h>
#include <assert.h>
#include <pthread.h>

#include "darshan.h"
#include "darshan-dynamic.h"

DARSHAN_FORWARD_DECL(PMPI_File_close, int, (MPI_File *fh));
DARSHAN_FORWARD_DECL(PMPI_File_iread_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
DARSHAN_FORWARD_DECL(PMPI_File_iread, int, (MPI_File fh, void  *buf, int  count, MPI_Datatype  datatype, __D_MPI_REQUEST  *request));
DARSHAN_FORWARD_DECL(PMPI_File_iread_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_iwrite_at, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#else
DARSHAN_FORWARD_DECL(PMPI_File_iwrite_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_iwrite, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#else
DARSHAN_FORWARD_DECL(PMPI_File_iwrite, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_iwrite_shared, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#else
DARSHAN_FORWARD_DECL(PMPI_File_iwrite_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_open, int, (MPI_Comm comm, const char *filename, int amode, MPI_Info info, MPI_File *fh));
#else
DARSHAN_FORWARD_DECL(PMPI_File_open, int, (MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_File *fh));
#endif
DARSHAN_FORWARD_DECL(PMPI_File_read_all_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
DARSHAN_FORWARD_DECL(PMPI_File_read_all, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
DARSHAN_FORWARD_DECL(PMPI_File_read_at_all, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
DARSHAN_FORWARD_DECL(PMPI_File_read_at_all_begin, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype));
DARSHAN_FORWARD_DECL(PMPI_File_read_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
DARSHAN_FORWARD_DECL(PMPI_File_read, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
DARSHAN_FORWARD_DECL(PMPI_File_read_ordered_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
DARSHAN_FORWARD_DECL(PMPI_File_read_ordered, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
DARSHAN_FORWARD_DECL(PMPI_File_read_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_set_view, int, (MPI_File fh, MPI_Offset disp, MPI_Datatype etype, MPI_Datatype filetype, const char *datarep, MPI_Info info));
#else
DARSHAN_FORWARD_DECL(PMPI_File_set_view, int, (MPI_File fh, MPI_Offset disp, MPI_Datatype etype, MPI_Datatype filetype, char *datarep, MPI_Info info));
#endif
DARSHAN_FORWARD_DECL(PMPI_File_sync, int, (MPI_File fh));
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_all_begin, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_all_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_all, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_all, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_at_all_begin, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_at_all_begin, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_at_all, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_at_all, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_at, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_ordered_begin, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_ordered_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_ordered, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_ordered, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_shared, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#endif

/* The mpiio_file_record_ref structure maintains necessary runtime metadata
 * for the MPIIO file record (darshan_mpiio_file structure, defined in
 * darshan-mpiio-log-format.h) pointed to by 'file_rec'. This metadata
 * assists with the instrumenting of specific statistics in the file record.
 *
 * RATIONALE: the MPIIO module needs to track some stateful, volatile
 * information about each open file (like the current file offset, most recent
 * access time, etc.) to aid in instrumentation, but this information can't be
 * stored in the darshan_mpiio_file struct because we don't want it to appear in
 * the final darshan log file.  We therefore associate an mpiio_file_record_ref
 * struct with each darshan_mpiio_file struct in order to track this information
 * (i.e., the mapping between mpiio_file_record_ref structs and darshan_mpiio_file
 * structs is one-to-one).
 *
 * NOTE: we use the 'darshan_record_ref' interface (in darshan-common) to
 * associate different types of handles with this mpiio_file_record_ref struct.
 * This allows us to index this struct (and the underlying file record) by using
 * either the corresponding Darshan record identifier (derived from the filename)
 * or by a generated MPI file handle, for instance. So, while there should only
 * be a single Darshan record identifier that indexes an mpiio_file_record_ref,
 * there could be multiple open file handles that index it.
 */
struct mpiio_file_record_ref
{
    struct darshan_mpiio_file *file_rec;
    enum darshan_io_type last_io_type;
    double last_meta_end;
    double last_read_end;
    double last_write_end;
    void *access_root;
    int access_count;
};
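
/* Example (illustrative): with the darshan_record_ref interface, the same
 * record ref can be reached through either registered handle type:
 *
 *     struct mpiio_file_record_ref *ref;
 *     ref = darshan_lookup_record_ref(mpiio_runtime->rec_id_hash,
 *         &rec_id, sizeof(darshan_record_id));    by Darshan record id
 *     ref = darshan_lookup_record_ref(mpiio_runtime->fh_hash,
 *         &fh, sizeof(MPI_File));                 by MPI file handle
 *
 * Both tables live in struct mpiio_runtime below; the fh_hash entry is added
 * at open time by MPIIO_RECORD_OPEN() via darshan_add_record_ref(). */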

/* The mpiio_runtime structure maintains necessary state for storing
 * MPI-IO file records and for coordinating with darshan-core at
 * shutdown time.
 */
struct mpiio_runtime
{
    void *rec_id_hash;
    void *fh_hash;
    int file_rec_count;
};

static void mpiio_runtime_initialize(
    void);
static struct mpiio_file_record_ref *mpiio_track_new_file_record(
    darshan_record_id rec_id, const char *path);
static void mpiio_finalize_file_records(
    void *rec_ref_p);
static void mpiio_record_reduction_op(
    void* infile_v, void* inoutfile_v, int *len, MPI_Datatype *datatype);
static void mpiio_shared_record_variance(
    MPI_Comm mod_comm, struct darshan_mpiio_file *inrec_array,
    struct darshan_mpiio_file *outrec_array, int shared_rec_count);
static void mpiio_cleanup_runtime(
    void);

static void mpiio_shutdown(
    MPI_Comm mod_comm, darshan_record_id *shared_recs,
    int shared_rec_count, void **mpiio_buf, int *mpiio_buf_sz);

/* extern DXT function defs */
extern void dxt_mpiio_write(darshan_record_id rec_id, int64_t length,
    double start_time, double end_time);
extern void dxt_mpiio_read(darshan_record_id rec_id, int64_t length,
    double start_time, double end_time);
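
/* Note: the record macros below invoke these DXT hooks when
 * enable_dxt_io_trace is set, capturing a per-operation trace event
 * (record id, size in bytes, start and end timestamps) alongside the
 * module's aggregate counters. */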

static struct mpiio_runtime *mpiio_runtime = NULL;
static pthread_mutex_t mpiio_runtime_mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
static int my_rank = -1;
static int enable_dxt_io_trace = 0;

#define MPIIO_LOCK() pthread_mutex_lock(&mpiio_runtime_mutex)
#define MPIIO_UNLOCK() pthread_mutex_unlock(&mpiio_runtime_mutex)

#define MPIIO_PRE_RECORD() do { \
    MPIIO_LOCK(); \
    if(!darshan_core_disabled_instrumentation()) { \
        if(!mpiio_runtime) { \
            mpiio_runtime_initialize(); \
        } \
        if(mpiio_runtime) break; \
    } \
    MPIIO_UNLOCK(); \
    return(ret); \
} while(0)

#define MPIIO_POST_RECORD() do { \
    MPIIO_UNLOCK(); \
} while(0)
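
/* Usage note: MPIIO_PRE_RECORD() assumes a local 'int ret' already holds the
 * result of the real MPI call. If instrumentation is disabled or the module
 * cannot initialize, it unlocks and returns ret from the enclosing wrapper;
 * otherwise it breaks out of its do/while with the runtime mutex still held,
 * to be released later by MPIIO_POST_RECORD(). */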

#define MPIIO_RECORD_OPEN(__ret, __path, __fh, __comm, __mode, __info, __tm1, __tm2) do { \
    darshan_record_id rec_id; \
    struct mpiio_file_record_ref *rec_ref; \
    char *newpath; \
    int comm_size; \
    if(__ret != MPI_SUCCESS) break; \
    newpath = darshan_clean_file_path(__path); \
    if(!newpath) newpath = (char *)__path; \
    if(darshan_core_excluded_path(newpath)) { \
        if(newpath != __path) free(newpath); \
        break; \
    } \
    rec_id = darshan_core_gen_record_id(newpath); \
    rec_ref = darshan_lookup_record_ref(mpiio_runtime->rec_id_hash, &rec_id, sizeof(darshan_record_id)); \
    if(!rec_ref) rec_ref = mpiio_track_new_file_record(rec_id, newpath); \
    if(!rec_ref) { \
        if(newpath != __path) free(newpath); \
        break; \
    } \
    rec_ref->file_rec->counters[MPIIO_MODE] = __mode; \
    PMPI_Comm_size(__comm, &comm_size); \
    if(comm_size == 1) \
        rec_ref->file_rec->counters[MPIIO_INDEP_OPENS] += 1; \
    else \
        rec_ref->file_rec->counters[MPIIO_COLL_OPENS] += 1; \
    if(__info != MPI_INFO_NULL) \
        rec_ref->file_rec->counters[MPIIO_HINTS] += 1; \
    if(rec_ref->file_rec->fcounters[MPIIO_F_OPEN_START_TIMESTAMP] == 0 || \
     rec_ref->file_rec->fcounters[MPIIO_F_OPEN_START_TIMESTAMP] > __tm1) \
        rec_ref->file_rec->fcounters[MPIIO_F_OPEN_START_TIMESTAMP] = __tm1; \
    rec_ref->file_rec->fcounters[MPIIO_F_OPEN_END_TIMESTAMP] = __tm2; \
    DARSHAN_TIMER_INC_NO_OVERLAP(rec_ref->file_rec->fcounters[MPIIO_F_META_TIME], \
        __tm1, __tm2, rec_ref->last_meta_end); \
    darshan_add_record_ref(&(mpiio_runtime->fh_hash), &__fh, sizeof(MPI_File), rec_ref); \
    if(newpath != __path) free(newpath); \
} while(0)
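
/* Note: darshan_clean_file_path() returns a newly allocated string (with
 * __path itself used as a fallback when it returns NULL), which is why the
 * macro above frees newpath on each exit path only when it differs from
 * __path. */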

#define MPIIO_RECORD_READ(__ret, __fh, __count, __datatype, __counter, __tm1, __tm2) do { \
    struct mpiio_file_record_ref *rec_ref; \
    int size = 0; \
    double __elapsed = __tm2-__tm1; \
    if(__ret != MPI_SUCCESS) break; \
    rec_ref = darshan_lookup_record_ref(mpiio_runtime->fh_hash, &(__fh), sizeof(MPI_File)); \
    if(!rec_ref) break; \
    PMPI_Type_size(__datatype, &size); \
    size = size * __count; \
    /* DXT to record detailed read tracing information */ \
    if(enable_dxt_io_trace) { \
        dxt_mpiio_read(rec_ref->file_rec->base_rec.id, size, __tm1, __tm2); \
    } \
    DARSHAN_BUCKET_INC(&(rec_ref->file_rec->counters[MPIIO_SIZE_READ_AGG_0_100]), size); \
    darshan_common_val_counter(&rec_ref->access_root, &rec_ref->access_count, size, \
        &(rec_ref->file_rec->counters[MPIIO_ACCESS1_ACCESS]), \
        &(rec_ref->file_rec->counters[MPIIO_ACCESS1_COUNT])); \
    rec_ref->file_rec->counters[MPIIO_BYTES_READ] += size; \
    rec_ref->file_rec->counters[__counter] += 1; \
    if(rec_ref->last_io_type == DARSHAN_IO_WRITE) \
        rec_ref->file_rec->counters[MPIIO_RW_SWITCHES] += 1; \
    rec_ref->last_io_type = DARSHAN_IO_READ; \
    if(rec_ref->file_rec->fcounters[MPIIO_F_READ_START_TIMESTAMP] == 0 || \
     rec_ref->file_rec->fcounters[MPIIO_F_READ_START_TIMESTAMP] > __tm1) \
        rec_ref->file_rec->fcounters[MPIIO_F_READ_START_TIMESTAMP] = __tm1; \
    rec_ref->file_rec->fcounters[MPIIO_F_READ_END_TIMESTAMP] = __tm2; \
    if(rec_ref->file_rec->fcounters[MPIIO_F_MAX_READ_TIME] < __elapsed) { \
        rec_ref->file_rec->fcounters[MPIIO_F_MAX_READ_TIME] = __elapsed; \
        rec_ref->file_rec->counters[MPIIO_MAX_READ_TIME_SIZE] = size; } \
    DARSHAN_TIMER_INC_NO_OVERLAP(rec_ref->file_rec->fcounters[MPIIO_F_READ_TIME], \
        __tm1, __tm2, rec_ref->last_read_end); \
} while(0)
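
/* Worked example (illustrative): a read of count = 1000 elements of
 * MPI_DOUBLE gives PMPI_Type_size() -> 8, so size = 8 * 1000 = 8000 bytes.
 * The macro above then adds 8000 to MPIIO_BYTES_READ, lets
 * DARSHAN_BUCKET_INC() bump the access-size histogram bucket covering
 * 8000-byte accesses, and feeds 8000 to darshan_common_val_counter() to
 * track the most common access sizes (MPIIO_ACCESS1_ACCESS/_COUNT). */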

#define MPIIO_RECORD_WRITE(__ret, __fh, __count, __datatype, __counter, __tm1, __tm2) do { \
    struct mpiio_file_record_ref *rec_ref; \
    int size = 0; \
    double __elapsed = __tm2-__tm1; \
    if(__ret != MPI_SUCCESS) break; \
    rec_ref = darshan_lookup_record_ref(mpiio_runtime->fh_hash, &(__fh), sizeof(MPI_File)); \
    if(!rec_ref) break; \
    PMPI_Type_size(__datatype, &size); \
    size = size * __count; \
    /* DXT to record detailed write tracing information */ \
    if(enable_dxt_io_trace) { \
        dxt_mpiio_write(rec_ref->file_rec->base_rec.id, size, __tm1, __tm2); \
    } \
    DARSHAN_BUCKET_INC(&(rec_ref->file_rec->counters[MPIIO_SIZE_WRITE_AGG_0_100]), size); \
    darshan_common_val_counter(&rec_ref->access_root, &rec_ref->access_count, size, \
        &(rec_ref->file_rec->counters[MPIIO_ACCESS1_ACCESS]), \
        &(rec_ref->file_rec->counters[MPIIO_ACCESS1_COUNT])); \
    rec_ref->file_rec->counters[MPIIO_BYTES_WRITTEN] += size; \
    rec_ref->file_rec->counters[__counter] += 1; \
    if(rec_ref->last_io_type == DARSHAN_IO_READ) \
        rec_ref->file_rec->counters[MPIIO_RW_SWITCHES] += 1; \
    rec_ref->last_io_type = DARSHAN_IO_WRITE; \
    if(rec_ref->file_rec->fcounters[MPIIO_F_WRITE_START_TIMESTAMP] == 0 || \
     rec_ref->file_rec->fcounters[MPIIO_F_WRITE_START_TIMESTAMP] > __tm1) \
        rec_ref->file_rec->fcounters[MPIIO_F_WRITE_START_TIMESTAMP] = __tm1; \
    rec_ref->file_rec->fcounters[MPIIO_F_WRITE_END_TIMESTAMP] = __tm2; \
    if(rec_ref->file_rec->fcounters[MPIIO_F_MAX_WRITE_TIME] < __elapsed) { \
        rec_ref->file_rec->fcounters[MPIIO_F_MAX_WRITE_TIME] = __elapsed; \
        rec_ref->file_rec->counters[MPIIO_MAX_WRITE_TIME_SIZE] = size; } \
    DARSHAN_TIMER_INC_NO_OVERLAP(rec_ref->file_rec->fcounters[MPIIO_F_WRITE_TIME], \
        __tm1, __tm2, rec_ref->last_write_end); \
} while(0)

/**********************************************************
 *        Wrappers for MPI-IO functions of interest       * 
 **********************************************************/
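
/* Each wrapper below follows the same pattern (sketch only; MPI_File_xxx
 * stands in for the routine being wrapped):
 *
 *     int DARSHAN_DECL(MPI_File_xxx)(...)
 *     {
 *         int ret;
 *         double tm1, tm2;
 *
 *         MAP_OR_FAIL(PMPI_File_xxx);           resolve the real symbol
 *         tm1 = darshan_core_wtime();
 *         ret = __real_PMPI_File_xxx(...);      forward to the real routine
 *         tm2 = darshan_core_wtime();
 *
 *         MPIIO_PRE_RECORD();                   lock, lazy-init, or bail out
 *         MPIIO_RECORD_READ(ret, fh, ...);      or _WRITE/_OPEN, as appropriate
 *         MPIIO_POST_RECORD();                  unlock
 *         return(ret);
 *     }
 */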
#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_open)(MPI_Comm comm, const char *filename, int amode, MPI_Info info, MPI_File *fh)
#else
int DARSHAN_DECL(MPI_File_open)(MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_File *fh)
#endif
{
    int ret;
    MPI_File tmp_fh;
    char* tmp;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_open);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_open(comm, filename, amode, info, fh);
    tm2 = darshan_core_wtime();

    /* use ROMIO approach to strip prefix if present */
    /* strip off prefix if there is one, but only skip prefixes
     * if they are greater than length one to allow for windows
     * drive specifications (e.g. c:\...) 
     */
    tmp = strchr(filename, ':');
    if (tmp > filename + 1) {
        filename = tmp + 1;
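        /* e.g., a ROMIO-style prefix such as "pvfs2:/scratch/out.dat" is
         * recorded as "/scratch/out.dat", while a one-character drive
         * specification like "c:\data" is left untouched (illustrative
         * paths) */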
    }

    MPIIO_PRE_RECORD();
    tmp_fh = *fh;
    MPIIO_RECORD_OPEN(ret, filename, tmp_fh, comm, amode, info, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_open, int,  (MPI_Comm comm, const char *filename, int amode, MPI_Info info, MPI_File *fh), MPI_File_open(comm,filename,amode,info,fh))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_open, int,  (MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_File *fh), MPI_File_open(comm,filename,amode,info,fh)) 
#endif

int DARSHAN_DECL(MPI_File_read)(MPI_File fh, void *buf, int count,
    MPI_Datatype datatype, MPI_Status *status)
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_read);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_read(fh, buf, count, datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_INDEP_READS, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
DARSHAN_WRAPPER_MAP(PMPI_File_read, int, (MPI_File fh, void *buf, int count,
    MPI_Datatype datatype, MPI_Status *status), MPI_File_read(fh,buf,count,datatype,status))

#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_write)(MPI_File fh, const void *buf, int count,
    MPI_Datatype datatype, MPI_Status *status)
#else
int DARSHAN_DECL(MPI_File_write)(MPI_File fh, void *buf, int count,
    MPI_Datatype datatype, MPI_Status *status)
#endif
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_write);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_write(fh, buf, count, datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_INDEP_WRITES, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write, int, (MPI_File fh, const void *buf, int count,
    MPI_Datatype datatype, MPI_Status *status), MPI_File_write(fh,buf,count,datatype,status))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write, int, (MPI_File fh, void *buf, int count,
    MPI_Datatype datatype, MPI_Status *status), MPI_File_write(fh,buf,count,datatype,status))
#endif

int DARSHAN_DECL(MPI_File_read_at)(MPI_File fh, MPI_Offset offset, void *buf,
    int count, MPI_Datatype datatype, MPI_Status *status)
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_read_at);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_read_at(fh, offset, buf,
        count, datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_INDEP_READS, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
DARSHAN_WRAPPER_MAP(PMPI_File_read_at, int, (MPI_File fh, MPI_Offset offset, void *buf,
    int count, MPI_Datatype datatype, MPI_Status *status), MPI_File_read_at(fh, offset, buf, count, datatype, status))

#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_write_at)(MPI_File fh, MPI_Offset offset, const void *buf,
    int count, MPI_Datatype datatype, MPI_Status *status)
#else
int DARSHAN_DECL(MPI_File_write_at)(MPI_File fh, MPI_Offset offset, void *buf,
    int count, MPI_Datatype datatype, MPI_Status *status)
#endif
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_write_at);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_write_at(fh, offset, buf,
        count, datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_INDEP_WRITES, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_at, int, (MPI_File fh, MPI_Offset offset, const void *buf,
    int count, MPI_Datatype datatype, MPI_Status *status), MPI_File_write_at(fh, offset, buf, count, datatype, status))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_at, int, (MPI_File fh, MPI_Offset offset, void *buf,
    int count, MPI_Datatype datatype, MPI_Status *status), MPI_File_write_at(fh, offset, buf, count, datatype, status))
#endif

int DARSHAN_DECL(MPI_File_read_all)(MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_read_all);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_read_all(fh, buf, count,
        datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_COLL_READS, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
DARSHAN_WRAPPER_MAP(PMPI_File_read_all, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
        MPI_File_read_all(fh,buf,count,datatype,status))

#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_write_all)(MPI_File fh, const void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
#else
int DARSHAN_DECL(MPI_File_write_all)(MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
#endif
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_write_all);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_write_all(fh, buf, count,
        datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_COLL_WRITES, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_all, int, (MPI_File fh, const void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
        MPI_File_write_all(fh, buf, count, datatype, status))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_all, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
        MPI_File_write_all(fh, buf, count, datatype, status))
#endif

int DARSHAN_DECL(MPI_File_read_at_all)(MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype, MPI_Status * status)
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_read_at_all);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_read_at_all(fh, offset, buf,
        count, datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_COLL_READS, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
DARSHAN_WRAPPER_MAP(PMPI_File_read_at_all, int, (MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype, MPI_Status * status),
        MPI_File_read_at_all(fh,offset,buf,count,datatype,status))

#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_write_at_all)(MPI_File fh, MPI_Offset offset, const void * buf,
    int count, MPI_Datatype datatype, MPI_Status * status)
#else
int DARSHAN_DECL(MPI_File_write_at_all)(MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype, MPI_Status * status)
#endif
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_write_at_all);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_write_at_all(fh, offset, buf,
        count, datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_COLL_WRITES, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_at_all, int, (MPI_File fh, MPI_Offset offset, const void * buf,
    int count, MPI_Datatype datatype, MPI_Status * status),
        MPI_File_write_at_all(fh, offset, buf, count, datatype, status))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_at_all, int, (MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype, MPI_Status * status),
        MPI_File_write_at_all(fh, offset, buf, count, datatype, status))
#endif

int DARSHAN_DECL(MPI_File_read_shared)(MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_read_shared);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_read_shared(fh, buf, count,
        datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_INDEP_READS, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
DARSHAN_WRAPPER_MAP(PMPI_File_read_shared, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
        MPI_File_read_shared(fh, buf, count, datatype, status))

#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_write_shared)(MPI_File fh, const void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
#else
int DARSHAN_DECL(MPI_File_write_shared)(MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
#endif
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_write_shared);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_write_shared(fh, buf, count,
        datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_INDEP_WRITES, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_shared, int, (MPI_File fh, const void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
        MPI_File_write_shared(fh, buf, count, datatype, status))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_shared, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
        MPI_File_write_shared(fh, buf, count, datatype, status))
#endif


int DARSHAN_DECL(MPI_File_read_ordered)(MPI_File fh, void * buf, int count,
    MPI_Datatype datatype, MPI_Status * status)
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_read_ordered);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_read_ordered(fh, buf, count,
        datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_COLL_READS, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
DARSHAN_WRAPPER_MAP(PMPI_File_read_ordered, int, (MPI_File fh, void * buf, int count,
    MPI_Datatype datatype, MPI_Status * status),
        MPI_File_read_ordered(fh, buf, count, datatype, status))

#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_write_ordered)(MPI_File fh, const void * buf, int count,
    MPI_Datatype datatype, MPI_Status * status)
#else
int DARSHAN_DECL(MPI_File_write_ordered)(MPI_File fh, void * buf, int count,
    MPI_Datatype datatype, MPI_Status * status)
#endif
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_write_ordered);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_write_ordered(fh, buf, count,
        datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_COLL_WRITES, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_ordered, int, (MPI_File fh, const void * buf, int count,
    MPI_Datatype datatype, MPI_Status * status),
        MPI_File_write_ordered(fh, buf, count, datatype, status))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_ordered, int, (MPI_File fh, void * buf, int count,
    MPI_Datatype datatype, MPI_Status * status),
        MPI_File_write_ordered(fh, buf, count, datatype, status))
#endif

int DARSHAN_DECL(MPI_File_read_all_begin)(MPI_File fh, void * buf, int count, MPI_Datatype datatype)
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_read_all_begin);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_read_all_begin(fh, buf, count, datatype);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_SPLIT_READS, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
DARSHAN_WRAPPER_MAP(PMPI_File_read_all_begin, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype),
        MPI_File_read_all_begin(fh, buf, count, datatype))

#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_write_all_begin)(MPI_File fh, const void * buf, int count, MPI_Datatype datatype)
#else
int DARSHAN_DECL(MPI_File_write_all_begin)(MPI_File fh, void * buf, int count, MPI_Datatype datatype)
#endif
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_write_all_begin);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_write_all_begin(fh, buf, count, datatype);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_SPLIT_WRITES, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_all_begin, int, (MPI_File fh, const void * buf, int count, MPI_Datatype datatype),
        MPI_File_write_all_begin(fh, buf, count, datatype))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_all_begin, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype),
        MPI_File_write_all_begin(fh, buf, count, datatype))
#endif

int DARSHAN_DECL(MPI_File_read_at_all_begin)(MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype)
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_read_at_all_begin);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_read_at_all_begin(fh, offset, buf,
        count, datatype);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_SPLIT_READS, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
DARSHAN_WRAPPER_MAP(PMPI_File_read_at_all_begin, int, (MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype), MPI_File_read_at_all_begin(fh, offset, buf, count,
        datatype))

#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_write_at_all_begin)(MPI_File fh, MPI_Offset offset, const void * buf,
    int count, MPI_Datatype datatype)
#else
int DARSHAN_DECL(MPI_File_write_at_all_begin)(MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype)
#endif
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_write_at_all_begin);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_write_at_all_begin(fh, offset,
        buf, count, datatype);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_SPLIT_WRITES, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_at_all_begin, int, (MPI_File fh, MPI_Offset offset, const void * buf,
    int count, MPI_Datatype datatype), MPI_File_write_at_all_begin( fh, offset, buf, count, datatype))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_at_all_begin, int, (MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype), MPI_File_write_at_all_begin( fh, offset, buf, count, datatype))
#endif

int DARSHAN_DECL(MPI_File_read_ordered_begin)(MPI_File fh, void * buf, int count, MPI_Datatype datatype)
{
    int ret;
    double tm1, tm2;