/*
 * Copyright (C) 2015 University of Chicago.
 * See COPYRIGHT notice in top-level directory.
 *
 */

#define _XOPEN_SOURCE 500
#define _GNU_SOURCE

#include "darshan-runtime-config.h"
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdarg.h>
#include <string.h>
#include <time.h>
#include <stdlib.h>
#include <errno.h>
#include <search.h>
#include <assert.h>
#include <pthread.h>

#include "darshan.h"
#include "darshan-dynamic.h"
#include "darshan-dxt.h"

DARSHAN_FORWARD_DECL(PMPI_File_close, int, (MPI_File *fh));
DARSHAN_FORWARD_DECL(PMPI_File_iread_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
DARSHAN_FORWARD_DECL(PMPI_File_iread, int, (MPI_File fh, void  *buf, int  count, MPI_Datatype  datatype, __D_MPI_REQUEST  *request));
DARSHAN_FORWARD_DECL(PMPI_File_iread_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_iwrite_at, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#else
DARSHAN_FORWARD_DECL(PMPI_File_iwrite_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_iwrite, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#else
DARSHAN_FORWARD_DECL(PMPI_File_iwrite, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_iwrite_shared, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#else
DARSHAN_FORWARD_DECL(PMPI_File_iwrite_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_open, int, (MPI_Comm comm, const char *filename, int amode, MPI_Info info, MPI_File *fh));
#else
DARSHAN_FORWARD_DECL(PMPI_File_open, int, (MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_File *fh));
#endif
DARSHAN_FORWARD_DECL(PMPI_File_read_all_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
DARSHAN_FORWARD_DECL(PMPI_File_read_all, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
DARSHAN_FORWARD_DECL(PMPI_File_read_at_all, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
DARSHAN_FORWARD_DECL(PMPI_File_read_at_all_begin, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype));
DARSHAN_FORWARD_DECL(PMPI_File_read_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
DARSHAN_FORWARD_DECL(PMPI_File_read, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
DARSHAN_FORWARD_DECL(PMPI_File_read_ordered_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
DARSHAN_FORWARD_DECL(PMPI_File_read_ordered, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
DARSHAN_FORWARD_DECL(PMPI_File_read_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_set_view, int, (MPI_File fh, MPI_Offset disp, MPI_Datatype etype, MPI_Datatype filetype, const char *datarep, MPI_Info info));
#else
DARSHAN_FORWARD_DECL(PMPI_File_set_view, int, (MPI_File fh, MPI_Offset disp, MPI_Datatype etype, MPI_Datatype filetype, char *datarep, MPI_Info info));
#endif
DARSHAN_FORWARD_DECL(PMPI_File_sync, int, (MPI_File fh));
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_all_begin, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_all_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_all, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_all, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_at_all_begin, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_at_all_begin, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_at_all, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_at_all, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_at, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_ordered_begin, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_ordered_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_ordered, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_ordered, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_shared, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#endif

/* The mpiio_file_record_ref structure maintains necessary runtime metadata
 * for the MPIIO file record (darshan_mpiio_file structure, defined in
 * darshan-mpiio-log-format.h) pointed to by 'file_rec'. This metadata
 * assists with the instrumenting of specific statistics in the file record.
 *
 * RATIONALE: the MPIIO module needs to track some stateful, volatile
 * information about each open file (like the current file offset, most recent
 * access time, etc.) to aid in instrumentation, but this information can't be
 * stored in the darshan_mpiio_file struct because we don't want it to appear in
 * the final darshan log file.  We therefore associate an mpiio_file_record_ref
 * struct with each darshan_mpiio_file struct in order to track this information
 * (i.e., the mapping from mpiio_file_record_ref structs to darshan_mpiio_file
 * structs is one-to-one).
 *
 * NOTE: we use the 'darshan_record_ref' interface (in darshan-common) to
 * associate different types of handles with this mpiio_file_record_ref struct.
 * This allows us to index this struct (and the underlying file record) using
 * either the corresponding Darshan record identifier (derived from the filename)
 * or a generated MPI file handle, for instance. So, while there should only
 * be a single Darshan record identifier that indexes an mpiio_file_record_ref,
 * there could be multiple open file handles that index it.
 */
struct mpiio_file_record_ref
{
    struct darshan_mpiio_file *file_rec;
    enum darshan_io_type last_io_type;
    double last_meta_end;
    double last_read_end;
    double last_write_end;
    void *access_root;
    int access_count;
};
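
/* Illustrative sketch (comment only, not compiled): the two lookup paths
 * described above, using the same darshan-common calls that the macros and
 * wrappers below rely on. 'rec_id', 'fh', and 'ref' are hypothetical local
 * variables.
 *
 *   struct mpiio_file_record_ref *ref;
 *
 *   // index by Darshan record id (derived from the file path)
 *   ref = darshan_lookup_record_ref(mpiio_runtime->rec_id_hash,
 *       &rec_id, sizeof(darshan_record_id));
 *
 *   // register and index by an open MPI file handle
 *   darshan_add_record_ref(&(mpiio_runtime->fh_hash), &fh,
 *       sizeof(MPI_File), ref);
 *   ref = darshan_lookup_record_ref(mpiio_runtime->fh_hash,
 *       &fh, sizeof(MPI_File));
 */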

/* The mpiio_runtime structure maintains necessary state for storing
 * MPI-IO file records and for coordinating with darshan-core at
 * shutdown time.
 */
struct mpiio_runtime
{
    void *rec_id_hash;
    void *fh_hash;
    int file_rec_count;
};

static void mpiio_runtime_initialize(
    void);
static struct mpiio_file_record_ref *mpiio_track_new_file_record(
    darshan_record_id rec_id, const char *path);
static void mpiio_finalize_file_records(
    void *rec_ref_p, void *user_ptr);
static void mpiio_cleanup_runtime(
    void);
#ifdef HAVE_MPI
static void mpiio_record_reduction_op(
    void* infile_v, void* inoutfile_v, int *len, MPI_Datatype *datatype);
static void mpiio_shared_record_variance(
    MPI_Comm mod_comm, struct darshan_mpiio_file *inrec_array,
    struct darshan_mpiio_file *outrec_array, int shared_rec_count);
static void mpiio_mpi_redux(
    void *mpiio_buf, MPI_Comm mod_comm,
    darshan_record_id *shared_recs, int shared_rec_count);
#endif
static void mpiio_shutdown(
    void **mpiio_buf, int *mpiio_buf_sz);

static struct mpiio_runtime *mpiio_runtime = NULL;
static pthread_mutex_t mpiio_runtime_mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
static int my_rank = -1;

#define MPIIO_LOCK() pthread_mutex_lock(&mpiio_runtime_mutex)
#define MPIIO_UNLOCK() pthread_mutex_unlock(&mpiio_runtime_mutex)

#define MPIIO_PRE_RECORD() do { \
    MPIIO_LOCK(); \
    if(!darshan_core_disabled_instrumentation()) { \
        if(!mpiio_runtime) { \
            mpiio_runtime_initialize(); \
        } \
        if(mpiio_runtime) break; \
    } \
    MPIIO_UNLOCK(); \
    return(ret); \
} while(0)

#define MPIIO_POST_RECORD() do { \
    MPIIO_UNLOCK(); \
} while(0)
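
/* Illustrative sketch (comment only, not compiled) of the wrapper pattern
 * used throughout this file. Note the control flow: MPIIO_PRE_RECORD()
 * returns 'ret' from the *enclosing* wrapper if instrumentation is disabled
 * or the module fails to initialize, and the MPIIO_RECORD_* macros break
 * out early when the underlying MPI call did not succeed.
 * 'PMPI_File_xxx' stands in for any of the wrapped calls.
 *
 *   MAP_OR_FAIL(PMPI_File_xxx);
 *   tm1 = darshan_core_wtime();
 *   ret = __real_PMPI_File_xxx(...);
 *   tm2 = darshan_core_wtime();
 *   MPIIO_PRE_RECORD();
 *   MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_INDEP_READS, tm1, tm2);
 *   MPIIO_POST_RECORD();
 *   return(ret);
 */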

#define MPIIO_RECORD_OPEN(__ret, __path, __fh, __comm, __mode, __info, __tm1, __tm2) do { \
    darshan_record_id rec_id; \
    struct mpiio_file_record_ref *rec_ref; \
    char *newpath; \
    int comm_size; \
    if(__ret != MPI_SUCCESS) break; \
    newpath = darshan_clean_file_path(__path); \
    if(!newpath) newpath = (char *)__path; \
    if(darshan_core_excluded_path(newpath)) { \
        if(newpath != __path) free(newpath); \
        break; \
    } \
    rec_id = darshan_core_gen_record_id(newpath); \
    rec_ref = darshan_lookup_record_ref(mpiio_runtime->rec_id_hash, &rec_id, sizeof(darshan_record_id)); \
    if(!rec_ref) rec_ref = mpiio_track_new_file_record(rec_id, newpath); \
    if(!rec_ref) { \
        if(newpath != __path) free(newpath); \
        break; \
    } \
    rec_ref->file_rec->counters[MPIIO_MODE] = __mode; \
    PMPI_Comm_size(__comm, &comm_size); \
    if(comm_size == 1) \
        rec_ref->file_rec->counters[MPIIO_INDEP_OPENS] += 1; \
    else \
        rec_ref->file_rec->counters[MPIIO_COLL_OPENS] += 1; \
    if(__info != MPI_INFO_NULL) \
        rec_ref->file_rec->counters[MPIIO_HINTS] += 1; \
    if(rec_ref->file_rec->fcounters[MPIIO_F_OPEN_START_TIMESTAMP] == 0 || \
     rec_ref->file_rec->fcounters[MPIIO_F_OPEN_START_TIMESTAMP] > __tm1) \
        rec_ref->file_rec->fcounters[MPIIO_F_OPEN_START_TIMESTAMP] = __tm1; \
    rec_ref->file_rec->fcounters[MPIIO_F_OPEN_END_TIMESTAMP] = __tm2; \
    DARSHAN_TIMER_INC_NO_OVERLAP(rec_ref->file_rec->fcounters[MPIIO_F_META_TIME], \
        __tm1, __tm2, rec_ref->last_meta_end); \
    darshan_add_record_ref(&(mpiio_runtime->fh_hash), &__fh, sizeof(MPI_File), rec_ref); \
    if(newpath != __path) free(newpath); \
} while(0)

#define MPIIO_RECORD_READ(__ret, __fh, __count, __datatype, __counter, __tm1, __tm2) do { \
    struct mpiio_file_record_ref *rec_ref; \
    int size = 0; \
    double __elapsed = __tm2-__tm1; \
    if(__ret != MPI_SUCCESS) break; \
    rec_ref = darshan_lookup_record_ref(mpiio_runtime->fh_hash, &(__fh), sizeof(MPI_File)); \
    if(!rec_ref) break; \
    PMPI_Type_size(__datatype, &size);  \
    size = size * __count; \
    /* DXT to record detailed read tracing information */ \
    dxt_mpiio_read(rec_ref->file_rec->base_rec.id, size, __tm1, __tm2); \
    DARSHAN_BUCKET_INC(&(rec_ref->file_rec->counters[MPIIO_SIZE_READ_AGG_0_100]), size); \
    darshan_common_val_counter(&rec_ref->access_root, &rec_ref->access_count, size, \
        &(rec_ref->file_rec->counters[MPIIO_ACCESS1_ACCESS]), \
        &(rec_ref->file_rec->counters[MPIIO_ACCESS1_COUNT])); \
    rec_ref->file_rec->counters[MPIIO_BYTES_READ] += size; \
    rec_ref->file_rec->counters[__counter] += 1; \
    if(rec_ref->last_io_type == DARSHAN_IO_WRITE) \
        rec_ref->file_rec->counters[MPIIO_RW_SWITCHES] += 1; \
    rec_ref->last_io_type = DARSHAN_IO_READ; \
    if(rec_ref->file_rec->fcounters[MPIIO_F_READ_START_TIMESTAMP] == 0 || \
     rec_ref->file_rec->fcounters[MPIIO_F_READ_START_TIMESTAMP] > __tm1) \
        rec_ref->file_rec->fcounters[MPIIO_F_READ_START_TIMESTAMP] = __tm1; \
    rec_ref->file_rec->fcounters[MPIIO_F_READ_END_TIMESTAMP] = __tm2; \
    if(rec_ref->file_rec->fcounters[MPIIO_F_MAX_READ_TIME] < __elapsed) { \
        rec_ref->file_rec->fcounters[MPIIO_F_MAX_READ_TIME] = __elapsed; \
        rec_ref->file_rec->counters[MPIIO_MAX_READ_TIME_SIZE] = size; } \
    DARSHAN_TIMER_INC_NO_OVERLAP(rec_ref->file_rec->fcounters[MPIIO_F_READ_TIME], \
        __tm1, __tm2, rec_ref->last_read_end); \
} while(0)

#define MPIIO_RECORD_WRITE(__ret, __fh, __count, __datatype, __counter, __tm1, __tm2) do { \
    struct mpiio_file_record_ref *rec_ref; \
    int size = 0; \
    double __elapsed = __tm2-__tm1; \
    if(__ret != MPI_SUCCESS) break; \
    rec_ref = darshan_lookup_record_ref(mpiio_runtime->fh_hash, &(__fh), sizeof(MPI_File)); \
    if(!rec_ref) break; \
    PMPI_Type_size(__datatype, &size);  \
    size = size * __count; \
    /* DXT to record detailed write tracing information */ \
    dxt_mpiio_write(rec_ref->file_rec->base_rec.id, size, __tm1, __tm2); \
    DARSHAN_BUCKET_INC(&(rec_ref->file_rec->counters[MPIIO_SIZE_WRITE_AGG_0_100]), size); \
    darshan_common_val_counter(&rec_ref->access_root, &rec_ref->access_count, size, \
        &(rec_ref->file_rec->counters[MPIIO_ACCESS1_ACCESS]), \
        &(rec_ref->file_rec->counters[MPIIO_ACCESS1_COUNT])); \
    rec_ref->file_rec->counters[MPIIO_BYTES_WRITTEN] += size; \
    rec_ref->file_rec->counters[__counter] += 1; \
    if(rec_ref->last_io_type == DARSHAN_IO_READ) \
        rec_ref->file_rec->counters[MPIIO_RW_SWITCHES] += 1; \
    rec_ref->last_io_type = DARSHAN_IO_WRITE; \
    if(rec_ref->file_rec->fcounters[MPIIO_F_WRITE_START_TIMESTAMP] == 0 || \
     rec_ref->file_rec->fcounters[MPIIO_F_WRITE_START_TIMESTAMP] > __tm1) \
        rec_ref->file_rec->fcounters[MPIIO_F_WRITE_START_TIMESTAMP] = __tm1; \
    rec_ref->file_rec->fcounters[MPIIO_F_WRITE_END_TIMESTAMP] = __tm2; \
    if(rec_ref->file_rec->fcounters[MPIIO_F_MAX_WRITE_TIME] < __elapsed) { \
        rec_ref->file_rec->fcounters[MPIIO_F_MAX_WRITE_TIME] = __elapsed; \
        rec_ref->file_rec->counters[MPIIO_MAX_WRITE_TIME_SIZE] = size; } \
    DARSHAN_TIMER_INC_NO_OVERLAP(rec_ref->file_rec->fcounters[MPIIO_F_WRITE_TIME], \
        __tm1, __tm2, rec_ref->last_write_end); \
} while(0)

/**********************************************************
 *        Wrappers for MPI-IO functions of interest       *
 **********************************************************/

#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_open)(MPI_Comm comm, const char *filename, int amode, MPI_Info info, MPI_File *fh)
#else
int DARSHAN_DECL(MPI_File_open)(MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_File *fh)
#endif
{
    int ret;
    MPI_File tmp_fh;
    char* tmp;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_open);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_open(comm, filename, amode, info, fh);
    tm2 = darshan_core_wtime();

    /* use the ROMIO approach to strip off a file system prefix if present
     * (e.g., "ufs:/home/user/file" becomes "/home/user/file"); only skip
     * prefixes longer than one character so that windows drive
     * specifications (e.g. c:\...) are left intact
     */
    tmp = strchr(filename, ':');
    if (tmp > filename + 1) {
        filename = tmp + 1;
    }

    MPIIO_PRE_RECORD();
    tmp_fh = *fh;
    MPIIO_RECORD_OPEN(ret, filename, tmp_fh, comm, amode, info, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_open, int,  (MPI_Comm comm, const char *filename, int amode, MPI_Info info, MPI_File *fh), MPI_File_open(comm,filename,amode,info,fh))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_open, int,  (MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_File *fh), MPI_File_open(comm,filename,amode,info,fh))
#endif

int DARSHAN_DECL(MPI_File_read)(MPI_File fh, void *buf, int count,
    MPI_Datatype datatype, MPI_Status *status)
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_read);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_read(fh, buf, count, datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_INDEP_READS, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
DARSHAN_WRAPPER_MAP(PMPI_File_read, int, (MPI_File fh, void *buf, int count,
    MPI_Datatype datatype, MPI_Status *status), MPI_File_read(fh,buf,count,datatype,status))

#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_write)(MPI_File fh, const void *buf, int count,
    MPI_Datatype datatype, MPI_Status *status)
#else
int DARSHAN_DECL(MPI_File_write)(MPI_File fh, void *buf, int count,
    MPI_Datatype datatype, MPI_Status *status)
#endif
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_write);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_write(fh, buf, count, datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_INDEP_WRITES, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write, int, (MPI_File fh, const void *buf, int count,
    MPI_Datatype datatype, MPI_Status *status), MPI_File_write(fh,buf,count,datatype,status))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write, int, (MPI_File fh, void *buf, int count,
    MPI_Datatype datatype, MPI_Status *status), MPI_File_write(fh,buf,count,datatype,status))
#endif

int DARSHAN_DECL(MPI_File_read_at)(MPI_File fh, MPI_Offset offset, void *buf,
    int count, MPI_Datatype datatype, MPI_Status *status)
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_read_at);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_read_at(fh, offset, buf,
        count, datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_INDEP_READS, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
DARSHAN_WRAPPER_MAP(PMPI_File_read_at, int, (MPI_File fh, MPI_Offset offset, void *buf,
    int count, MPI_Datatype datatype, MPI_Status *status), MPI_File_read_at(fh, offset, buf, count, datatype, status))

#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_write_at)(MPI_File fh, MPI_Offset offset, const void *buf,
    int count, MPI_Datatype datatype, MPI_Status *status)
#else
int DARSHAN_DECL(MPI_File_write_at)(MPI_File fh, MPI_Offset offset, void *buf,
    int count, MPI_Datatype datatype, MPI_Status *status)
#endif
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_write_at);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_write_at(fh, offset, buf,
        count, datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_INDEP_WRITES, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_at, int, (MPI_File fh, MPI_Offset offset, const void *buf,
    int count, MPI_Datatype datatype, MPI_Status *status), MPI_File_write_at(fh, offset, buf, count, datatype, status))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_at, int, (MPI_File fh, MPI_Offset offset, void *buf,
    int count, MPI_Datatype datatype, MPI_Status *status), MPI_File_write_at(fh, offset, buf, count, datatype, status))
#endif

int DARSHAN_DECL(MPI_File_read_all)(MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_read_all);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_read_all(fh, buf, count,
        datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_COLL_READS, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
DARSHAN_WRAPPER_MAP(PMPI_File_read_all, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
        MPI_File_read_all(fh,buf,count,datatype,status))

#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_write_all)(MPI_File fh, const void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
#else
int DARSHAN_DECL(MPI_File_write_all)(MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
#endif
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_write_all);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_write_all(fh, buf, count,
        datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_COLL_WRITES, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_all, int, (MPI_File fh, const void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
        MPI_File_write_all(fh, buf, count, datatype, status))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_all, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
        MPI_File_write_all(fh, buf, count, datatype, status))
#endif

int DARSHAN_DECL(MPI_File_read_at_all)(MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype, MPI_Status * status)
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_read_at_all);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_read_at_all(fh, offset, buf,
        count, datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_COLL_READS, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
DARSHAN_WRAPPER_MAP(PMPI_File_read_at_all, int, (MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype, MPI_Status * status),
        MPI_File_read_at_all(fh,offset,buf,count,datatype,status))

#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_write_at_all)(MPI_File fh, MPI_Offset offset, const void * buf,
    int count, MPI_Datatype datatype, MPI_Status * status)
#else
int DARSHAN_DECL(MPI_File_write_at_all)(MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype, MPI_Status * status)
#endif
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_write_at_all);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_write_at_all(fh, offset, buf,
        count, datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_COLL_WRITES, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_at_all, int, (MPI_File fh, MPI_Offset offset, const void * buf,
    int count, MPI_Datatype datatype, MPI_Status * status),
        MPI_File_write_at_all(fh, offset, buf, count, datatype, status))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_at_all, int, (MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype, MPI_Status * status),
        MPI_File_write_at_all(fh, offset, buf, count, datatype, status))
#endif

int DARSHAN_DECL(MPI_File_read_shared)(MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_read_shared);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_read_shared(fh, buf, count,
        datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_INDEP_READS, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
DARSHAN_WRAPPER_MAP(PMPI_File_read_shared, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
        MPI_File_read_shared(fh, buf, count, datatype, status))

#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_write_shared)(MPI_File fh, const void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
#else
int DARSHAN_DECL(MPI_File_write_shared)(MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
#endif
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_write_shared);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_write_shared(fh, buf, count,
        datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_INDEP_WRITES, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_shared, int, (MPI_File fh, const void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
        MPI_File_write_shared(fh, buf, count, datatype, status))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_shared, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
        MPI_File_write_shared(fh, buf, count, datatype, status))
#endif


int DARSHAN_DECL(MPI_File_read_ordered)(MPI_File fh, void * buf, int count,
    MPI_Datatype datatype, MPI_Status * status)
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_read_ordered);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_read_ordered(fh, buf, count,
        datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_COLL_READS, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
DARSHAN_WRAPPER_MAP(PMPI_File_read_ordered, int, (MPI_File fh, void * buf, int count,
    MPI_Datatype datatype, MPI_Status * status),
        MPI_File_read_ordered(fh, buf, count, datatype, status))

#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_write_ordered)(MPI_File fh, const void * buf, int count,
    MPI_Datatype datatype, MPI_Status * status)
#else
int DARSHAN_DECL(MPI_File_write_ordered)(MPI_File fh, void * buf, int count,
    MPI_Datatype datatype, MPI_Status * status)
#endif
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_write_ordered);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_write_ordered(fh, buf, count,
         datatype, status);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_COLL_WRITES, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_ordered, int, (MPI_File fh, const void * buf, int count,
    MPI_Datatype datatype, MPI_Status * status),
        MPI_File_write_ordered(fh, buf, count, datatype, status))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_ordered, int, (MPI_File fh, void * buf, int count,
    MPI_Datatype datatype, MPI_Status * status),
        MPI_File_write_ordered(fh, buf, count, datatype, status))
#endif

int DARSHAN_DECL(MPI_File_read_all_begin)(MPI_File fh, void * buf, int count, MPI_Datatype datatype)
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_read_all_begin);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_read_all_begin(fh, buf, count, datatype);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_SPLIT_READS, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
DARSHAN_WRAPPER_MAP(PMPI_File_read_all_begin, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype),
        MPI_File_read_all_begin(fh, buf, count, datatype))

#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_write_all_begin)(MPI_File fh, const void * buf, int count, MPI_Datatype datatype)
#else
int DARSHAN_DECL(MPI_File_write_all_begin)(MPI_File fh, void * buf, int count, MPI_Datatype datatype)
#endif
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_write_all_begin);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_write_all_begin(fh, buf, count, datatype);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_SPLIT_WRITES, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_all_begin, int, (MPI_File fh, const void * buf, int count, MPI_Datatype datatype),
        MPI_File_write_all_begin(fh, buf, count, datatype))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_all_begin, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype),
        MPI_File_write_all_begin(fh, buf, count, datatype))
#endif

int DARSHAN_DECL(MPI_File_read_at_all_begin)(MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype)
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_read_at_all_begin);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_read_at_all_begin(fh, offset, buf,
        count, datatype);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_SPLIT_READS, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
DARSHAN_WRAPPER_MAP(PMPI_File_read_at_all_begin, int, (MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype), MPI_File_read_at_all_begin(fh, offset, buf, count,
        datatype))

#ifdef HAVE_MPIIO_CONST
int DARSHAN_DECL(MPI_File_write_at_all_begin)(MPI_File fh, MPI_Offset offset, const void * buf,
    int count, MPI_Datatype datatype)
#else
int DARSHAN_DECL(MPI_File_write_at_all_begin)(MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype)
#endif
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_write_at_all_begin);

    tm1 = darshan_core_wtime();
    ret = __real_PMPI_File_write_at_all_begin(fh, offset,
        buf, count, datatype);
    tm2 = darshan_core_wtime();

    MPIIO_PRE_RECORD();
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_SPLIT_WRITES, tm1, tm2);
    MPIIO_POST_RECORD();

    return(ret);
}
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_at_all_begin, int, (MPI_File fh, MPI_Offset offset, const void * buf,
    int count, MPI_Datatype datatype), MPI_File_write_at_all_begin( fh, offset, buf, count, datatype))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_at_all_begin, int, (MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype), MPI_File_write_at_all_begin( fh, offset, buf, count, datatype))
#endif

int DARSHAN_DECL(MPI_File_read_ordered_begin)(MPI_File fh, void * buf, int count, MPI_Datatype datatype)
{
    int ret;
    double tm1, tm2;

    MAP_OR_FAIL(PMPI_File_read_ordered_begin);

    tm1 = darshan_core_wtime();