darshan-mpiio.c 60.2 KB
Newer Older
Philip Carns's avatar
Philip Carns committed
1
/*
Shane Snyder's avatar
Shane Snyder committed
2 3 4
 * Copyright (C) 2015 University of Chicago.
 * See COPYRIGHT notice in top-level directory.
 *
Philip Carns's avatar
Philip Carns committed
5 6
 */

7 8 9
#define _XOPEN_SOURCE 500
#define _GNU_SOURCE

Philip Carns's avatar
Philip Carns committed
10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25
#include "darshan-runtime-config.h"
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdarg.h>
#include <string.h>
#include <time.h>
#include <stdlib.h>
#include <errno.h>
#include <search.h>
#include <assert.h>
#include <pthread.h>

#include "darshan.h"
Shane Snyder's avatar
Shane Snyder committed
26
#include "darshan-dynamic.h"
Shane Snyder's avatar
Shane Snyder committed
27
#include "darshan-dxt.h"
Philip Carns's avatar
Philip Carns committed
28

29 30 31 32
/* Forward declarations for the PMPI MPI-IO entry points wrapped by this
 * module.  HAVE_MPIIO_CONST selects the const-qualified prototypes used by
 * MPI implementations following MPI-3 const buffer semantics; the non-const
 * variants match older implementations. */
DARSHAN_FORWARD_DECL(PMPI_File_close, int, (MPI_File *fh));
DARSHAN_FORWARD_DECL(PMPI_File_iread_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
DARSHAN_FORWARD_DECL(PMPI_File_iread, int, (MPI_File fh, void  *buf, int  count, MPI_Datatype  datatype, __D_MPI_REQUEST  *request));
DARSHAN_FORWARD_DECL(PMPI_File_iread_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_iwrite_at, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#else
DARSHAN_FORWARD_DECL(PMPI_File_iwrite_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_iwrite, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#else
DARSHAN_FORWARD_DECL(PMPI_File_iwrite, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_iwrite_shared, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#else
DARSHAN_FORWARD_DECL(PMPI_File_iwrite_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_open, int, (MPI_Comm comm, const char *filename, int amode, MPI_Info info, MPI_File *fh));
#else
DARSHAN_FORWARD_DECL(PMPI_File_open, int, (MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_File *fh));
#endif
DARSHAN_FORWARD_DECL(PMPI_File_read_all_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
DARSHAN_FORWARD_DECL(PMPI_File_read_all, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
DARSHAN_FORWARD_DECL(PMPI_File_read_at_all, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
DARSHAN_FORWARD_DECL(PMPI_File_read_at_all_begin, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype));
DARSHAN_FORWARD_DECL(PMPI_File_read_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
DARSHAN_FORWARD_DECL(PMPI_File_read, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
DARSHAN_FORWARD_DECL(PMPI_File_read_ordered_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
DARSHAN_FORWARD_DECL(PMPI_File_read_ordered, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
DARSHAN_FORWARD_DECL(PMPI_File_read_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_set_view, int, (MPI_File fh, MPI_Offset disp, MPI_Datatype etype, MPI_Datatype filetype, const char *datarep, MPI_Info info));
#else
DARSHAN_FORWARD_DECL(PMPI_File_set_view, int, (MPI_File fh, MPI_Offset disp, MPI_Datatype etype, MPI_Datatype filetype, char *datarep, MPI_Info info));
#endif
DARSHAN_FORWARD_DECL(PMPI_File_sync, int, (MPI_File fh));
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_all_begin, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_all_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_all, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_all, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_at_all_begin, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_at_all_begin, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_at_all, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_at_all, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_at, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_ordered_begin, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_ordered_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_ordered, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_ordered, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#endif
#ifdef HAVE_MPIIO_CONST
DARSHAN_FORWARD_DECL(PMPI_File_write_shared, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#else
DARSHAN_FORWARD_DECL(PMPI_File_write_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
#endif

114
/* The mpiio_file_record_ref structure maintains necessary runtime metadata
115
 * for the MPIIO file record (darshan_mpiio_file structure, defined in
116
 * darshan-mpiio-log-format.h) pointed to by 'file_rec'. This metadata
117 118 119 120 121 122
 * assists with the instrumenting of specific statistics in the file record.
 *
 * RATIONALE: the MPIIO module needs to track some stateful, volatile 
 * information about each open file (like the current file offset, most recent 
 * access time, etc.) to aid in instrumentation, but this information can't be
 * stored in the darshan_mpiio_file struct because we don't want it to appear in
123 124 125 126
 * the final darshan log file.  We therefore associate a mpiio_file_record_ref
 * struct with each darshan_mpiio_file struct in order to track this information
 * (i.e., the mapping between mpiio_file_record_ref structs to darshan_mpiio_file
 * structs is one-to-one).
127
 *
128 129 130 131 132 133 134
 * NOTE: we use the 'darshan_record_ref' interface (in darshan-common) to
 * associate different types of handles with this mpiio_file_record_ref struct.
 * This allows us to index this struct (and the underlying file record) by using
 * either the corresponding Darshan record identifier (derived from the filename)
 * or by a generated MPI file handle, for instance. So, while there should only
 * be a single Darshan record identifier that indexes a mpiio_file_record_ref,
 * there could be multiple open file handles that index it.
135
 */
136
struct mpiio_file_record_ref
Philip Carns's avatar
Philip Carns committed
137
{
138
    struct darshan_mpiio_file *file_rec;
Shane Snyder's avatar
Shane Snyder committed
139
    enum darshan_io_type last_io_type;
140 141 142
    double last_meta_end;
    double last_read_end;
    double last_write_end;
143 144
    void *access_root;
    int access_count;
Philip Carns's avatar
Philip Carns committed
145 146
};

147 148 149 150
/* The mpiio_runtime structure maintains necessary state for storing
 * MPI-IO file records and for coordinating with darshan-core at
 * shutdown time.
 */
struct mpiio_runtime
{
    void *rec_id_hash;   /* record refs indexed by Darshan record id */
    void *fh_hash;       /* record refs indexed by MPI_File handle */
    int file_rec_count;  /* number of file records tracked so far */
};

158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175
/* Internal helpers and the module shutdown hook; see definitions below for
 * the full contract of each. */
static void mpiio_runtime_initialize(
    void);
static struct mpiio_file_record_ref *mpiio_track_new_file_record(
    darshan_record_id rec_id, const char *path);
static void mpiio_finalize_file_records(
    void *rec_ref_p);
static void mpiio_record_reduction_op(
    void* infile_v, void* inoutfile_v, int *len, MPI_Datatype *datatype);
static void mpiio_shared_record_variance(
    MPI_Comm mod_comm, struct darshan_mpiio_file *inrec_array,
    struct darshan_mpiio_file *outrec_array, int shared_rec_count);
static void mpiio_cleanup_runtime(
    void);

static void mpiio_shutdown(
    MPI_Comm mod_comm, darshan_record_id *shared_recs,
    int shared_rec_count, void **mpiio_buf, int *mpiio_buf_sz);

Philip Carns's avatar
Philip Carns committed
176 177 178 179
/* Module-wide state; all access is serialized by mpiio_runtime_mutex.  The
 * mutex is recursive so that wrapped calls made while the lock is held do
 * not self-deadlock. */
static struct mpiio_runtime *mpiio_runtime = NULL;
static pthread_mutex_t mpiio_runtime_mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
static int my_rank = -1;

#define MPIIO_LOCK() pthread_mutex_lock(&mpiio_runtime_mutex)
#define MPIIO_UNLOCK() pthread_mutex_unlock(&mpiio_runtime_mutex)

183 184
/* MPIIO_PRE_RECORD: take the module lock and lazily initialize the runtime;
 * if instrumentation is disabled or initialization fails, release the lock
 * and return 'ret' from the enclosing wrapper.  Must be paired with
 * MPIIO_POST_RECORD on the success path. */
#define MPIIO_PRE_RECORD() do { \
    MPIIO_LOCK(); \
    if(!darshan_core_disabled_instrumentation()) { \
        if(!mpiio_runtime) { \
            mpiio_runtime_initialize(); \
        } \
        if(mpiio_runtime) break; \
    } \
    MPIIO_UNLOCK(); \
    return(ret); \
} while(0)

/* MPIIO_POST_RECORD: release the module lock taken by MPIIO_PRE_RECORD. */
#define MPIIO_POST_RECORD() do { \
    MPIIO_UNLOCK(); \
} while(0)

199
/* MPIIO_RECORD_OPEN: record a successful MPI_File_open in the file's record.
 * Cleans and exclusion-checks the path, finds or creates the record ref,
 * classifies the open as independent (comm size 1) or collective, counts
 * hint usage, updates open timestamps and metadata time, and registers the
 * new MPI_File handle so later ops on '__fh' can find this record. */
#define MPIIO_RECORD_OPEN(__ret, __path, __fh, __comm, __mode, __info, __tm1, __tm2) do { \
    darshan_record_id rec_id; \
    struct mpiio_file_record_ref *rec_ref; \
    char *newpath; \
    int comm_size; \
    if(__ret != MPI_SUCCESS) break; \
    newpath = darshan_clean_file_path(__path); \
    if(!newpath) newpath = (char *)__path; \
    if(darshan_core_excluded_path(newpath)) { \
        if(newpath != __path) free(newpath); \
        break; \
    } \
    rec_id = darshan_core_gen_record_id(newpath); \
    rec_ref = darshan_lookup_record_ref(mpiio_runtime->rec_id_hash, &rec_id, sizeof(darshan_record_id)); \
    if(!rec_ref) rec_ref = mpiio_track_new_file_record(rec_id, newpath); \
    if(!rec_ref) { \
        if(newpath != __path) free(newpath); \
        break; \
    } \
    rec_ref->file_rec->counters[MPIIO_MODE] = __mode; \
    PMPI_Comm_size(__comm, &comm_size); \
    if(comm_size == 1) \
        rec_ref->file_rec->counters[MPIIO_INDEP_OPENS] += 1; \
    else \
        rec_ref->file_rec->counters[MPIIO_COLL_OPENS] += 1; \
    if(__info != MPI_INFO_NULL) \
        rec_ref->file_rec->counters[MPIIO_HINTS] += 1; \
    if(rec_ref->file_rec->fcounters[MPIIO_F_OPEN_START_TIMESTAMP] == 0 || \
     rec_ref->file_rec->fcounters[MPIIO_F_OPEN_START_TIMESTAMP] > __tm1) \
        rec_ref->file_rec->fcounters[MPIIO_F_OPEN_START_TIMESTAMP] = __tm1; \
    rec_ref->file_rec->fcounters[MPIIO_F_OPEN_END_TIMESTAMP] = __tm2; \
    DARSHAN_TIMER_INC_NO_OVERLAP(rec_ref->file_rec->fcounters[MPIIO_F_META_TIME], \
        __tm1, __tm2, rec_ref->last_meta_end); \
    darshan_add_record_ref(&(mpiio_runtime->fh_hash), &__fh, sizeof(MPI_File), rec_ref); \
    if(newpath != __path) free(newpath); \
} while(0)

236
/* MPIIO_RECORD_READ: record a successful read of '__count' elements of
 * '__datatype' on handle '__fh'.  Computes the transfer size, forwards it
 * to DXT tracing, updates size histograms, common-access counters, byte and
 * op counters, read/write switch tracking, read timestamps, the max read
 * time, and cumulative (non-overlapping) read time.  '__counter' selects
 * which op counter (indep/coll/split/nb) is incremented. */
#define MPIIO_RECORD_READ(__ret, __fh, __count, __datatype, __counter, __tm1, __tm2) do { \
    struct mpiio_file_record_ref *rec_ref; \
    int size = 0; \
    double __elapsed = __tm2-__tm1; \
    if(__ret != MPI_SUCCESS) break; \
    rec_ref = darshan_lookup_record_ref(mpiio_runtime->fh_hash, &(__fh), sizeof(MPI_File)); \
    if(!rec_ref) break; \
    PMPI_Type_size(__datatype, &size);  \
    size = size * __count; \
    /* DXT to record detailed read tracing information */ \
    dxt_mpiio_read(rec_ref->file_rec->base_rec.id, size, __tm1, __tm2); \
    DARSHAN_BUCKET_INC(&(rec_ref->file_rec->counters[MPIIO_SIZE_READ_AGG_0_100]), size); \
    darshan_common_val_counter(&rec_ref->access_root, &rec_ref->access_count, size, \
        &(rec_ref->file_rec->counters[MPIIO_ACCESS1_ACCESS]), \
        &(rec_ref->file_rec->counters[MPIIO_ACCESS1_COUNT])); \
    rec_ref->file_rec->counters[MPIIO_BYTES_READ] += size; \
    rec_ref->file_rec->counters[__counter] += 1; \
    if(rec_ref->last_io_type == DARSHAN_IO_WRITE) \
        rec_ref->file_rec->counters[MPIIO_RW_SWITCHES] += 1; \
    rec_ref->last_io_type = DARSHAN_IO_READ; \
    if(rec_ref->file_rec->fcounters[MPIIO_F_READ_START_TIMESTAMP] == 0 || \
     rec_ref->file_rec->fcounters[MPIIO_F_READ_START_TIMESTAMP] > __tm1) \
        rec_ref->file_rec->fcounters[MPIIO_F_READ_START_TIMESTAMP] = __tm1; \
    rec_ref->file_rec->fcounters[MPIIO_F_READ_END_TIMESTAMP] = __tm2; \
    if(rec_ref->file_rec->fcounters[MPIIO_F_MAX_READ_TIME] < __elapsed) { \
        rec_ref->file_rec->fcounters[MPIIO_F_MAX_READ_TIME] = __elapsed; \
        rec_ref->file_rec->counters[MPIIO_MAX_READ_TIME_SIZE] = size; } \
    DARSHAN_TIMER_INC_NO_OVERLAP(rec_ref->file_rec->fcounters[MPIIO_F_READ_TIME], \
        __tm1, __tm2, rec_ref->last_read_end); \
} while(0)
266

267
/* MPIIO_RECORD_WRITE: write-side counterpart of MPIIO_RECORD_READ; records
 * a successful write of '__count' elements of '__datatype' on '__fh',
 * updating the write histograms, byte/op counters, switch tracking,
 * timestamps, max write time, and cumulative write time. */
#define MPIIO_RECORD_WRITE(__ret, __fh, __count, __datatype, __counter, __tm1, __tm2) do { \
    struct mpiio_file_record_ref *rec_ref; \
    int size = 0; \
    double __elapsed = __tm2-__tm1; \
    if(__ret != MPI_SUCCESS) break; \
    rec_ref = darshan_lookup_record_ref(mpiio_runtime->fh_hash, &(__fh), sizeof(MPI_File)); \
    if(!rec_ref) break; \
    PMPI_Type_size(__datatype, &size);  \
    size = size * __count; \
    /* DXT to record detailed write tracing information */ \
    dxt_mpiio_write(rec_ref->file_rec->base_rec.id, size, __tm1, __tm2); \
    DARSHAN_BUCKET_INC(&(rec_ref->file_rec->counters[MPIIO_SIZE_WRITE_AGG_0_100]), size); \
    darshan_common_val_counter(&rec_ref->access_root, &rec_ref->access_count, size, \
        &(rec_ref->file_rec->counters[MPIIO_ACCESS1_ACCESS]), \
        &(rec_ref->file_rec->counters[MPIIO_ACCESS1_COUNT])); \
    rec_ref->file_rec->counters[MPIIO_BYTES_WRITTEN] += size; \
    rec_ref->file_rec->counters[__counter] += 1; \
    if(rec_ref->last_io_type == DARSHAN_IO_READ) \
        rec_ref->file_rec->counters[MPIIO_RW_SWITCHES] += 1; \
    rec_ref->last_io_type = DARSHAN_IO_WRITE; \
    if(rec_ref->file_rec->fcounters[MPIIO_F_WRITE_START_TIMESTAMP] == 0 || \
     rec_ref->file_rec->fcounters[MPIIO_F_WRITE_START_TIMESTAMP] > __tm1) \
        rec_ref->file_rec->fcounters[MPIIO_F_WRITE_START_TIMESTAMP] = __tm1; \
    rec_ref->file_rec->fcounters[MPIIO_F_WRITE_END_TIMESTAMP] = __tm2; \
    if(rec_ref->file_rec->fcounters[MPIIO_F_MAX_WRITE_TIME] < __elapsed) { \
        rec_ref->file_rec->fcounters[MPIIO_F_MAX_WRITE_TIME] = __elapsed; \
        rec_ref->file_rec->counters[MPIIO_MAX_WRITE_TIME_SIZE] = size; } \
    DARSHAN_TIMER_INC_NO_OVERLAP(rec_ref->file_rec->fcounters[MPIIO_F_WRITE_TIME], \
        __tm1, __tm2, rec_ref->last_write_end); \
} while(0)

298 299 300
/**********************************************************
 *        Wrappers for MPI-IO functions of interest       * 
 **********************************************************/
Philip Carns's avatar
Philip Carns committed
301

Philip Carns's avatar
Philip Carns committed
302
#ifdef HAVE_MPIIO_CONST
303
int DARSHAN_DECL(MPI_File_open)(MPI_Comm comm, const char *filename, int amode, MPI_Info info, MPI_File *fh) 
Philip Carns's avatar
Philip Carns committed
304
#else
305
int DARSHAN_DECL(MPI_File_open)(MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_File *fh) 
Philip Carns's avatar
Philip Carns committed
306 307 308
#endif
{
    int ret;
309
    MPI_File tmp_fh;
Philip Carns's avatar
Philip Carns committed
310 311 312
    char* tmp;
    double tm1, tm2;

313
    MAP_OR_FAIL(PMPI_File_open);
314

Philip Carns's avatar
Philip Carns committed
315
    tm1 = darshan_core_wtime();
316
    ret = __real_PMPI_File_open(comm, filename, amode, info, fh);
Philip Carns's avatar
Philip Carns committed
317 318
    tm2 = darshan_core_wtime();

319 320 321 322 323 324 325 326
    /* use ROMIO approach to strip prefix if present */
    /* strip off prefix if there is one, but only skip prefixes
     * if they are greater than length one to allow for windows
     * drive specifications (e.g. c:\...) 
     */
    tmp = strchr(filename, ':');
    if (tmp > filename + 1) {
        filename = tmp + 1;
Philip Carns's avatar
Philip Carns committed
327 328
    }

329 330 331 332 333
    MPIIO_PRE_RECORD();
    tmp_fh = *fh;
    MPIIO_RECORD_OPEN(ret, filename, tmp_fh, comm, amode, info, tm1, tm2);
    MPIIO_POST_RECORD();

Philip Carns's avatar
Philip Carns committed
334 335
    return(ret);
}
336 337 338 339 340
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_open, int,  (MPI_Comm comm, const char *filename, int amode, MPI_Info info, MPI_File *fh), MPI_File_open(comm,filename,amode,info,fh))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_open, int,  (MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_File *fh), MPI_File_open(comm,filename,amode,info,fh)) 
#endif
341

342
int DARSHAN_DECL(MPI_File_read)(MPI_File fh, void *buf, int count,
343 344 345 346 347
    MPI_Datatype datatype, MPI_Status *status)
{
    int ret;
    double tm1, tm2;

348
    MAP_OR_FAIL(PMPI_File_read);
349

350
    tm1 = darshan_core_wtime();
351
    ret = __real_PMPI_File_read(fh, buf, count, datatype, status);
352 353
    tm2 = darshan_core_wtime();

354
    MPIIO_PRE_RECORD();
355
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_INDEP_READS, tm1, tm2);
356 357
    MPIIO_POST_RECORD();

358 359
    return(ret);
}
360 361
DARSHAN_WRAPPER_MAP(PMPI_File_read, int, (MPI_File fh, void *buf, int count,
    MPI_Datatype datatype, MPI_Status *status), MPI_File_read(fh,buf,count,datatype,status))
362 363

#ifdef HAVE_MPIIO_CONST
364
int DARSHAN_DECL(MPI_File_write)(MPI_File fh, const void *buf, int count,
365 366
    MPI_Datatype datatype, MPI_Status *status)
#else
367
int DARSHAN_DECL(MPI_File_write)(MPI_File fh, void *buf, int count,
368 369 370 371 372 373
    MPI_Datatype datatype, MPI_Status *status)
#endif
{
    int ret;
    double tm1, tm2;

374
    MAP_OR_FAIL(PMPI_File_write);
375

376
    tm1 = darshan_core_wtime();
377
    ret = __real_PMPI_File_write(fh, buf, count, datatype, status);
378 379
    tm2 = darshan_core_wtime();

380
    MPIIO_PRE_RECORD();
381
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_INDEP_WRITES, tm1, tm2);
382 383
    MPIIO_POST_RECORD();

384 385
    return(ret);
}
386 387 388 389 390 391 392
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write, int, (MPI_File fh, const void *buf, int count,
    MPI_Datatype datatype, MPI_Status *status), MPI_File_write(fh,buf,count,datatype,status))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write, int, (MPI_File fh, void *buf, int count,
    MPI_Datatype datatype, MPI_Status *status), MPI_File_write(fh,buf,count,datatype,status))
#endif
393

394
int DARSHAN_DECL(MPI_File_read_at)(MPI_File fh, MPI_Offset offset, void *buf,
395 396 397 398 399
    int count, MPI_Datatype datatype, MPI_Status *status)
{
    int ret;
    double tm1, tm2;

400
    MAP_OR_FAIL(PMPI_File_read_at);
401

402
    tm1 = darshan_core_wtime();
403
    ret = __real_PMPI_File_read_at(fh, offset, buf,
404 405 406
        count, datatype, status);
    tm2 = darshan_core_wtime();

407
    MPIIO_PRE_RECORD();
408
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_INDEP_READS, tm1, tm2);
409 410
    MPIIO_POST_RECORD();

411 412
    return(ret);
}
413 414
DARSHAN_WRAPPER_MAP(PMPI_File_read_at, int, (MPI_File fh, MPI_Offset offset, void *buf,
    int count, MPI_Datatype datatype, MPI_Status *status), MPI_File_read_at(fh, offset, buf, count, datatype, status))
415 416

#ifdef HAVE_MPIIO_CONST
417
int DARSHAN_DECL(MPI_File_write_at)(MPI_File fh, MPI_Offset offset, const void *buf,
418 419
    int count, MPI_Datatype datatype, MPI_Status *status)
#else
420
int DARSHAN_DECL(MPI_File_write_at)(MPI_File fh, MPI_Offset offset, void *buf,
421 422 423 424 425 426
    int count, MPI_Datatype datatype, MPI_Status *status)
#endif
{
    int ret;
    double tm1, tm2;

427
    MAP_OR_FAIL(PMPI_File_write_at);
428

429
    tm1 = darshan_core_wtime();
430
    ret = __real_PMPI_File_write_at(fh, offset, buf,
431 432 433
        count, datatype, status);
    tm2 = darshan_core_wtime();

434
    MPIIO_PRE_RECORD();
435
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_INDEP_WRITES, tm1, tm2);
436 437
    MPIIO_POST_RECORD();

438 439
    return(ret);
}
440 441 442 443 444 445 446
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_at, int, (MPI_File fh, MPI_Offset offset, const void *buf,
    int count, MPI_Datatype datatype, MPI_Status *status), MPI_File_write_at(fh, offset, buf, count, datatype, status))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_at, int, (MPI_File fh, MPI_Offset offset, void *buf,
    int count, MPI_Datatype datatype, MPI_Status *status), MPI_File_write_at(fh, offset, buf, count, datatype, status))
#endif
447

448
int DARSHAN_DECL(MPI_File_read_all)(MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
449 450 451 452
{
    int ret;
    double tm1, tm2;

Rob Latham's avatar
Rob Latham committed
453
    MAP_OR_FAIL(PMPI_File_read_all);
454

455
    tm1 = darshan_core_wtime();
456
    ret = __real_PMPI_File_read_all(fh, buf, count,
457 458 459
        datatype, status);
    tm2 = darshan_core_wtime();

460
    MPIIO_PRE_RECORD();
461
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_COLL_READS, tm1, tm2);
462 463
    MPIIO_POST_RECORD();

464 465
    return(ret);
}
466 467
DARSHAN_WRAPPER_MAP(PMPI_File_read_all, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
        MPI_File_read_all(fh,buf,count,datatype,status))
468 469

#ifdef HAVE_MPIIO_CONST
470
int DARSHAN_DECL(MPI_File_write_all)(MPI_File fh, const void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
471
#else
472
int DARSHAN_DECL(MPI_File_write_all)(MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
473 474 475 476 477
#endif
{
    int ret;
    double tm1, tm2;

478
    MAP_OR_FAIL(PMPI_File_write_all);
479

480
    tm1 = darshan_core_wtime();
481
    ret = __real_PMPI_File_write_all(fh, buf, count,
482 483 484
        datatype, status);
    tm2 = darshan_core_wtime();

485
    MPIIO_PRE_RECORD();
486
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_COLL_WRITES, tm1, tm2);
487 488
    MPIIO_POST_RECORD();

489 490
    return(ret);
}
491 492 493 494 495 496 497
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_all, int, (MPI_File fh, const void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
        MPI_File_write_all(fh, buf, count, datatype, status))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_all, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
        MPI_File_write_all(fh, buf, count, datatype, status))
#endif
498

499
int DARSHAN_DECL(MPI_File_read_at_all)(MPI_File fh, MPI_Offset offset, void * buf,
500 501 502 503 504
    int count, MPI_Datatype datatype, MPI_Status * status)
{
    int ret;
    double tm1, tm2;

505
    MAP_OR_FAIL(PMPI_File_read_at_all);
506

507
    tm1 = darshan_core_wtime();
508
    ret = __real_PMPI_File_read_at_all(fh, offset, buf,
509 510 511
        count, datatype, status);
    tm2 = darshan_core_wtime();

512
    MPIIO_PRE_RECORD();
513
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_COLL_READS, tm1, tm2);
514 515
    MPIIO_POST_RECORD();

516 517
    return(ret);
}
518
DARSHAN_WRAPPER_MAP(PMPI_File_read_at_all, int, (MPI_File fh, MPI_Offset offset, void * buf,
519
    int count, MPI_Datatype datatype, MPI_Status * status),
520
        MPI_File_read_at_all(fh,offset,buf,count,datatype,status))
521

522
#ifdef HAVE_MPIIO_CONST
523
int DARSHAN_DECL(MPI_File_write_at_all)(MPI_File fh, MPI_Offset offset, const void * buf,
524 525
    int count, MPI_Datatype datatype, MPI_Status * status)
#else
526
int DARSHAN_DECL(MPI_File_write_at_all)(MPI_File fh, MPI_Offset offset, void * buf,
527 528 529 530 531 532
    int count, MPI_Datatype datatype, MPI_Status * status)
#endif
{
    int ret;
    double tm1, tm2;

533
    MAP_OR_FAIL(PMPI_File_write_at_all);
534

535
    tm1 = darshan_core_wtime();
536
    ret = __real_PMPI_File_write_at_all(fh, offset, buf,
537 538 539
        count, datatype, status);
    tm2 = darshan_core_wtime();

540
    MPIIO_PRE_RECORD();
541
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_COLL_WRITES, tm1, tm2);
542 543
    MPIIO_POST_RECORD();

544 545
    return(ret);
}
546 547 548 549 550 551 552 553 554
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_at_all, int, (MPI_File fh, MPI_Offset offset, const void * buf,
    int count, MPI_Datatype datatype, MPI_Status * status),
        MPI_File_write_at_all(fh, offset, buf, count, datatype, status))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_at_all, int, (MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype, MPI_Status * status),
        MPI_File_write_at_all(fh, offset, buf, count, datatype, status))
#endif
555

556
int DARSHAN_DECL(MPI_File_read_shared)(MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
557 558 559 560
{
    int ret;
    double tm1, tm2;

561
    MAP_OR_FAIL(PMPI_File_read_shared);
562

563
    tm1 = darshan_core_wtime();
564
    ret = __real_PMPI_File_read_shared(fh, buf, count,
565 566 567
        datatype, status);
    tm2 = darshan_core_wtime();

568
    MPIIO_PRE_RECORD();
569
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_INDEP_READS, tm1, tm2);
570 571
    MPIIO_POST_RECORD();

572 573
    return(ret);
}
574 575
DARSHAN_WRAPPER_MAP(PMPI_File_read_shared, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
        MPI_File_read_shared(fh, buf, count, datatype, status))
576 577

#ifdef HAVE_MPIIO_CONST
578
int DARSHAN_DECL(MPI_File_write_shared)(MPI_File fh, const void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
579
#else
580
int DARSHAN_DECL(MPI_File_write_shared)(MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
581 582 583 584 585
#endif
{
    int ret;
    double tm1, tm2;

586
    MAP_OR_FAIL(PMPI_File_write_shared);
587

588
    tm1 = darshan_core_wtime();
589
    ret = __real_PMPI_File_write_shared(fh, buf, count,
590 591 592
        datatype, status);
    tm2 = darshan_core_wtime();

593
    MPIIO_PRE_RECORD();
594
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_INDEP_WRITES, tm1, tm2);
595 596
    MPIIO_POST_RECORD();

597 598
    return(ret);
}
599 600 601 602 603 604 605
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_shared, int, (MPI_File fh, const void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
        MPI_File_write_shared(fh, buf, count, datatype, status))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_shared, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
        MPI_File_write_shared(fh, buf, count, datatype, status))
#endif
606

607

608
int DARSHAN_DECL(MPI_File_read_ordered)(MPI_File fh, void * buf, int count,
609 610 611 612 613
    MPI_Datatype datatype, MPI_Status * status)
{
    int ret;
    double tm1, tm2;

614
    MAP_OR_FAIL(PMPI_File_read_ordered);
615

616
    tm1 = darshan_core_wtime();
617
    ret = __real_PMPI_File_read_ordered(fh, buf, count,
618 619 620
        datatype, status);
    tm2 = darshan_core_wtime();

621
    MPIIO_PRE_RECORD();
622
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_COLL_READS, tm1, tm2);
623 624
    MPIIO_POST_RECORD();

625 626
    return(ret);
}
627
DARSHAN_WRAPPER_MAP(PMPI_File_read_ordered, int, (MPI_File fh, void * buf, int count,
628
    MPI_Datatype datatype, MPI_Status * status),
629
        MPI_File_read_ordered(fh, buf, count, datatype, status))
630

631
#ifdef HAVE_MPIIO_CONST
632
int DARSHAN_DECL(MPI_File_write_ordered)(MPI_File fh, const void * buf, int count,
633 634
    MPI_Datatype datatype, MPI_Status * status)
#else
635
int DARSHAN_DECL(MPI_File_write_ordered)(MPI_File fh, void * buf, int count,
636 637 638 639 640 641
    MPI_Datatype datatype, MPI_Status * status)
#endif
{
    int ret;
    double tm1, tm2;

642
    MAP_OR_FAIL(PMPI_File_write_ordered);
643

644
    tm1 = darshan_core_wtime();
645
    ret = __real_PMPI_File_write_ordered(fh, buf, count,
646 647 648
         datatype, status);
    tm2 = darshan_core_wtime();

649
    MPIIO_PRE_RECORD();
650
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_COLL_WRITES, tm1, tm2);
651 652
    MPIIO_POST_RECORD();

653 654
    return(ret);
}
655 656 657 658 659 660 661 662 663
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_ordered, int, (MPI_File fh, const void * buf, int count,
    MPI_Datatype datatype, MPI_Status * status),
        MPI_File_write_ordered(fh, buf, count, datatype, status))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_ordered, int, (MPI_File fh, void * buf, int count,
    MPI_Datatype datatype, MPI_Status * status),
        MPI_File_write_ordered(fh, buf, count, datatype, status))
#endif
664

665
int DARSHAN_DECL(MPI_File_read_all_begin)(MPI_File fh, void * buf, int count, MPI_Datatype datatype)
666 667 668 669
{
    int ret;
    double tm1, tm2;

670
    MAP_OR_FAIL(PMPI_File_read_all_begin);
671

672
    tm1 = darshan_core_wtime();
673
    ret = __real_PMPI_File_read_all_begin(fh, buf, count, datatype);
674 675
    tm2 = darshan_core_wtime();

676
    MPIIO_PRE_RECORD();
677
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_SPLIT_READS, tm1, tm2);
678 679
    MPIIO_POST_RECORD();

680 681
    return(ret);
}
682 683
DARSHAN_WRAPPER_MAP(PMPI_File_read_all_begin, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype),
        MPI_File_read_all_begin(fh, buf, count, datatype))
684 685

#ifdef HAVE_MPIIO_CONST
686
int DARSHAN_DECL(MPI_File_write_all_begin)(MPI_File fh, const void * buf, int count, MPI_Datatype datatype)
687
#else
688
int DARSHAN_DECL(MPI_File_write_all_begin)(MPI_File fh, void * buf, int count, MPI_Datatype datatype)
689 690 691 692 693
#endif
{
    int ret;
    double tm1, tm2;

694
    MAP_OR_FAIL(PMPI_File_write_all_begin);
695

696
    tm1 = darshan_core_wtime();
697
    ret = __real_PMPI_File_write_all_begin(fh, buf, count, datatype);
698 699
    tm2 = darshan_core_wtime();

700
    MPIIO_PRE_RECORD();
701
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_SPLIT_WRITES, tm1, tm2);
702 703
    MPIIO_POST_RECORD();

704 705
    return(ret);
}
706 707 708 709 710 711 712
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_all_begin, int, (MPI_File fh, const void * buf, int count, MPI_Datatype datatype),
        MPI_File_write_all_begin(fh, buf, count, datatype))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_all_begin, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype),
        MPI_File_write_all_begin(fh, buf, count, datatype))
#endif
713

714
int DARSHAN_DECL(MPI_File_read_at_all_begin)(MPI_File fh, MPI_Offset offset, void * buf,
715 716 717 718 719
    int count, MPI_Datatype datatype)
{
    int ret;
    double tm1, tm2;

720
    MAP_OR_FAIL(PMPI_File_read_at_all_begin);
721

722
    tm1 = darshan_core_wtime();
723
    ret = __real_PMPI_File_read_at_all_begin(fh, offset, buf,
724 725 726
        count, datatype);
    tm2 = darshan_core_wtime();
    
727
    MPIIO_PRE_RECORD();
728
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_SPLIT_READS, tm1, tm2);
729 730
    MPIIO_POST_RECORD();

731 732
    return(ret);
}
733 734 735
DARSHAN_WRAPPER_MAP(PMPI_File_read_at_all_begin, int, (MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype), MPI_File_read_at_all_begin(fh, offset, buf, count,
        datatype))
736 737

#ifdef HAVE_MPIIO_CONST
738
int DARSHAN_DECL(MPI_File_write_at_all_begin)(MPI_File fh, MPI_Offset offset, const void * buf,
739 740
    int count, MPI_Datatype datatype)
#else
741
int DARSHAN_DECL(MPI_File_write_at_all_begin)(MPI_File fh, MPI_Offset offset, void * buf,
742 743 744 745 746 747
    int count, MPI_Datatype datatype)
#endif
{
    int ret;
    double tm1, tm2;

748
    MAP_OR_FAIL(PMPI_File_write_at_all_begin);
749

750
    tm1 = darshan_core_wtime();
751
    ret = __real_PMPI_File_write_at_all_begin(fh, offset,
752 753 754
        buf, count, datatype);
    tm2 = darshan_core_wtime();

755
    MPIIO_PRE_RECORD();
756
    MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_SPLIT_WRITES, tm1, tm2);
757 758
    MPIIO_POST_RECORD();

759 760
    return(ret);
}
761 762 763 764 765 766 767
#ifdef HAVE_MPIIO_CONST
DARSHAN_WRAPPER_MAP(PMPI_File_write_at_all_begin, int, (MPI_File fh, MPI_Offset offset, const void * buf,
    int count, MPI_Datatype datatype), MPI_File_write_at_all_begin( fh, offset, buf, count, datatype))
#else
DARSHAN_WRAPPER_MAP(PMPI_File_write_at_all_begin, int, (MPI_File fh, MPI_Offset offset, void * buf,
    int count, MPI_Datatype datatype), MPI_File_write_at_all_begin( fh, offset, buf, count, datatype))
#endif
768

769
int DARSHAN_DECL(MPI_File_read_ordered_begin)(MPI_File fh, void * buf, int count, MPI_Datatype datatype)
770 771 772 773
{
    int ret;
    double tm1, tm2;

774
    MAP_OR_FAIL(PMPI_File_read_ordered_begin);
775

776
    tm1 = darshan_core_wtime();
777
    ret = __real_PMPI_File_read_ordered_begin(fh, buf, count,
778 779 780
        datatype);
    tm2 = darshan_core_wtime();

781
    MPIIO_PRE_RECORD();
782
    MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_SPLIT_READS, tm1, tm2);
783 784
    MPIIO_POST_RECORD();