/*
 *  (C) 2009 by Argonne National Laboratory.
 *      See COPYRIGHT in top-level directory.
 */

#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <zlib.h>
#include <time.h>
#include <stdlib.h>
#include <getopt.h>
#include <assert.h>
#include <inttypes.h>

#include "darshan-logutils.h"

#include "uthash-1.9.2/src/uthash.h"

/*
 * Options
 */
#define OPTION_BASE  (1 << 0)  /* darshan log fields */
#define OPTION_TOTAL (1 << 1)  /* aggregated fields */
#define OPTION_PERF  (1 << 2)  /* derived performance */
#define OPTION_FILE  (1 << 3)  /* file count totals */
#define OPTION_ALL (\
  OPTION_BASE|\
  OPTION_TOTAL|\
  OPTION_PERF|\
  OPTION_FILE)

#define FILETYPE_SHARED (1 << 0)
#define FILETYPE_UNIQUE (1 << 1)
#define FILETYPE_PARTSHARED (1 << 2)
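/* A file record is UNIQUE while only one rank has touched it, becomes
 * PARTSHARED once a second rank appears, and is SHARED if it shows up as an
 * aggregated rank -1 record. */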

#define max(a,b) (((a) > (b)) ? (a) : (b))
#define max3(a,b,c) (((a) > (b)) ? (((a) > (c)) ? (a) : (c)) : (((b) > (c)) ? (b) : (c)))

/*
 * Datatypes
 */
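/* One hash_entry_t is kept per file path hash, accumulating counters across
 * every record that references that file; a separate entry accumulates the
 * grand total across all files. */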
typedef struct hash_entry_s
{
    UT_hash_handle hlink;
    int64_t hash;
    int64_t type;
    int64_t procs;
    int64_t counters[CP_NUM_INDICES];
    double  fcounters[CP_F_NUM_INDICES];
    double cumul_time;
    double meta_time;
} hash_entry_t;

typedef struct perf_data_s
{
    int64_t total_bytes;
    double slowest_rank_time;
    double slowest_rank_meta_time;
    double shared_time_by_cumul;
    double shared_time_by_open;
    double shared_time_by_open_lastio;
    double shared_time_by_slowest;
    double shared_meta_time;
    double agg_perf_by_cumul;
    double agg_perf_by_open;
    double agg_perf_by_open_lastio;
    double agg_perf_by_slowest;
    double *rank_cumul_io_time;
    double *rank_cumul_md_time;
} perf_data_t;

typedef struct file_data_s
{
    int64_t total;
    int64_t total_size;
    int64_t total_max;
    int64_t read_only;
    int64_t read_only_size;
    int64_t read_only_max;
    int64_t write_only;
    int64_t write_only_size;
    int64_t write_only_max;
    int64_t read_write;
    int64_t read_write_size;
    int64_t read_write_max;
    int64_t unique;
    int64_t unique_size;
    int64_t unique_max;
    int64_t shared;
    int64_t shared_size;
    int64_t shared_max;
} file_data_t;

/*
 * Prototypes
 */
void accum_perf(struct darshan_file *, hash_entry_t *, perf_data_t *);
void calc_perf(struct darshan_job *, hash_entry_t *, perf_data_t *);

void accum_file(struct darshan_file *, hash_entry_t *, file_data_t *);
void calc_file(struct darshan_job *, hash_entry_t *, file_data_t *);

int usage (char *exename)
{
    fprintf(stderr, "Usage: %s [options] <filename>\n", exename);
    fprintf(stderr, "    --all   : all sub-options are enabled\n");
    fprintf(stderr, "    --base  : darshan log field data [default]\n");
    fprintf(stderr, "    --file  : total file counts\n");
    fprintf(stderr, "    --perf  : derived perf data\n");
    fprintf(stderr, "    --total : aggregated darshan field data\n");

    exit(1);
}
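/* Example invocation (the log file name is hypothetical):
 *
 *   darshan-parser --base --perf /path/to/carns_app_id12345.darshan.gz
 *
 * Options may be combined; when none are given, --base is assumed. */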

int parse_args (int argc, char **argv, char **filename)
{
    int index;
    int mask;
    static struct option long_opts[] =
    {
        {"all",   0, NULL, OPTION_ALL},
        {"base",  0, NULL, OPTION_BASE},
        {"file",  0, NULL, OPTION_FILE},
        {"perf",  0, NULL, OPTION_PERF},
        {"total", 0, NULL, OPTION_TOTAL},
        {"help",  0, NULL, 0},
        {0, 0, 0, 0}
    };
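    /* Each long option's val field is its OPTION_* bit, so the value returned
     * by getopt_long() below can be OR'd directly into the option mask. */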

    mask = 0;

    while(1)
    {
        int c = getopt_long(argc, argv, "", long_opts, &index);

        if (c == -1) break;

        switch(c)
        {
            case OPTION_ALL:
            case OPTION_BASE:
            case OPTION_FILE:
            case OPTION_PERF:
            case OPTION_TOTAL:
                mask |= c;
                break;
            case 0:
            case '?':
            default:
                usage(argv[0]);
                break;
        }
    }

    if (optind < argc)
    {
        *filename = argv[optind];
    }
    else
    {
        usage(argv[0]);
    }

    /* default mask value if none specified */
    if (mask == 0)
    {
        mask = OPTION_BASE;
    }

    return mask;
}

int main(int argc, char **argv)
{
    int ret;
    int mask;
    char *filename;
    struct darshan_job job;
    struct darshan_file cp_file;
    char tmp_string[1024];
    time_t tmp_time = 0;
    darshan_fd file;
    int i;
    int mount_count;
    int64_t* devs;
    char** mnt_pts;
    char** fs_types;
    int last_rank = 0;
    char *token;
    char *save;
    char buffer[DARSHAN_JOB_METADATA_LEN];
    hash_entry_t *file_hash = NULL;
    hash_entry_t *curr = NULL;
    hash_entry_t *tmp = NULL;
    hash_entry_t total;
    perf_data_t pdata;
    file_data_t fdata;

    memset(&pdata, 0, sizeof(pdata));
    memset(&total, 0, sizeof(total));

    mask = parse_args(argc, argv, &filename);

    file = darshan_log_open(filename, "r");
    if(!file)
    {
        fprintf(stderr, "darshan_log_open() failed to open %s.\n", filename);
        return(-1);
    }
   
    /* read job info */
    ret = darshan_log_getjob(file, &job);
    if(ret < 0)
    {
        fprintf(stderr, "Error: unable to read job information from log file.\n");
        darshan_log_close(file);
        return(-1);
    }

    /* warn user about any missing information in this log format */
    darshan_log_print_version_warnings(&job);

    ret = darshan_log_getexe(file, tmp_string);
    if(ret < 0)
    {
        fprintf(stderr, "Error: unable to read trailing job information.\n");
        darshan_log_close(file);
        return(-1);
    }

    /* print job summary */
    printf("# darshan log version: %s\n", job.version_string);
    printf("# size of file statistics: %zu bytes\n", sizeof(cp_file));
    printf("# size of job statistics: %zu bytes\n", sizeof(job));
    printf("# exe: %s\n", tmp_string);
    printf("# uid: %" PRId64 "\n", job.uid);
    printf("# jobid: %" PRId64 "\n", job.jobid);
    printf("# start_time: %" PRId64 "\n", job.start_time);
    tmp_time += job.start_time;
    printf("# start_time_asci: %s", ctime(&tmp_time));
    printf("# end_time: %" PRId64 "\n", job.end_time);
    tmp_time = 0;
    tmp_time += job.end_time;
    printf("# end_time_asci: %s", ctime(&tmp_time));
    printf("# nprocs: %" PRId64 "\n", job.nprocs);
    printf("# run time: %" PRId64 "\n", job.end_time - job.start_time + 1);
    for(token=strtok_r(job.metadata, "\n", &save);
        token != NULL;
        token=strtok_r(NULL, "\n", &save))
    {
        char *key;
        char *value;
        /* NOTE: we intentionally only split on the first = character.
         * There may be additional = characters in the value portion
         * (for example, when storing mpi-io hints).
         */
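        /* For example, a (hypothetical) metadata line of "h=romio_cb_write=enable"
         * splits into key "h" and value "romio_cb_write=enable". */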
        strcpy(buffer, token);
        key = buffer;
        value = index(buffer, '=');
        if(!value)
            continue;
        /* convert = to a null terminator to split key and value */
        value[0] = '\0';
        value++;
        printf("# metadata: %s = %s\n", key, value);
    }
 
    /* print table of mounted file systems */
    ret = darshan_log_getmounts(file, &devs, &mnt_pts, &fs_types, &mount_count);
    printf("\n# mounted file systems (device, mount point, and fs type)\n");
    printf("# -------------------------------------------------------\n");
    for(i=0; i<mount_count; i++)
    {
        printf("# mount entry: %" PRId64 "\t%s\t%s\n", devs[i], mnt_pts[i], fs_types[i]);
    }
  
    /* try to retrieve first record (may not exist) */
    ret = darshan_log_getfile(file, &job, &cp_file);
    if(ret < 0)
    {
        fprintf(stderr, "Error: failed to parse log file.\n");
        fflush(stderr);
        return(-1);
    }
    if(ret == 0)
    {
        /* it looks like the app didn't open any files */
        printf("# no files opened.\n");
        darshan_log_close(file);
        return(0);
    }

    if ((mask & OPTION_BASE))
    {
        printf("\n# description of columns:\n");
        printf("#   <rank>: MPI rank.  -1 indicates that the file is shared\n");
        printf("#      across all processes and statistics are aggregated.\n");
        printf("#   <file>: hash of file path.  0 indicates that statistics\n");
        printf("#      are condensed to refer to all files opened at the given\n");
        printf("#      process.\n");
        printf("#   <counter> and <value>: statistical counters.\n");
        printf("#      A value of -1 indicates that Darshan could not monitor\n");
        printf("#      that counter, and its value should be ignored.\n");
        printf("#   <name suffix>: last %d characters of file name.\n", CP_NAME_SUFFIX_LEN);
        printf("#   <mount pt>: mount point that the file resides on.\n");
        printf("#   <fs type>: type of file system that the file resides on.\n");
        printf("\n# description of counters:\n");
        printf("#   CP_POSIX_*: posix operation counts.\n");
        printf("#   CP_COLL_*: MPI collective operation counts.\n");
        printf("#   CP_INDEP_*: MPI independent operation counts.\n");
        printf("#   CP_SPLIT_*: MPI split collective operation counts.\n");
        printf("#   CP_NB_*: MPI non blocking operation counts.\n");
        printf("#   READS,WRITES,OPENS,SEEKS,STATS, and MMAPS are types of operations.\n");
        printf("#   CP_*_NC_OPENS: number of indep. and collective pnetcdf opens.\n");
        printf("#   CP_HDF5_OPENS: number of hdf5 opens.\n");
        printf("#   CP_COMBINER_*: combiner counts for MPI mem and file datatypes.\n");
        printf("#   CP_HINTS: number of times MPI hints were used.\n");
        printf("#   CP_VIEWS: number of times MPI file views were used.\n");
        printf("#   CP_MODE: mode that file was opened in.\n");
        printf("#   CP_BYTES_*: total bytes read and written.\n");
        printf("#   CP_MAX_BYTE_*: highest offset byte read and written.\n");
        printf("#   CP_CONSEC_*: number of exactly adjacent reads and writes.\n");
        printf("#   CP_SEQ_*: number of reads and writes from increasing offsets.\n");
        printf("#   CP_RW_SWITCHES: number of times access alternated between read and write.\n");
        printf("#   CP_*_ALIGNMENT: memory and file alignment.\n");
        printf("#   CP_*_NOT_ALIGNED: number of reads and writes that were not aligned.\n");
        printf("#   CP_MAX_*_TIME_SIZE: size of the slowest read and write operations.\n");
        printf("#   CP_SIZE_READ_*: histogram of read access sizes.\n");
        printf("#   CP_SIZE_READ_AGG_*: histogram of MPI datatype total sizes.\n");
        printf("#   CP_EXTENT_READ_*: histogram of MPI datatype extents.\n");
        printf("#   CP_STRIDE*_STRIDE: the four most common strides detected.\n");
        printf("#   CP_STRIDE*_COUNT: count of the four most common strides.\n");
        printf("#   CP_ACCESS*_ACCESS: the four most common access sizes.\n");
        printf("#   CP_ACCESS*_COUNT: count of the four most common access sizes.\n");
        printf("#   CP_DEVICE: device id reported by stat().\n");
        printf("#   CP_SIZE_AT_OPEN: size of file when first opened.\n");
        printf("#   CP_*_RANK_BYTES: fastest, slowest, and variance of bytes transferred.\n");
        printf("#   CP_F_OPEN_TIMESTAMP: timestamp of first open (mpi or posix).\n");
        printf("#   CP_F_*_START_TIMESTAMP: timestamp of first read/write (mpi or posix).\n");
        printf("#   CP_F_*_END_TIMESTAMP: timestamp of last read/write (mpi or posix).\n");
        printf("#   CP_F_CLOSE_TIMESTAMP: timestamp of last close (mpi or posix).\n");
        printf("#   CP_F_POSIX_READ/WRITE_TIME: cumulative time spent in posix reads or writes.\n");
        printf("#   CP_F_MPI_READ/WRITE_TIME: cumulative time spent in mpi-io reads or writes.\n");
        printf("#   CP_F_POSIX_META_TIME: cumulative time spent in posix open, close, fsync, stat and seek.\n");
        printf("#   CP_F_MPI_META_TIME: cumulative time spent in mpi-io open, close, set_view, and sync.\n");
        printf("#   CP_MAX_*_TIME: duration of the slowest read and write operations.\n");
        printf("#   CP_*_RANK_TIME: fastest, slowest, and variance of transfer time.\n");

        printf("\n");
        CP_PRINT_HEADER();
    }

    pdata.rank_cumul_io_time = malloc(sizeof(double)*job.nprocs);
    pdata.rank_cumul_md_time = malloc(sizeof(double)*job.nprocs);
    if (!pdata.rank_cumul_io_time || !pdata.rank_cumul_md_time)
    {
        perror("malloc failed");
        darshan_log_close(file);
        return(-1);
    }
    else
    {
        memset(pdata.rank_cumul_io_time, 0, sizeof(double)*job.nprocs);
        memset(pdata.rank_cumul_md_time, 0, sizeof(double)*job.nprocs);
    }

    do
    {
        char* mnt_pt = NULL;
        char* fs_type = NULL;
        hash_entry_t *hfile = NULL;

        if(cp_file.rank != -1 && cp_file.rank < last_rank)
        {
            fprintf(stderr, "Error: log file contains out of order rank data.\n");
            fflush(stderr);
            return(-1);
        }
        if(cp_file.rank != -1)
            last_rank = cp_file.rank;
        
        for(i=0; i<mount_count; i++)
        {
            if(cp_file.counters[CP_DEVICE] == devs[i])
            {
                mnt_pt = mnt_pts[i];
                fs_type = fs_types[i];
                break;
            }
        }
        if(!mnt_pt)
            mnt_pt = "UNKNOWN";
        if(!fs_type)
            fs_type = "UNKNOWN";

        HASH_FIND(hlink,file_hash,&cp_file.hash,sizeof(int64_t),hfile);
        if (!hfile)
        {
            hfile = (hash_entry_t*) malloc(sizeof(*hfile));
            if (!hfile)
            {
                fprintf(stderr, "malloc failure\n");
                exit(1);
            }

            /* init */
            memset(hfile, 0, sizeof(*hfile));
            hfile->hash          = cp_file.hash;
            hfile->type          = 0;
            hfile->procs         = 0;
            hfile->cumul_time    = 0.0;
            hfile->meta_time     = 0.0;

            HASH_ADD(hlink,file_hash,hash,sizeof(int64_t),hfile);
        }

        accum_file(&cp_file, &total, NULL);
        accum_file(&cp_file, hfile, &fdata);
        accum_perf(&cp_file, hfile, &pdata);

        if ((mask & OPTION_BASE))
        {
            for(i=0; i<CP_NUM_INDICES; i++)
            {
                CP_PRINT(&job, &cp_file, i, mnt_pt, fs_type);
            }
            for(i=0; i<CP_F_NUM_INDICES; i++)
            {
                CP_F_PRINT(&job, &cp_file, i, mnt_pt, fs_type);
            }
        }
    }while((ret = darshan_log_getfile(file, &job, &cp_file)) == 1);

    /* Total Calc */
    if ((mask & OPTION_TOTAL))
    {
        for(i=0; i<CP_NUM_INDICES; i++)
        {
            printf("total_%s: %" PRId64 "\n",
                   darshan_names[i], total.counters[i]);
        }
        for(i=0; i<CP_F_NUM_INDICES; i++)
        {
            printf("total_%s: %lf\n",
                   darshan_f_names[i], total.fcounters[i]);
        }
    }

    /* Perf Calc */
    calc_perf(&job, file_hash, &pdata);
    if ((mask & OPTION_PERF))
    {
        printf("\n# performance\n");
        printf("# -----------\n");
        printf("# total_bytes: %" PRId64 "\n", pdata.total_bytes);
        printf("# slowest_rank_time: %lf\n", pdata.slowest_rank_time);
        printf("# slowest_rank_meta_time: %lf\n", pdata.slowest_rank_meta_time);
        printf("# shared_time_by_cumul: %lf\n", pdata.shared_time_by_cumul);
        printf("# shared_time_by_open: %lf\n", pdata.shared_time_by_open);
        printf("# shared_time_by_open_lastio: %lf\n", pdata.shared_time_by_open_lastio);
        printf("# shared_meta_time: %lf\n", pdata.shared_meta_time);
        printf("# agg_perf_by_cumul: %lf\n", pdata.agg_perf_by_cumul);
        printf("# agg_perf_by_open: %lf\n", pdata.agg_perf_by_open);
        printf("# agg_perf_by_open_lastio: %lf\n", pdata.agg_perf_by_open_lastio);
        printf("# agg_perf_by_slowest: %lf\n", pdata.agg_perf_by_slowest);
    }

    /* File Calc */
    calc_file(&job, file_hash, &fdata);
    if ((mask & OPTION_FILE))
    {
        printf("\n# files\n");
        printf("# -----\n");
        printf("# total: %" PRId64 " %" PRId64 " %" PRId64 "\n",
               fdata.total,
               fdata.total_size,
               fdata.total_max);
        printf("# read_only: %" PRId64 " %" PRId64 " %" PRId64 "\n",
               fdata.read_only,
               fdata.read_only_size,
               fdata.read_only_max);
        printf("# write_only: %" PRId64 " %" PRId64 " %" PRId64 "\n",
               fdata.write_only,
               fdata.write_only_size,
               fdata.write_only_max);
        printf("# read_write: %" PRId64 " %" PRId64 " %" PRId64 "\n",
               fdata.read_write,
               fdata.read_write_size,
               fdata.read_write_max);
        printf("# unique: %" PRId64 " %" PRId64 " %" PRId64 "\n",
               fdata.unique,
               fdata.unique_size,
               fdata.unique_max);
        printf("# shared: %" PRId64 " %" PRId64 " %" PRId64 "\n",
               fdata.shared,
               fdata.shared_size,
               fdata.shared_max);
    }

    if(ret < 0)
    {
        fprintf(stderr, "Error: failed to parse log file.\n");
        fflush(stderr);
        return(-1);
    }

    for(i=0; i<mount_count; i++)
    {
        free(mnt_pts[i]);
        free(fs_types[i]);
    }
    if(mount_count > 0)
    {
        free(devs);
        free(mnt_pts);
        free(fs_types);
    }
 
    darshan_log_close(file);

    HASH_ITER(hlink, file_hash, curr, tmp)
    {
        HASH_DELETE(hlink, file_hash, curr);
        free(curr);
    }

    return(0);
}

void accum_file(struct darshan_file *dfile,
                hash_entry_t *hfile, 
                file_data_t *fdata)
{
    int i;

    hfile->procs += 1;

    if (dfile->rank == -1)
    {
        hfile->type |= FILETYPE_SHARED;
    }
    else if (hfile->procs > 1)
    {
        hfile->type &= (~FILETYPE_UNIQUE);
        hfile->type |= FILETYPE_PARTSHARED;
    }
    else
    {
        hfile->type |= FILETYPE_UNIQUE;
    }

    for (i = 0; i < CP_NUM_INDICES; i++)
    {
        switch(i)
        {
        case CP_DEVICE:
        case CP_MODE:
        case CP_MEM_ALIGNMENT:
        case CP_FILE_ALIGNMENT:
            if(CP_FILE_PARTIAL(hfile))
                hfile->counters[i] = dfile->counters[i];
            break;
        case CP_SIZE_AT_OPEN:
            if (hfile->counters[i] == -1)
            {
                hfile->counters[i] = dfile->counters[i];
            }
            if (hfile->counters[i] > dfile->counters[i] && !CP_FILE_PARTIAL(dfile))
            {
                hfile->counters[i] = dfile->counters[i];
            }
            break;
        case CP_MAX_BYTE_READ:
        case CP_MAX_BYTE_WRITTEN:
            if (hfile->counters[i] < dfile->counters[i])
            {
                hfile->counters[i] = dfile->counters[i];
            }
            break;

        case CP_STRIDE1_STRIDE:
        case CP_STRIDE2_STRIDE:
        case CP_STRIDE3_STRIDE:
        case CP_STRIDE4_STRIDE:
        case CP_ACCESS1_ACCESS:
        case CP_ACCESS2_ACCESS:
        case CP_ACCESS3_ACCESS:
        case CP_ACCESS4_ACCESS:
           /*
            * do nothing here because these will be stored
            * when the _COUNT is accessed.
            */
           break;
 
        case CP_STRIDE1_COUNT:
        case CP_STRIDE2_COUNT:
        case CP_STRIDE3_COUNT:
        case CP_STRIDE4_COUNT:
        case CP_ACCESS1_COUNT:
        case CP_ACCESS2_COUNT:
        case CP_ACCESS3_COUNT:
        case CP_ACCESS4_COUNT:
            if (hfile->counters[i] < dfile->counters[i])
            {
                hfile->counters[i]   = dfile->counters[i];
                hfile->counters[i-4] = dfile->counters[i-4];
            }
            break;
        case CP_FASTEST_RANK:
        case CP_SLOWEST_RANK:
        case CP_FASTEST_RANK_BYTES:
        case CP_SLOWEST_RANK_BYTES:
            hfile->counters[i] = 0;
            break;
        case CP_MAX_READ_TIME_SIZE:
        case CP_MAX_WRITE_TIME_SIZE:
            break;
        default:
            hfile->counters[i] += dfile->counters[i];
            break;
        }
    }

    for (i = 0; i < CP_F_NUM_INDICES; i++)
    {
        switch(i)
        {
            case CP_F_FASTEST_RANK_TIME:
            case CP_F_SLOWEST_RANK_TIME:
            case CP_F_VARIANCE_RANK_TIME:
            case CP_F_VARIANCE_RANK_BYTES:
                hfile->fcounters[i] = 0;
                break;
            case CP_F_MAX_READ_TIME:
                if (hfile->fcounters[i] > dfile->fcounters[i])
                {
                    hfile->fcounters[i] = dfile->fcounters[i];
                    hfile->counters[CP_MAX_READ_TIME_SIZE] =
                        dfile->counters[CP_MAX_READ_TIME_SIZE];
                }
                break;
            case CP_F_MAX_WRITE_TIME:
                if (hfile->fcounters[i] > dfile->fcounters[i])
                {
                    hfile->fcounters[i] = dfile->fcounters[i];
                    hfile->counters[CP_MAX_WRITE_TIME_SIZE] =
                        dfile->counters[CP_MAX_WRITE_TIME_SIZE];
                }
                break;
            default:
                hfile->fcounters[i] += dfile->fcounters[i];
                break;
        }
    }

    return;
}


void calc_file(struct darshan_job *djob,
               hash_entry_t *file_hash, 
               file_data_t *fdata)
{
    hash_entry_t *curr = NULL;
    hash_entry_t *tmp = NULL;

    memset(fdata, 0, sizeof(*fdata));

    HASH_ITER(hlink, file_hash, curr, tmp)
    {
        int64_t max;
        int64_t r;
        int64_t w;

        max = max3(curr->counters[CP_SIZE_AT_OPEN],
                   curr->counters[CP_MAX_BYTE_READ],
                   curr->counters[CP_MAX_BYTE_WRITTEN]);

        r = (curr->counters[CP_POSIX_READS]+
             curr->counters[CP_POSIX_FREADS]+
             curr->counters[CP_INDEP_READS]+
             curr->counters[CP_COLL_READS]+
             curr->counters[CP_SPLIT_READS]+
             curr->counters[CP_NB_READS]);

        w = (curr->counters[CP_POSIX_WRITES]+
             curr->counters[CP_POSIX_FWRITES]+
             curr->counters[CP_INDEP_WRITES]+
             curr->counters[CP_COLL_WRITES]+
             curr->counters[CP_SPLIT_WRITES]+
             curr->counters[CP_NB_WRITES]);

        fdata->total += 1;
        fdata->total_size += max;
        fdata->total_max = max(fdata->total_max, max);

        if (r && !w)
        {
            fdata->read_only += 1;
            fdata->read_only_size += max;
            fdata->read_only_max = max(fdata->read_only_max, max);
        }

        if (!r && w)
        {
            fdata->write_only += 1;
            fdata->write_only_size += max;
            fdata->write_only_max = max(fdata->write_only_max, max);
        }

        if (r && w)
        {
            fdata->read_write += 1;
            fdata->read_write_size += max;
            fdata->read_write_max = max(fdata->read_write_max, max);
        }

        if ((curr->type & (FILETYPE_SHARED|FILETYPE_PARTSHARED)))
        {
            fdata->shared += 1;
            fdata->shared_size += max;
            fdata->shared_max = max(fdata->shared_max, max);
        }

        if ((curr->type & (FILETYPE_UNIQUE)))
        {
            fdata->unique += 1;
            fdata->unique_size += max;
            fdata->unique_max = max(fdata->unique_max, max);
        }
    }

    return;
}

void accum_perf(struct darshan_file *dfile,
                hash_entry_t *hfile,
                perf_data_t *pdata)
{
    int64_t mpi_file;

    pdata->total_bytes += dfile->counters[CP_BYTES_READ] +
                          dfile->counters[CP_BYTES_WRITTEN];

    mpi_file = dfile->counters[CP_INDEP_OPENS] +
               dfile->counters[CP_COLL_OPENS];

    /*
     * Calculation of Shared File Time
     *   Four Methods!!!!
     *     by_cumul: sum time counters and divide by nprocs
     *               (inaccurate if lots of variance between procs)
     *     by_open: difference between timestamp of open and close
     *              (inaccurate if file is left open without i/o happening)
     *     by_open_lastio: difference between timestamp of open and the
     *                     timestamp of last i/o
     *                     (similar to above but fixes case where file is left
     *                      open after io is complete)
     *     by_slowest: use slowest rank time from log data
     *                 (most accurate but requires newer log version)
     */
    if (dfile->rank == -1)
    {
        /* by_open (same for MPI or POSIX) */
        if (dfile->fcounters[CP_F_CLOSE_TIMESTAMP] >
            dfile->fcounters[CP_F_OPEN_TIMESTAMP])
        {
            pdata->shared_time_by_open +=
                dfile->fcounters[CP_F_CLOSE_TIMESTAMP] -
                dfile->fcounters[CP_F_OPEN_TIMESTAMP];
        }

        /* by_open_lastio (same for MPI or POSIX) */
        if (dfile->fcounters[CP_F_READ_END_TIMESTAMP] >
            dfile->fcounters[CP_F_WRITE_END_TIMESTAMP])
        {
            /* be careful: file may have been opened but not read or written */
            if(dfile->fcounters[CP_F_READ_END_TIMESTAMP] > dfile->fcounters[CP_F_OPEN_TIMESTAMP])
            {
                pdata->shared_time_by_open_lastio += 
                    dfile->fcounters[CP_F_READ_END_TIMESTAMP] - 
                    dfile->fcounters[CP_F_OPEN_TIMESTAMP];
            }
        }
        else
        {
            /* be careful: file may have been opened but not read or written */
            if(dfile->fcounters[CP_F_WRITE_END_TIMESTAMP] > dfile->fcounters[CP_F_OPEN_TIMESTAMP])
            {
                pdata->shared_time_by_open_lastio += 
                    dfile->fcounters[CP_F_WRITE_END_TIMESTAMP] - 
                    dfile->fcounters[CP_F_OPEN_TIMESTAMP];
            }
        }

        /* by_cumul */
        if (mpi_file)
        {
            pdata->shared_time_by_cumul +=
                dfile->fcounters[CP_F_MPI_META_TIME] +
                dfile->fcounters[CP_F_MPI_READ_TIME] +
                dfile->fcounters[CP_F_MPI_WRITE_TIME];
            pdata->shared_meta_time += dfile->fcounters[CP_F_MPI_META_TIME];
        }
        else
        {
            pdata->shared_time_by_cumul +=
                dfile->fcounters[CP_F_POSIX_META_TIME] +
                dfile->fcounters[CP_F_POSIX_READ_TIME] +
                dfile->fcounters[CP_F_POSIX_WRITE_TIME];
            pdata->shared_meta_time += dfile->fcounters[CP_F_POSIX_META_TIME];
        }

        /* by_slowest (same for MPI or POSIX) */
        pdata->shared_time_by_slowest +=
            dfile->fcounters[CP_F_SLOWEST_RANK_TIME];
    }

    /*
     * Calculation of Unique File Time
     *   record the data for each file and sum it 
     */
    else
    {
        if (mpi_file)
        {
#if 0
            hfile->cumul_time += dfile->fcounters[CP_F_MPI_META_TIME] +
                                dfile->fcounters[CP_F_MPI_READ_TIME] +
                                dfile->fcounters[CP_F_MPI_WRITE_TIME];
            hfile->meta_time += dfile->fcounters[CP_F_MPI_META_TIME];
#else
            pdata->rank_cumul_io_time[dfile->rank] += dfile->fcounters[CP_F_MPI_META_TIME] +
                                dfile->fcounters[CP_F_MPI_READ_TIME] +
                                dfile->fcounters[CP_F_MPI_WRITE_TIME];
            pdata->rank_cumul_md_time[dfile->rank] += dfile->fcounters[CP_F_MPI_META_TIME];
#endif
        }
        else
        {
#if 0
            hfile->cumul_time += dfile->fcounters[CP_F_POSIX_META_TIME] +
                                dfile->fcounters[CP_F_POSIX_READ_TIME] +
                                dfile->fcounters[CP_F_POSIX_WRITE_TIME];
            hfile->meta_time += dfile->fcounters[CP_F_POSIX_META_TIME];
#else
            pdata->rank_cumul_io_time[dfile->rank] += dfile->fcounters[CP_F_POSIX_META_TIME] +
                                dfile->fcounters[CP_F_POSIX_READ_TIME] +
                                dfile->fcounters[CP_F_POSIX_WRITE_TIME];
            pdata->rank_cumul_md_time[dfile->rank] += dfile->fcounters[CP_F_POSIX_META_TIME];
#endif
        }

#if 0
        pdata->rank_cumul_io_time[dfile->rank] += hfile->cumul_time;
        pdata->rank_cumul_md_time[dfile->rank] += hfile->meta_time;
#endif
    }

    return;
}

void calc_perf(struct darshan_job *djob,
               hash_entry_t *hash_rank_uniq,
               perf_data_t *pdata)
{
    int64_t i;

    pdata->shared_time_by_cumul =
        pdata->shared_time_by_cumul / (double)djob->nprocs;

    pdata->shared_meta_time = pdata->shared_meta_time / (double)djob->nprocs;
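    /* The slowest rank is the one with the largest cumulative I/O time on
     * non-shared files; its metadata time is carried along with it. */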

    for (i=0; i<djob->nprocs; i++)
    {
        if (pdata->rank_cumul_io_time[i] > pdata->slowest_rank_time)
        {
            pdata->slowest_rank_time = pdata->rank_cumul_io_time[i];
            pdata->slowest_rank_meta_time = pdata->rank_cumul_md_time[i];
        }
    }
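    /* Aggregate performance in MiB/s: total bytes moved divided by 2^20, over
     * the estimated job I/O time (the slowest rank's unique-file time plus the
     * shared-file time estimate for each method). */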

    if (pdata->slowest_rank_time + pdata->shared_time_by_cumul)
        pdata->agg_perf_by_cumul = ((double)pdata->total_bytes / 1048576.0) /
                                   (pdata->slowest_rank_time +
                                    pdata->shared_time_by_cumul);

    if (pdata->slowest_rank_time + pdata->shared_time_by_open)
        pdata->agg_perf_by_open = ((double)pdata->total_bytes / 1048576.0) /
                                  (pdata->slowest_rank_time +
                                   pdata->shared_time_by_open);

    if (pdata->slowest_rank_time + pdata->shared_time_by_open_lastio)
        pdata->agg_perf_by_open_lastio = ((double)pdata->total_bytes / 1048576.0) /
                                         (pdata->slowest_rank_time +
                                          pdata->shared_time_by_open_lastio);

    if (pdata->slowest_rank_time + pdata->shared_time_by_slowest)
        pdata->agg_perf_by_slowest = ((double)pdata->total_bytes / 1048576.0) /
                                     (pdata->slowest_rank_time +
                                      pdata->shared_time_by_slowest);

    return;
}