/*
 *  (C) 2009 by Argonne National Laboratory.
 *      See COPYRIGHT in top-level directory.
 */

#define _GNU_SOURCE

#include "darshan-runtime-config.h"

#include <stdio.h>
#ifdef HAVE_MNTENT_H
#include <mntent.h>
#endif
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <limits.h>
#include <pthread.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <mpi.h>
#include <assert.h>

#include "uthash.h"
#include "darshan-core.h"

/* TODO is __progname_full needed here */
extern char* __progname;

/* internal variable declarations */
static struct darshan_core_runtime *darshan_core_job = NULL;
static pthread_mutex_t darshan_core_mutex = PTHREAD_MUTEX_INITIALIZER;
static int my_rank = -1;
static int nprocs = -1;

/* prototypes for internal helper functions */
static void darshan_core_initialize(
    int *argc, char ***argv);
static void darshan_core_shutdown(
    void);
static void darshan_core_cleanup(
    struct darshan_core_runtime* job);
static void darshan_get_logfile_name(
    char* logfile_name, int jobid, struct tm* start_tm);
static void darshan_log_record_hints_and_ver(
    struct darshan_core_runtime* job);
static void darshan_get_shared_record_ids(
    struct darshan_core_runtime *job, darshan_record_id *shared_recs);
static int darshan_log_coll_open(
    char *logfile_name, MPI_File *log_fh);
static int darshan_log_write_record_hash(
    MPI_File log_fh, struct darshan_core_record_ref *rec_hash,
    darshan_record_id *shared_recs, struct darshan_log_map *map);
static int darshan_log_coll_write(
    MPI_File log_fh, void *buf, int count, struct darshan_log_map *map);

#define DARSHAN_CORE_LOCK() pthread_mutex_lock(&darshan_core_mutex)
#define DARSHAN_CORE_UNLOCK() pthread_mutex_unlock(&darshan_core_mutex)

/* intercept MPI initialize and finalize to manage darshan core runtime */
int MPI_Init(int *argc, char ***argv)
{
    int ret;

    ret = DARSHAN_MPI_CALL(PMPI_Init)(argc, argv);
    if(ret != MPI_SUCCESS)
    {
        return(ret);
    }

    darshan_core_initialize(argc, argv);

    return(ret);
}

int MPI_Init_thread(int *argc, char ***argv, int required, int *provided)
{
    int ret;

    ret = DARSHAN_MPI_CALL(PMPI_Init_thread)(argc, argv, required, provided);
    if(ret != MPI_SUCCESS)
    {
        return(ret);
    }

    darshan_core_initialize(argc, argv);

    return(ret);
}

int MPI_Finalize(void)
{
    int ret;

    darshan_core_shutdown();

    ret = DARSHAN_MPI_CALL(PMPI_Finalize)();
    return(ret);
}
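
/* Usage note (illustrative, not specific to any one install): because the
 * wrappers above go through the standard PMPI profiling interface, an MPI
 * application needs no source changes to be instrumented -- it only has to
 * resolve MPI_Init/MPI_Init_thread/MPI_Finalize against the darshan runtime
 * library, e.g. by linking it ahead of the MPI library or, for dynamically
 * linked executables, by preloading the darshan shared library via
 * LD_PRELOAD. The exact library name and link order depend on the build.
 */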

/* *********************************** */

static void darshan_core_initialize(int *argc, char ***argv)
{
    int i;
    int internal_timing_flag = 0;
    double init_start, init_time, init_max;
    char* truncate_string = "<TRUNCATED>";
    int truncate_offset;
    int chars_left = 0;

    DARSHAN_MPI_CALL(PMPI_Comm_size)(MPI_COMM_WORLD, &nprocs);
    DARSHAN_MPI_CALL(PMPI_Comm_rank)(MPI_COMM_WORLD, &my_rank);

    if(getenv("DARSHAN_INTERNAL_TIMING"))
        internal_timing_flag = 1;

    if(internal_timing_flag)
        init_start = DARSHAN_MPI_CALL(PMPI_Wtime)();

    /* setup darshan runtime if darshan is enabled and hasn't been initialized already */
    if(!getenv("DARSHAN_DISABLE") && !darshan_core_job)
    {
        /* allocate structure to track darshan_core_job information */
        darshan_core_job = malloc(sizeof(*darshan_core_job));
        if(darshan_core_job)
        {
            memset(darshan_core_job, 0, sizeof(*darshan_core_job));

            darshan_core_job->log_job.uid = getuid();
            darshan_core_job->log_job.start_time = time(NULL);
            darshan_core_job->log_job.nprocs = nprocs;
            darshan_core_job->wtime_offset = DARSHAN_MPI_CALL(PMPI_Wtime)();

            /* record exe and arguments */
            for(i=0; i<(*argc); i++)
            {
                chars_left = CP_EXE_LEN-strlen(darshan_core_job->exe);
                strncat(darshan_core_job->exe, (*argv)[i], chars_left);
                if(i < ((*argc)-1))
                {
                    chars_left = CP_EXE_LEN-strlen(darshan_core_job->exe);
                    strncat(darshan_core_job->exe, " ", chars_left);
                }
            }

            /* if we don't see any arguments, then use glibc symbol to get
             * program name at least (this happens in fortran)
             */
            if(*argc == 0)
            {
                chars_left = CP_EXE_LEN-strlen(darshan_core_job->exe);
                strncat(darshan_core_job->exe, __progname, chars_left);
                chars_left = CP_EXE_LEN-strlen(darshan_core_job->exe);
                strncat(darshan_core_job->exe, " <unknown args>", chars_left);
            }

            if(chars_left == 0)
            {
                /* we ran out of room; mark that string was truncated */
                truncate_offset = CP_EXE_LEN - strlen(truncate_string);
                sprintf(&darshan_core_job->exe[truncate_offset], "%s",
                    truncate_string);
            }
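            /* Illustrative example of the exe/argument capture above
             * (CP_EXE_LEN is defined in the darshan headers): if the buffer
             * were 256 bytes, a command line such as "./my_app -i input.dat"
             * is stored verbatim, while a longer command line is cut off and
             * its tail overwritten with "<TRUNCATED>".
             */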
        }
    }

    if(internal_timing_flag)
    {
        init_time = DARSHAN_MPI_CALL(PMPI_Wtime)() - init_start;
        DARSHAN_MPI_CALL(PMPI_Reduce)(&init_time, &init_max, 1,
            MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
        if(my_rank == 0)
        {
            printf("#darshan:<op>\t<nprocs>\t<time>\n");
            printf("darshan:init\t%d\t%f\n", nprocs, init_max);
        }
    }

    return;
}

static void darshan_core_shutdown()
{
    int i;
    char *logfile_name;
    struct darshan_core_runtime *final_job;
    int internal_timing_flag = 0;
    char *envjobid;
    char *jobid_str;
    int jobid;
    struct tm *start_tm;
    time_t start_time_tmp;
    int ret = 0;
    int all_ret = 0;
    int64_t first_start_time;
    int64_t last_end_time;
    int local_mod_use[DARSHAN_MAX_MODS] = {0};
    int global_mod_use_count[DARSHAN_MAX_MODS] = {0};
    darshan_record_id shared_recs[DARSHAN_CORE_MAX_RECORDS] = {0};
    double start_log_time;
    long offset;
    struct darshan_header log_header;
    MPI_File log_fh;
    MPI_Offset tmp_off = 0;
    MPI_Status status;

    if(getenv("DARSHAN_INTERNAL_TIMING"))
        internal_timing_flag = 1;

    DARSHAN_CORE_LOCK();
    if(!darshan_core_job)
    {
        DARSHAN_CORE_UNLOCK();
        return;
    }
    /* disable further tracing while hanging onto the data so that we can
     * write it out
     */
    final_job = darshan_core_job;
    darshan_core_job = NULL;
    DARSHAN_CORE_UNLOCK();

    start_log_time = DARSHAN_MPI_CALL(PMPI_Wtime)();

    logfile_name = malloc(PATH_MAX);
    if(!logfile_name)
    {
        darshan_core_cleanup(final_job);
        return;
    }

    /* set darshan job id/metadata and construct log file name on rank 0 */
    if(my_rank == 0)
    {
        /* Use CP_JOBID_OVERRIDE for the env var or CP_JOBID */
        envjobid = getenv(CP_JOBID_OVERRIDE);
        if(!envjobid)
        {
            envjobid = CP_JOBID;
        }

        /* find a job id */
        jobid_str = getenv(envjobid);
        if(jobid_str)
        {
            /* in cobalt we can find it in env var */
            ret = sscanf(jobid_str, "%d", &jobid);
        }
        if(!jobid_str || ret != 1)
        {
            /* use pid as fall back */
            jobid = getpid();
        }

        final_job->log_job.jobid = (int64_t)jobid;

/* TODO */
#if 0
        /* if we are using any hints to write the log file, then record those
         * hints with the darshan job information
         */
        darshan_log_record_hints_and_ver(final_job);
#endif

        /* use human readable start time format in log filename */
        start_time_tmp = final_job->log_job.start_time;
        start_tm = localtime(&start_time_tmp);

        /* construct log file name */
        darshan_get_logfile_name(logfile_name, jobid, start_tm);
    }

    /* broadcast log file name */
    DARSHAN_MPI_CALL(PMPI_Bcast)(logfile_name, PATH_MAX, MPI_CHAR, 0,
        MPI_COMM_WORLD);

    if(strlen(logfile_name) == 0)
    {
        /* failed to generate log file name */
        free(logfile_name);
        darshan_core_cleanup(final_job);
        return;
    }

    final_job->log_job.end_time = time(NULL);

    /* reduce to report first start time and last end time across all ranks
     * at rank 0
     */
    DARSHAN_MPI_CALL(PMPI_Reduce)(&final_job->log_job.start_time, &first_start_time, 1, MPI_LONG_LONG, MPI_MIN, 0, MPI_COMM_WORLD);
    DARSHAN_MPI_CALL(PMPI_Reduce)(&final_job->log_job.end_time, &last_end_time, 1, MPI_LONG_LONG, MPI_MAX, 0, MPI_COMM_WORLD);
    if(my_rank == 0)
    {
        final_job->log_job.start_time = first_start_time;
        final_job->log_job.end_time = last_end_time;
    }

    /* set which local modules were actually used */
    for(i = 0; i < DARSHAN_MAX_MODS; i++)
    {
        if(final_job->mod_array[i])
            local_mod_use[i] = 1;
    }

    /* count the number of ranks that used each module and make the totals available on every rank */
    DARSHAN_MPI_CALL(PMPI_Allreduce)(local_mod_use, global_mod_use_count, DARSHAN_MAX_MODS, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
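    /* For example (illustrative): with 4 ranks where only ranks 0 and 2
     * registered module i, local_mod_use[i] is {1,0,1,0} across the ranks and
     * the MPI_SUM allreduce leaves global_mod_use_count[i] == 2 on every rank.
     */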

    /* get a list of records which are shared across all processes */
    darshan_get_shared_record_ids(final_job, shared_recs);

    /* collectively open the darshan log file */
    ret = darshan_log_coll_open(logfile_name, &log_fh);

    /* error out if unable to open log file */
    DARSHAN_MPI_CALL(PMPI_Allreduce)(&ret, &all_ret, 1, MPI_INT,
        MPI_LOR, MPI_COMM_WORLD);
    if(all_ret != 0)
    {
        if(my_rank == 0)
        {
            fprintf(stderr, "darshan library warning: unable to open log file %s\n",
                logfile_name);
            unlink(logfile_name);
        }
        free(logfile_name);
        darshan_core_cleanup(final_job);
        return;
    }

    /* rank 0 is responsible for writing the darshan job information */
    if(my_rank == 0)
    {
        /* write the job information, making sure to prealloc space for the log header */
        all_ret = DARSHAN_MPI_CALL(PMPI_File_write_at)(log_fh, sizeof(struct darshan_header),
                &final_job->log_job, sizeof(struct darshan_job), MPI_BYTE, &status);
        if(all_ret != MPI_SUCCESS)
        {
            fprintf(stderr, "darshan library warning: unable to write job data to log file %s\n",
                    logfile_name);
            unlink(logfile_name);
        }

        /* TODO: after compression is added, this should be fixed */
        log_header.rec_map.off = sizeof(struct darshan_header) + sizeof(struct darshan_job);
    }
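
    /* Sketch of the on-disk layout this function builds up, as implied by the
     * offsets used above and below (the header itself is written last, once
     * all map and module offsets are known):
     *
     *   [struct darshan_header][struct darshan_job][record id->name map][module 0 data]...[module data]
     */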

    /* error out if unable to write job information */
    DARSHAN_MPI_CALL(PMPI_Bcast)(&all_ret, 1, MPI_INT, 0, MPI_COMM_WORLD);
    if(all_ret != 0)
    {
        free(logfile_name);
        darshan_core_cleanup(final_job);
        return;
    }

    /* write the record name->id hash to the log file */
    ret = darshan_log_write_record_hash(log_fh, final_job->rec_hash,
        shared_recs, &log_header.rec_map);

    /* error out if unable to write record hash */
    DARSHAN_MPI_CALL(PMPI_Allreduce)(&ret, &all_ret, 1, MPI_INT,
        MPI_LOR, MPI_COMM_WORLD);
    if(all_ret != 0)
    {
        if(my_rank == 0)
        {
            fprintf(stderr, "darshan library warning: unable to write record map to log file %s\n",
                logfile_name);
            unlink(logfile_name);
        }
        free(logfile_name);
        darshan_core_cleanup(final_job);
        return;
    }

    /* loop over globally used darshan modules and:
     *      - get final output buffer
     *      - compress (zlib) provided output buffer
     *      - append compressed buffer to log file
     *      - add module index info (file offset/length) to log header
     *      - shutdown the module
     */
    for(i = 0; i < DARSHAN_MAX_MODS; i++)
    {
        struct darshan_core_module* this_mod = final_job->mod_array[i];
        MPI_Comm mod_comm;
        void* mod_buf = NULL;
        int mod_buf_size = 0;

        if(!global_mod_use_count[i])
        {
            if(my_rank == 0)
                log_header.mod_map[i].off = log_header.mod_map[i].len = 0;

            continue;
        }

        /* create a communicator to use for shutting down the module */
        if(global_mod_use_count[i] == nprocs)
        {
            MPI_Comm_dup(MPI_COMM_WORLD, &mod_comm);
        }
        else
        {
            MPI_Comm_split(MPI_COMM_WORLD, local_mod_use[i], 0, &mod_comm);
        }
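        /* For example (illustrative): if only 3 of 8 ranks used this module,
         * the MPI_Comm_split() above uses local_mod_use[i] as the color, so
         * those 3 ranks land in one communicator (color 1) that the module's
         * get_output_data() can use for collective operations, while the
         * remaining ranks get a separate communicator that they simply free
         * at the bottom of this loop.
         */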

        /* if module is registered locally, get the corresponding output buffer */
        if(local_mod_use[i])
        {
            /* get output buffer from module */
            this_mod->mod_funcs.get_output_data(mod_comm, &mod_buf, &mod_buf_size);
        }

        /* set the starting offset of this module */
        if(tmp_off == 0)
            tmp_off = log_header.rec_map.off + log_header.rec_map.len;

        log_header.mod_map[i].off = tmp_off;

        /* write module data buffer to the darshan log file */
        ret = darshan_log_coll_write(log_fh, mod_buf, mod_buf_size, &log_header.mod_map[i]);

        /* error out if unable to write this module's data */
        DARSHAN_MPI_CALL(PMPI_Allreduce)(&ret, &all_ret, 1, MPI_INT,
            MPI_LOR, MPI_COMM_WORLD);
        if(all_ret != 0)
        {
            if(my_rank == 0)
            {
                fprintf(stderr,
                    "darshan library warning: unable to write %s module data to log file %s\n",
                    darshan_module_names[i], logfile_name);
                unlink(logfile_name);
            }
            free(logfile_name);
            darshan_core_cleanup(final_job);
            return;
        }

        tmp_off += log_header.mod_map[i].len;

        /* shutdown module if registered locally */
        if(local_mod_use[i])
        {
            this_mod->mod_funcs.shutdown();
            this_mod = NULL;
        }

        MPI_Comm_free(&mod_comm);
    }

    /* rank 0 is responsible for writing the log header */
    if(my_rank == 0)
    {
        /* initialize the remaining header fields */
        strcpy(log_header.version_string, CP_VERSION);
        log_header.magic_nr = CP_MAGIC_NR;
        log_header.comp_type = DARSHAN_GZ_COMP;

        all_ret = DARSHAN_MPI_CALL(PMPI_File_write_at)(log_fh, 0, &log_header,
            sizeof(struct darshan_header), MPI_BYTE, &status);
        if(all_ret != MPI_SUCCESS)
        {
            fprintf(stderr, "darshan library warning: unable to write header to log file %s\n",
                    logfile_name);
            unlink(logfile_name);
        }
    }

    /* error out if unable to write log header */
    DARSHAN_MPI_CALL(PMPI_Bcast)(&all_ret, 1, MPI_INT, 0, MPI_COMM_WORLD);
    if(all_ret != 0)
    {
        free(logfile_name);
        darshan_core_cleanup(final_job);
        return;
    }

    DARSHAN_MPI_CALL(PMPI_File_close)(&log_fh);

    /* if we got this far, there are no errors, so rename from *.darshan_partial
     * to *-<logwritetime>.darshan.gz, which indicates that this log file is
     * complete and ready for analysis
     */
    /* TODO: support user given logfile path/name */
    if(my_rank == 0)
    {
        char* tmp_index;
        double end_log_time;
        char* new_logfile_name;

        new_logfile_name = malloc(PATH_MAX);
        if(new_logfile_name)
        {
            new_logfile_name[0] = '\0';
            end_log_time = DARSHAN_MPI_CALL(PMPI_Wtime)();
            strcat(new_logfile_name, logfile_name);
            tmp_index = strstr(new_logfile_name, ".darshan_partial");
            sprintf(tmp_index, "_%d.darshan.gz", (int)(end_log_time-start_log_time+1));
            rename(logfile_name, new_logfile_name);
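            /* For example (illustrative): a partial log named
             * "user_app_id1234_3-20-45678-123.darshan_partial" whose write
             * phase took about 4 seconds is renamed to
             * "user_app_id1234_3-20-45678-123_5.darshan.gz".
             */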
            /* set permissions on log file */
#ifdef __CP_GROUP_READABLE_LOGS
            chmod(new_logfile_name, (S_IRUSR|S_IRGRP));
#else
            chmod(new_logfile_name, (S_IRUSR));
#endif
            free(new_logfile_name);
        }
    }

    free(logfile_name);
    darshan_core_cleanup(final_job);

    if(internal_timing_flag)
    {
        /* TODO: what do we want to time in new darshan version? */
    }
    
    return;
}

/* free darshan core data structures to shutdown */
static void darshan_core_cleanup(struct darshan_core_runtime* job)
{
    int i;

    for(i = 0; i < DARSHAN_MAX_MODS; i++)
    {
        if(job->mod_array[i])
        {        
            free(job->mod_array[i]);
            job->mod_array[i] = NULL;
        }
    }

    free(job);

    return;
}

/* construct the darshan log file name */
static void darshan_get_logfile_name(char* logfile_name, int jobid, struct tm* start_tm)
{
    char* logpath;
    char* logname_string;
    char* logpath_override = NULL;
#ifdef __CP_LOG_ENV
    char env_check[256];
    char* env_tok;
#endif
    uint64_t hlevel;
    char hname[HOST_NAME_MAX];
    uint64_t logmod;
    char cuser[L_cuserid] = {0};
    int ret;

    /* Use CP_LOG_PATH_OVERRIDE for the value or __CP_LOG_PATH */
    logpath = getenv(CP_LOG_PATH_OVERRIDE);
    if(!logpath)
    {
#ifdef __CP_LOG_PATH
        logpath = __CP_LOG_PATH;
#endif
    }

    /* get the username for this job.  In order we will try each of the
     * following until one of them succeeds:
     *
     * - cuserid()
     * - getenv("LOGNAME")
     * - snprintf(..., geteuid());
     *
     * Note that we do not use getpwuid() because it generally will not
     * work in statically compiled binaries.
     */

#ifndef DARSHAN_DISABLE_CUSERID
    cuserid(cuser);
#endif

    /* if cuserid() didn't work, then check the environment */
    if(strcmp(cuser, "") == 0)
    {
        logname_string = getenv("LOGNAME");
        if(logname_string)
        {
            strncpy(cuser, logname_string, (L_cuserid-1));
        }
    }

    /* if cuserid() and environment both fail, then fall back to uid */
    if(strcmp(cuser, "") == 0)
    {
        uid_t uid = geteuid();
        snprintf(cuser, sizeof(cuser), "%u", uid);
    }

    /* generate a random number to help differentiate the log */
    hlevel=DARSHAN_MPI_CALL(PMPI_Wtime)() * 1000000;
    (void)gethostname(hname, sizeof(hname));
    logmod = darshan_hash((void*)hname,strlen(hname),hlevel);

    /* see if darshan was configured using the --with-logpath-by-env
     * argument, which allows the user to specify an absolute path to
     * place logs via an env variable.
     */
#ifdef __CP_LOG_ENV
    /* just silently skip if the environment variable list is too big */
    if(strlen(__CP_LOG_ENV) < 256)
    {
        /* copy env variable list to a temporary buffer */
        strcpy(env_check, __CP_LOG_ENV);
        /* tokenize the comma-separated list */
        env_tok = strtok(env_check, ",");
        if(env_tok)
        {
            do
            {
                /* check each env variable in order */
                logpath_override = getenv(env_tok);
                if(logpath_override)
                {
                    /* stop as soon as we find a match */
                    break;
                }
            }while((env_tok = strtok(NULL, ",")));
        }
    }
#endif
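
    /* Illustrative example: had darshan been configured with something like
     * --with-logpath-by-env=DARSHAN_LOGDIR,SCRATCH (variable names made up
     * here), __CP_LOG_ENV would hold "DARSHAN_LOGDIR,SCRATCH" and the first
     * of those variables set in the job's environment supplies the log
     * directory.
     */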

    if(logpath_override)
    {
        ret = snprintf(logfile_name, PATH_MAX,
            "%s/%s_%s_id%d_%d-%d-%d-%" PRIu64 ".darshan_partial",
            logpath_override,
            cuser, __progname, jobid,
            (start_tm->tm_mon+1),
            start_tm->tm_mday,
            (start_tm->tm_hour*60*60 + start_tm->tm_min*60 + start_tm->tm_sec),
            logmod);
        if(ret == (PATH_MAX-1))
        {
            /* file name was too big; squish it down */
            snprintf(logfile_name, PATH_MAX,
                "%s/id%d.darshan_partial",
                logpath_override, jobid);
        }
    }
    else if(logpath)
    {
        ret = snprintf(logfile_name, PATH_MAX,
            "%s/%d/%d/%d/%s_%s_id%d_%d-%d-%d-%" PRIu64 ".darshan_partial",
            logpath, (start_tm->tm_year+1900),
            (start_tm->tm_mon+1), start_tm->tm_mday,
            cuser, __progname, jobid,
            (start_tm->tm_mon+1),
            start_tm->tm_mday,
            (start_tm->tm_hour*60*60 + start_tm->tm_min*60 + start_tm->tm_sec),
            logmod);
        if(ret == (PATH_MAX-1))
        {
            /* file name was too big; squish it down */
            snprintf(logfile_name, PATH_MAX,
                "%s/id%d.darshan_partial",
                logpath, jobid);
        }
    }
    else
    {
        logfile_name[0] = '\0';
    }
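
    /* Illustrative shape of the name constructed above (fields will differ):
     *
     *   <logpath>/<year>/<month>/<day>/<user>_<exe>_id<jobid>_<month>-<day>-<seconds-of-day>-<hash>.darshan_partial
     */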

    return;
}

/* record any hints used to write the darshan log in the log header */
static void darshan_log_record_hints_and_ver(struct darshan_core_runtime* job)
{
    char* hints;
    char* header_hints;
    int meta_remain = 0;
    char* m;

    /* check environment variable to see if the default MPI file hints have
     * been overridden
     */
    hints = getenv(CP_LOG_HINTS_OVERRIDE);
    if(!hints)
    {
        hints = __CP_LOG_HINTS;
    }

    if(!hints || strlen(hints) < 1)
        return;

    header_hints = strdup(hints);
    if(!header_hints)
        return;

    meta_remain = DARSHAN_JOB_METADATA_LEN -
        strlen(job->log_job.metadata) - 1;
    if(meta_remain >= (strlen(PACKAGE_VERSION) + 9))
    {
        sprintf(job->log_job.metadata, "lib_ver=%s\n", PACKAGE_VERSION);
        meta_remain -= (strlen(PACKAGE_VERSION) + 9);
    }
    if(meta_remain >= (3 + strlen(header_hints)))
    {
        m = job->log_job.metadata + strlen(job->log_job.metadata);
        /* We have room to store the hints in the metadata portion of
         * the job header.  We just prepend an h= to the hints list.  The
         * metadata parser will ignore = characters that appear in the value
         * portion of the metadata key/value pair.
         */
        sprintf(m, "h=%s\n", header_hints);
    }
    free(header_hints);

    return;
}

static void darshan_get_shared_record_ids(struct darshan_core_runtime *job,
    darshan_record_id *shared_recs)
{
    int i;
    int ndx;
    struct darshan_core_record_ref *ref, *tmp;
    darshan_record_id id_array[DARSHAN_CORE_MAX_RECORDS] = {0};
    /* the masks are plain ints to match the MPI_INT allreduce below */
    int mask_array[DARSHAN_CORE_MAX_RECORDS] = {0};
    int all_mask_array[DARSHAN_CORE_MAX_RECORDS] = {0};

    /* first, determine list of records root process has opened */
    if(my_rank == 0)
    {
        ndx = 0;
        HASH_ITER(hlink, job->rec_hash, ref, tmp)
        {
            id_array[ndx++] = ref->rec.id;           
        }
    }

    /* broadcast root's list of records to all other processes */
    DARSHAN_MPI_CALL(PMPI_Bcast)(id_array,
        (DARSHAN_CORE_MAX_RECORDS * sizeof(darshan_record_id)),
        MPI_BYTE, 0, MPI_COMM_WORLD);

    /* everyone looks to see if they opened the same records as root */
    for(i=0; (i<DARSHAN_CORE_MAX_RECORDS && id_array[i] != 0); i++)
    {
        HASH_ITER(hlink, job->rec_hash, ref, tmp)
        {
            if(id_array[i] == ref->rec.id)
            {
                /* we opened that record too */
                mask_array[i] = 1;
                break;
            }
        }
    }

    /* now allreduce so everyone agrees which files are shared */
    DARSHAN_MPI_CALL(PMPI_Allreduce)(mask_array, all_mask_array,
        DARSHAN_CORE_MAX_RECORDS, MPI_INT, MPI_LAND, MPI_COMM_WORLD);
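
    /* For example (illustrative): if the root opened records {A, B, C} but
     * some rank never opened B, the per-rank presence masks might be
     * {1,1,1} and {1,0,1}; MPI_LAND leaves all_mask_array == {1,0,1}, so only
     * A and C are reported as globally shared records below.
     */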

    ndx = 0;
    for(i=0; (i<DARSHAN_CORE_MAX_RECORDS && id_array[i] != 0); i++)
    {
        if(all_mask_array[i] != 0)
        {
            shared_recs[ndx++] = id_array[i];
        }
    }

    return;
}

static int darshan_log_coll_open(char *logfile_name, MPI_File *log_fh)
{
    char *hints;
    char *tok_str;
    char *orig_tok_str;
    char *key;
    char *value;
    char *saveptr = NULL;
    int ret;
    MPI_Info info;

    /* check environment variable to see if the default MPI file hints have
     * been overridden
     */
    MPI_Info_create(&info);

    hints = getenv(CP_LOG_HINTS_OVERRIDE);
    if(!hints)
    {
        hints = __CP_LOG_HINTS;
    }

    if(hints && strlen(hints) > 0)
    {
        tok_str = strdup(hints);
        if(tok_str)
        {
            orig_tok_str = tok_str;
            do
            {
                /* split string on semicolon */
                key = strtok_r(tok_str, ";", &saveptr);
                if(key)
                {
                    tok_str = NULL;
                    /* look for = sign splitting key/value pairs */
                    value = index(key, '=');
                    if(value)
                    {
                        /* break key and value into separate null terminated strings */
                        value[0] = '\0';
                        value++;
                        if(strlen(key) > 0)
                            MPI_Info_set(info, key, value);
                    }
                }
            }while(key != NULL);
            free(orig_tok_str);
        }
    }
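
    /* For example (illustrative): a hints string of the form
     * "romio_no_indep_rw=true;cb_nodes=4" would be split on ';' above and
     * result in two key/value pairs being set on the MPI_Info object.
     */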

    /* open the darshan log file for writing */
    ret = DARSHAN_MPI_CALL(PMPI_File_open)(MPI_COMM_WORLD, logfile_name,
        MPI_MODE_CREATE | MPI_MODE_WRONLY | MPI_MODE_EXCL, info, log_fh);
    MPI_Info_free(&info);

    if(ret != MPI_SUCCESS)
        return(-1);
    return(0);
}

/* NOTE: the map written to file may contain duplicate id->name entries if a
 *       record is opened by multiple ranks, but not all ranks
 */
static int darshan_log_write_record_hash(MPI_File log_fh, struct darshan_core_record_ref *rec_hash,
    darshan_record_id *shared_recs, struct darshan_log_map *map)
{
    int i;
    int ret;
    struct darshan_core_record_ref *ref, *tmp;
    uint32_t name_len;
    size_t record_sz;
    size_t hash_buf_sz = 0;
    unsigned char *hash_buf;
    unsigned char *hash_buf_off;
    MPI_Status status;

    /* non-root ranks (rank > 0) remove shared records from their map --
     * these records will be written by rank 0
     */
    if(my_rank > 0)
    {
        for(i=0; (i<DARSHAN_CORE_MAX_RECORDS && shared_recs[i]); i++)
        {
            HASH_FIND(hlink, rec_hash, &shared_recs[i], sizeof(darshan_record_id), ref);
            assert(ref); /* this id had better be in the hash ... */
            HASH_DELETE(hlink, rec_hash, ref);
            if(ref->rec.name) free(ref->rec.name);
            free(ref);
        }
    }

    /* allocate a buffer to store at most 64 bytes for each of a max number of records */
    /* NOTE: this buffer may be reallocated if estimate is too small */
    hash_buf_sz = DARSHAN_CORE_MAX_RECORDS * 64;
    hash_buf = malloc(hash_buf_sz);
    if(!hash_buf)
    {
        return(-1);
    }

    /* serialize the record map into a buffer for writing */
    hash_buf_off = hash_buf;
    HASH_ITER(hlink, rec_hash, ref, tmp)
    {
        name_len = strlen(ref->rec.name);
        record_sz = sizeof(darshan_record_id) + sizeof(uint32_t) + name_len;
        /* make sure there is room in the buffer for this record */
        if((hash_buf_off + record_sz) > (hash_buf + hash_buf_sz))
        {
            unsigned char *tmp_buf;
            size_t old_buf_sz;

            /* if no room, reallocate the map buffer at twice the current size */
            old_buf_sz = hash_buf_off - hash_buf;
            hash_buf_sz *= 2;
            tmp_buf = malloc(hash_buf_sz);
            if(!tmp_buf)
            {
                free(hash_buf);
                return(-1);
            }

            memcpy(tmp_buf, hash_buf, old_buf_sz);
            free(hash_buf);
            hash_buf = tmp_buf;
            hash_buf_off = hash_buf + old_buf_sz;
        }

        /* now serialize the record into the map buffer.
         * NOTE: darshan record map serialization method: 
         *          ... darshan_record_id | (uint32_t) path_len | path ...
         */
        *((darshan_record_id *)hash_buf_off) = ref->rec.id;
        hash_buf_off += sizeof(darshan_record_id);
        *((uint32_t *)hash_buf_off) = name_len;
        hash_buf_off += sizeof(uint32_t);
        memcpy(hash_buf_off, ref->rec.name, name_len);
        hash_buf_off += name_len;
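
        /* Worked example (illustrative): a record whose name is "/tmp/foo"
         * (8 characters) occupies sizeof(darshan_record_id) bytes of id,
         * a 4-byte length field holding 8, and then the 8 name bytes, with
         * no padding and no terminating NUL.
         */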
    }

    /* collectively write out the record map to the darshan log */
    if(hash_buf_off > hash_buf)
    {
        /* we have records to contribute to the collective write of the record map */
        ret = darshan_log_coll_write(log_fh, hash_buf, (hash_buf_off-hash_buf), map);
    }
    else
    {
        /* we have no data to write, but participate in the collective anyway */
        ret = darshan_log_coll_write(log_fh, NULL, 0, map);
    }

    free(hash_buf);

    if(ret < 0)
        return(-1);

    return(0);
}

/* NOTE: The in/out param 'map' is only valid on rank 0 and is used
 *       to provide the starting offset of this collective write and
 *       to store the aggregate size of this write upon completion.
 *       This implies ONLY rank 0 can specify the starting offset
 *       and that only rank 0 knows the ending log file offset upon
 *       return from this function (starting off + aggregate size).
 */
static int darshan_log_coll_write(MPI_File log_fh, void *buf, int count,
    struct darshan_log_map *map)
{
    MPI_Offset send_off, my_off;
    MPI_Status status;
    int ret;

    /* figure out where everyone is writing using scan */
    send_off = count;
    if(my_rank == 0)
    {
        send_off += map->off; /* rank 0 knows the beginning offset */
    }

    DARSHAN_MPI_CALL(PMPI_Scan)(&send_off, &my_off, 1, MPI_OFFSET,
        MPI_SUM, MPI_COMM_WORLD);
    /* scan is inclusive; subtract local size back out */
    my_off -= count;
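
    /* Worked example (illustrative): with map->off == 100 on rank 0 and
     * per-rank counts {10, 20, 30}, the inclusive scan yields {110, 130, 160};
     * subtracting each rank's own count gives write offsets {100, 110, 130}.
     * Rank 2's ending offset (160) is later sent back to rank 0 so that
     * map->len becomes 60.
     */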

    /* perform the collective write */
    ret = DARSHAN_MPI_CALL(PMPI_File_write_at_all)(log_fh, my_off, buf,
        count, MPI_BYTE, &status);
    if(ret != MPI_SUCCESS)
        return(-1);

    /* send the ending offset from rank (n-1) to rank 0 */
    if(nprocs > 1)
    {
        if(my_rank == (nprocs-1))
        {
            my_off += count;
            DARSHAN_MPI_CALL(PMPI_Send)(&my_off, 1, MPI_OFFSET, 0, 0,
                MPI_COMM_WORLD);
        }
        else if(my_rank == 0)
        {
            DARSHAN_MPI_CALL(PMPI_Recv)(&my_off, 1, MPI_OFFSET, (nprocs-1), 0,
                MPI_COMM_WORLD, &status);

            map->len = my_off - map->off;
        }
    }
    else
    {
        map->len = my_off + count - map->off;
    }

    return(0);
}

/* ********************************************************* */

void darshan_core_register_module(