/*
 *  (C) 2009 by Argonne National Laboratory.
 *      See COPYRIGHT in top-level directory.
 */

#define _GNU_SOURCE

#include "darshan-runtime-config.h"

#include <stdio.h>
#ifdef HAVE_MNTENT_H
#include <mntent.h>
#endif
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <limits.h>
#include <pthread.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <mpi.h>
#include <assert.h>

#include "uthash.h"
#include "darshan-core.h"

/* TODO is __progname_full needed here */
extern char* __progname;

/* internal variable declarations */
static struct darshan_core_runtime *darshan_core_job = NULL;
static pthread_mutex_t darshan_core_mutex = PTHREAD_MUTEX_INITIALIZER;
static int my_rank = -1;
static int nprocs = -1;

/* prototypes for internal helper functions */
static void darshan_core_initialize(
    int *argc, char ***argv);
static void darshan_core_shutdown(
    void);
static void darshan_core_cleanup(
    struct darshan_core_runtime* job);
static void darshan_get_logfile_name(
    char* logfile_name, int jobid, struct tm* start_tm);
static void darshan_log_record_hints_and_ver(
    struct darshan_core_runtime* job);
static int darshan_get_shared_record_ids(
    struct darshan_core_runtime *job, darshan_record_id *shared_recs);
static int darshan_write_record_map(
    struct darshan_core_runtime *job, MPI_File log_fh, darshan_record_id *shared_recs);
#define DARSHAN_CORE_LOCK() pthread_mutex_lock(&darshan_core_mutex)
#define DARSHAN_CORE_UNLOCK() pthread_mutex_unlock(&darshan_core_mutex)
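
/* NOTE: this mutex serializes access to the darshan_core_job shared state,
 * since instrumented applications may drive the runtime from multiple
 * threads (cf. the MPI_Init_thread wrapper below) */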

/* intercept MPI initialize and finalize to manage darshan core runtime */
int MPI_Init(int *argc, char ***argv)
{
    int ret;

    ret = DARSHAN_MPI_CALL(PMPI_Init)(argc, argv);
    if(ret != MPI_SUCCESS)
    {
        return(ret);
    }

    darshan_core_initialize(argc, argv);

    return(ret);
}

int MPI_Init_thread(int *argc, char ***argv, int required, int *provided)
{
    int ret;

    ret = DARSHAN_MPI_CALL(PMPI_Init_thread)(argc, argv, required, provided);
    if(ret != MPI_SUCCESS)
    {
        return(ret);
    }

    darshan_core_initialize(argc, argv);

    return(ret);
}

int MPI_Finalize(void)
{
    int ret;
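    /* NOTE: collect and write the darshan log before PMPI_Finalize runs,
     * since shutdown relies on MPI collectives and MPI-IO */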

    darshan_core_shutdown();

    ret = DARSHAN_MPI_CALL(PMPI_Finalize)();
    return(ret);
}

/* *********************************** */

static void darshan_core_initialize(int *argc, char ***argv)
{
    int i;
    int internal_timing_flag = 0;
    double init_start, init_time, init_max;
    char* truncate_string = "<TRUNCATED>";
    int truncate_offset;
    int chars_left = 0;

    DARSHAN_MPI_CALL(PMPI_Comm_size)(MPI_COMM_WORLD, &nprocs);
    DARSHAN_MPI_CALL(PMPI_Comm_rank)(MPI_COMM_WORLD, &my_rank);

    if(getenv("DARSHAN_INTERNAL_TIMING"))
        internal_timing_flag = 1;

    if(internal_timing_flag)
        init_start = DARSHAN_MPI_CALL(PMPI_Wtime)();

    /* setup darshan runtime if darshan is enabled and hasn't been initialized already */
    if(!getenv("DARSHAN_DISABLE") && !darshan_core_job)
    {
        /* allocate structure to track darshan_core_job information */
        darshan_core_job = malloc(sizeof(*darshan_core_job));
        if(darshan_core_job)
        {
            memset(darshan_core_job, 0, sizeof(*darshan_core_job));

            darshan_core_job->log_job.uid = getuid();
            darshan_core_job->log_job.start_time = time(NULL);
            darshan_core_job->log_job.nprocs = nprocs;
            darshan_core_job->wtime_offset = DARSHAN_MPI_CALL(PMPI_Wtime)();

            /* record exe and arguments; argc/argv may be NULL if we were
             * initialized from fortran, so guard against dereferencing them
             */
            if(argc && argv)
            {
                for(i=0; i<(*argc); i++)
                {
                    chars_left = CP_EXE_LEN-strlen(darshan_core_job->exe);
                    strncat(darshan_core_job->exe, (*argv)[i], chars_left);
                    if(i < ((*argc)-1))
                    {
                        chars_left = CP_EXE_LEN-strlen(darshan_core_job->exe);
                        strncat(darshan_core_job->exe, " ", chars_left);
                    }
                }
            }
            else
            {
                /* if we don't see any arguments, then use glibc symbol to get
                 * program name at least (this happens in fortran)
                 */
                chars_left = CP_EXE_LEN-strlen(darshan_core_job->exe);
                strncat(darshan_core_job->exe, __progname, chars_left);
                chars_left = CP_EXE_LEN-strlen(darshan_core_job->exe);
                strncat(darshan_core_job->exe, " <unknown args>", chars_left);
            }

            if(chars_left == 0)
            {
                /* we ran out of room; mark that string was truncated */
                truncate_offset = CP_EXE_LEN - strlen(truncate_string);
                sprintf(&darshan_core_job->exe[truncate_offset], "%s",
                    truncate_string);
            }
        }
    }

    if(internal_timing_flag)
    {
        init_time = DARSHAN_MPI_CALL(PMPI_Wtime)() - init_start;
        DARSHAN_MPI_CALL(PMPI_Reduce)(&init_time, &init_max, 1,
            MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
        if(my_rank == 0)
        {
            printf("#darshan:<op>\t<nprocs>\t<time>\n");
            printf("darshan:init\t%d\t%f\n", nprocs, init_max);
        }
    }

    return;
}

static void darshan_core_shutdown(void)
{
    int i;
    char *logfile_name;
    struct darshan_core_runtime *final_job;
    struct darshan_core_module *mod, *tmp;
    int internal_timing_flag = 0;
    char *envjobid;
    char *jobid_str;
    int jobid;
    struct tm *start_tm;
    time_t start_time_tmp;
    int ret = 0;
    int all_ret = 0;
    int64_t first_start_time;
    int64_t last_end_time;
    int local_mod_use[DARSHAN_MAX_MODS] = {0};
    int global_mod_use_count[DARSHAN_MAX_MODS] = {0};
    darshan_record_id shared_recs[DARSHAN_CORE_MAX_RECORDS] = {0};
    char *key;
    char *value;
    char *hints;
    char *tok_str;
    char *orig_tok_str;
    char *saveptr = NULL;
    char *mod_index;
    char *new_logfile_name;
    double start_log_time;
    double end_log_time;
    long offset;
    MPI_File log_fh;
    MPI_Info info;
    MPI_Status status;

    if(getenv("DARSHAN_INTERNAL_TIMING"))
        internal_timing_flag = 1;

    DARSHAN_CORE_LOCK();
    if(!darshan_core_job)
    {
        DARSHAN_CORE_UNLOCK();
        return;
    }
    /* disable further tracing while hanging onto the data so that we can
     * write it out
     */
    final_job = darshan_core_job;
    darshan_core_job = NULL;
    DARSHAN_CORE_UNLOCK();

    start_log_time = DARSHAN_MPI_CALL(PMPI_Wtime)();

    logfile_name = malloc(PATH_MAX);
    if(!logfile_name)
    {
        darshan_core_cleanup(final_job);
        return;
    }

    /* set darshan job id/metadata and construct log file name on rank 0 */
    if(my_rank == 0)
    {
        /* get the name of the env var holding the job id from
         * CP_JOBID_OVERRIDE, if set; otherwise fall back to CP_JOBID
         */
        envjobid = getenv(CP_JOBID_OVERRIDE);
        if(!envjobid)
        {
            envjobid = CP_JOBID;
        }

        jobid_str = getenv(envjobid);
        if(jobid_str)
        {
            /* in cobalt we can find it in env var */
            ret = sscanf(jobid_str, "%d", &jobid);
        }
        if(!jobid_str || ret != 1)
        {
            /* use pid as fall back */
            jobid = getpid();
        }

        final_job->log_job.jobid = (int64_t)jobid;

        /* if we are using any hints to write the log file, then record those
         * hints in the log file header
         */
        darshan_log_record_hints_and_ver(final_job);

        /* use human readable start time format in log filename */
        start_time_tmp = final_job->log_job.start_time;
        start_tm = localtime(&start_time_tmp);

        /* construct log file name */
        darshan_get_logfile_name(logfile_name, jobid, start_tm);
    }

    /* broadcast log file name */
    DARSHAN_MPI_CALL(PMPI_Bcast)(logfile_name, PATH_MAX, MPI_CHAR, 0,
        MPI_COMM_WORLD);

    if(strlen(logfile_name) == 0)
    {
        /* failed to generate log file name */
        free(logfile_name);
        darshan_core_cleanup(final_job);
        return;
    }

    final_job->log_job.end_time = time(NULL);

    /* reduce to report first start time and last end time across all ranks
     * at rank 0
     */
    DARSHAN_MPI_CALL(PMPI_Reduce)(&final_job->log_job.start_time, &first_start_time, 1, MPI_LONG_LONG, MPI_MIN, 0, MPI_COMM_WORLD);
    DARSHAN_MPI_CALL(PMPI_Reduce)(&final_job->log_job.end_time, &last_end_time, 1, MPI_LONG_LONG, MPI_MAX, 0, MPI_COMM_WORLD);
    if(my_rank == 0)
    {
        final_job->log_job.start_time = first_start_time;
        final_job->log_job.end_time = last_end_time;
    }
    /* set which local modules were actually used */
    for(i = 0; i < DARSHAN_MAX_MODS; i++)
    {
        if(final_job->mod_array[i])
            local_mod_use[i] = 1;
    }

    /* tally how many ranks used each module and share the totals with everyone */
    DARSHAN_MPI_CALL(PMPI_Allreduce)(local_mod_use, global_mod_use_count, DARSHAN_MAX_MODS, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

    /* check environment variable to see if the default MPI file hints have
     * been overridden
     */
    MPI_Info_create(&info);

    hints = getenv(CP_LOG_HINTS_OVERRIDE);
    if(!hints)
    {
        hints = __CP_LOG_HINTS;
    }
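
    /* hints take the form of a semicolon-separated list of key=value pairs,
     * e.g. "romio_no_indep_rw=true;cb_nodes=4" (illustrative example) */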

    if(hints && strlen(hints) > 0)
    {
        tok_str = strdup(hints);
        if(tok_str)
        {
            orig_tok_str = tok_str;
            do
            {
                /* split string on semicolon */
                key = strtok_r(tok_str, ";", &saveptr);
                if(key)
                {
                    tok_str = NULL;
                    /* look for = sign splitting key/value pairs */
                    value = strchr(key, '=');
                    if(value)
                    {
                        /* break key and value into separate null terminated strings */
                        value[0] = '\0';
                        value++;
                        if(strlen(key) > 0)
                            MPI_Info_set(info, key, value);
                    }
                }
            }while(key != NULL);
            free(orig_tok_str);
        }
    }

    /* get a list of records which are shared across all processes */
    /* TODO: do we store rank with the name map? */
    ret = darshan_get_shared_record_ids(final_job, shared_recs);

    /* error out if unable to determine shared file records */
    DARSHAN_MPI_CALL(PMPI_Allreduce)(&ret, &all_ret, 1, MPI_INT,
        MPI_LOR, MPI_COMM_WORLD);
    if(all_ret != 0)
    {
        if(my_rank == 0)
        {
            fprintf(stderr, "darshan library warning: unable to determine shared file records\n");
        }
        free(logfile_name);
        darshan_core_cleanup(final_job);
        return;

    }

    /* TODO: ensuing error checking...does MPI ensure collective I/O functions return the same error
     * globally, or do I always need to allreduce????? */

    /* open the darshan log file for writing */
    ret = DARSHAN_MPI_CALL(PMPI_File_open)(MPI_COMM_WORLD, logfile_name,
        MPI_MODE_CREATE | MPI_MODE_WRONLY | MPI_MODE_EXCL, info, &log_fh);
    MPI_Info_free(&info);

    /* error out if unable to open log file */
    DARSHAN_MPI_CALL(PMPI_Allreduce)(&ret, &all_ret, 1, MPI_INT,
        MPI_LOR, MPI_COMM_WORLD);
    if(all_ret != 0)
    {
        if(my_rank == 0)
        {
            int msg_len;
            char msg[MPI_MAX_ERROR_STRING] = {0};

            MPI_Error_string(ret, msg, &msg_len);
            fprintf(stderr, "darshan library warning: unable to open log file %s: %s\n",
                logfile_name, msg);
            unlink(logfile_name);
        }
        free(logfile_name);
        darshan_core_cleanup(final_job);
        return;
    }

    /* reserve space at beginning of darshan log for uncompressed header using seek */
    /* NOTE: the header includes the darshan job struct and the module indices map */
    MPI_Offset header_end = sizeof(struct darshan_job);
    /* header_end += (); TODO: how much do i leave for the indices map? */
    ret = DARSHAN_MPI_CALL(PMPI_File_seek)(log_fh, header_end, MPI_SEEK_SET);
    if(ret != MPI_SUCCESS)
    {
        if(my_rank == 0)
        {
            int msg_len;
            char msg[MPI_MAX_ERROR_STRING] = {0};

            MPI_Error_string(ret, msg, &msg_len);
            fprintf(stderr, "darshan library warning: unable to seek in log file %s: %s\n",
                logfile_name, msg);
            unlink(logfile_name);
        }
        free(logfile_name);
        darshan_core_cleanup(final_job);
        return;
    }

    /* write the record name->id map to the log file */
    ret = darshan_write_record_map(final_job, log_fh, shared_recs);

    DARSHAN_MPI_CALL(PMPI_Allreduce)(&ret, &all_ret, 1, MPI_INT,
        MPI_LOR, MPI_COMM_WORLD);
    if(all_ret != 0)
    {
        if(my_rank == 0)
        {
            fprintf(stderr, "darshan library warning: unable to write record map to log file %s\n",
                logfile_name);
        }
        free(logfile_name);
        darshan_core_cleanup(final_job);
        return;
    }

    /* loop over globally used darshan modules and:
     *      - get final output buffer
     *      - compress (zlib) provided output buffer
     *      - write compressed buffer to log file
     *      - shutdown the module
     */
    for(i = 0; i < DARSHAN_MAX_MODS; i++)
    {
        struct darshan_core_module* this_mod = final_job->mod_array[i];
        MPI_Comm mod_comm;
        void* mod_buf = NULL;
        int mod_buf_size = 0;
        void* comp_buf = NULL;
        long comp_buf_size = 0;
        long scan_offset = 0;

        if(!global_mod_use_count[i])
            continue;

        /* create a communicator to use for shutting down the module */
        if(global_mod_use_count[i] == nprocs)
        {
            MPI_Comm_dup(MPI_COMM_WORLD, &mod_comm);
        }
        else
        {
            MPI_Comm_split(MPI_COMM_WORLD, local_mod_use[i], 0, &mod_comm);
        }
        /* if module is registered locally, get the corresponding output buffer */
        if(local_mod_use[i])
        {
            /* get output buffer from module */
            this_mod->mod_funcs.get_output_data(mod_comm, &mod_buf, &mod_buf_size);
        }

        if(mod_buf_size > 0)
        {
            /* TODO generic compression */
            comp_buf = mod_buf;
            comp_buf_size = mod_buf_size;
        }

        /* get current file size on rank 0 so we can calculate offset correctly */
        scan_offset = comp_buf_size;
        if(my_rank == 0)
        {
            MPI_Offset tmp_off;
            
            ret = MPI_File_get_size(log_fh, &tmp_off);
            if(ret != MPI_SUCCESS)
            {
                int msg_len;
                char msg[MPI_MAX_ERROR_STRING] = {0};

                MPI_Error_string(ret, msg, &msg_len);
                fprintf(stderr, "darshan library warning: unable to write module data to log file %s: %s\n",
                        logfile_name, msg);
                DARSHAN_MPI_CALL(PMPI_File_close)(&log_fh);
                unlink(logfile_name);
                free(logfile_name);
                darshan_core_cleanup(final_job);
                return;
            }
            scan_offset += tmp_off;
        }

        /* figure out everyone's offset using scan */
        DARSHAN_MPI_CALL(PMPI_Scan)(&scan_offset, &offset, 1, MPI_LONG, MPI_SUM, MPI_COMM_WORLD);
        offset -= comp_buf_size;
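        /* (PMPI_Scan computes an inclusive prefix sum, so subtracting this
         *  rank's own contribution yields the exclusive sum: the starting
         *  offset of this rank's data in the log file)
         */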

        /* collectively write out each rank's contributing data (maybe nothing) */
        ret = DARSHAN_MPI_CALL(PMPI_File_write_at_all)(log_fh, offset, comp_buf,
            comp_buf_size, MPI_BYTE, &status);

        /* error out if unable to write */
        DARSHAN_MPI_CALL(PMPI_Allreduce)(&ret, &all_ret, 1, MPI_INT,
            MPI_LOR, MPI_COMM_WORLD);
        if(all_ret != 0)
        {
            DARSHAN_MPI_CALL(PMPI_File_close)(&log_fh);
            if(my_rank == 0)
            {
                int msg_len;
                char msg[MPI_MAX_ERROR_STRING] = {0};

                MPI_Error_string(ret, msg, &msg_len);
                fprintf(stderr, "darshan library warning: unable to write module data to log file %s: %s\n",
                        logfile_name, msg);
                unlink(logfile_name);
            }
            free(logfile_name);
            darshan_core_cleanup(final_job);
            return;
        }

        /* shutdown module if registered locally */
        if(local_mod_use[i])
        {
            this_mod->mod_funcs.shutdown();
            this_mod = NULL;
        }

        MPI_Comm_free(&mod_comm);
    }

    /* TODO: is this still right? -- write the job info on rank 0 */
    if(my_rank == 0)
    {
        /* TODO: we want to send log_job, and offsets map */
        ret = DARSHAN_MPI_CALL(PMPI_File_write_at)(log_fh, 0, &(final_job->log_job),
            sizeof(struct darshan_job), MPI_BYTE, &status);
        if(ret != MPI_SUCCESS)
        {
            int msg_len;
            char msg[MPI_MAX_ERROR_STRING] = {0};

            MPI_Error_string(ret, msg, &msg_len);
            fprintf(stderr, "darshan library warning: unable to write job data to log file %s: %s\n",
                    logfile_name, msg);
            unlink(logfile_name);
            free(logfile_name);
            darshan_core_cleanup(final_job);
            return;
        }
    }

    DARSHAN_MPI_CALL(PMPI_File_close)(&log_fh);

    /* if we got this far, there are no errors, so rename from *.darshan_partial
     * to *_<logwritetime>.darshan.gz, which indicates that this log file is
     * complete and ready for analysis
     */
    new_logfile_name = malloc(PATH_MAX);
    if(new_logfile_name)
    {
        new_logfile_name[0] = '\0';
        end_log_time = DARSHAN_MPI_CALL(PMPI_Wtime)();
        strcat(new_logfile_name, logfile_name);
        mod_index = strstr(new_logfile_name, ".darshan_partial");
        sprintf(mod_index, "_%d.darshan.gz", (int)(end_log_time-start_log_time+1));
        rename(logfile_name, new_logfile_name);
        /* set permissions on log file */
#ifdef __CP_GROUP_READABLE_LOGS
        chmod(new_logfile_name, (S_IRUSR|S_IRGRP));
#else
        chmod(new_logfile_name, (S_IRUSR));
#endif
        free(new_logfile_name);
    }

    darshan_core_cleanup(final_job);
    free(logfile_name);

    if(internal_timing_flag)
    {
        /* TODO: what do we want to time in new darshan version? */
    }
    
    return;
}

/* free darshan core data structures to shutdown */
static void darshan_core_cleanup(struct darshan_core_runtime* job)
{
    int i;

    for(i = 0; i < DARSHAN_MAX_MODS; i++)
    {
        if(job->mod_array[i])
        {
            free(job->mod_array[i]);
            job->mod_array[i] = NULL;
        }
    }

    free(job);

    return;
}

/* construct the darshan log file name */
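/* NOTE: the generated name has the form
 *   <user>_<exe>_id<jobid>_<month>-<day>-<seconds-in-day>-<hash>.darshan_partial
 * and, unless an override directory is configured via environment, it is
 * placed under <logpath>/<year>/<month>/<day>/
 */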
static void darshan_get_logfile_name(char* logfile_name, int jobid, struct tm* start_tm)
{
    char* logpath;
    char* logname_string;
    char* logpath_override = NULL;
#ifdef __CP_LOG_ENV
    char env_check[256];
    char* env_tok;
#endif
    uint64_t hlevel;
    char hname[HOST_NAME_MAX];
    uint64_t logmod;
    char cuser[L_cuserid] = {0};
    int ret;

    /* Use CP_LOG_PATH_OVERRIDE for the value or __CP_LOG_PATH */
    logpath = getenv(CP_LOG_PATH_OVERRIDE);
    if(!logpath)
    {
#ifdef __CP_LOG_PATH
        logpath = __CP_LOG_PATH;
#endif
    }

    /* get the username for this job.  In order we will try each of the
     * following until one of them succeeds:
     *
     * - cuserid()
     * - getenv("LOGNAME")
     * - snprintf(..., geteuid());
     *
     * Note that we do not use getpwuid() because it generally will not
     * work in statically compiled binaries.
     */

#ifndef DARSHAN_DISABLE_CUSERID
    cuserid(cuser);
#endif

    /* if cuserid() didn't work, then check the environment */
    if(strcmp(cuser, "") == 0)
    {
        logname_string = getenv("LOGNAME");
        if(logname_string)
        {
            strncpy(cuser, logname_string, (L_cuserid-1));
        }

    }

    /* if cuserid() and environment both fail, then fall back to uid */
    if(strcmp(cuser, "") == 0)
    {
        uid_t uid = geteuid();
        snprintf(cuser, sizeof(cuser), "%u", uid);
    }

    /* generate a random number to help differentiate the log */
    hlevel=DARSHAN_MPI_CALL(PMPI_Wtime)() * 1000000;
    (void)gethostname(hname, sizeof(hname));
    logmod = darshan_hash((void*)hname,strlen(hname),hlevel);

    /* see if darshan was configured using the --with-logpath-by-env
     * argument, which allows the user to specify an absolute path to
     * place logs via an env variable.
     */
#ifdef __CP_LOG_ENV
    /* just silently skip if the environment variable list is too big */
    if(strlen(__CP_LOG_ENV) < 256)
    {
        /* copy env variable list to a temporary buffer */
        strcpy(env_check, __CP_LOG_ENV);
        /* tokenize the comma-separated list */
        env_tok = strtok(env_check, ",");
        if(env_tok)
        {
            do
            {
                /* check each env variable in order */
                logpath_override = getenv(env_tok);
                if(logpath_override)
                {
                    /* stop as soon as we find a match */
                    break;
                }
            }while((env_tok = strtok(NULL, ",")));
        }
    }
#endif

    if(logpath_override)
    {
        ret = snprintf(logfile_name, PATH_MAX,
            "%s/%s_%s_id%d_%d-%d-%d-%" PRIu64 ".darshan_partial",
            logpath_override,
            cuser, __progname, jobid,
            (start_tm->tm_mon+1),
            start_tm->tm_mday,
            (start_tm->tm_hour*60*60 + start_tm->tm_min*60 + start_tm->tm_sec),
            logmod);
        if(ret >= (PATH_MAX-1))
        {
            /* file name was too big; squish it down */
            snprintf(logfile_name, PATH_MAX,
                "%s/id%d.darshan_partial",
                logpath_override, jobid);
        }
    }
    else if(logpath)
    {
        ret = snprintf(logfile_name, PATH_MAX,
            "%s/%d/%d/%d/%s_%s_id%d_%d-%d-%d-%" PRIu64 ".darshan_partial",
            logpath, (start_tm->tm_year+1900),
            (start_tm->tm_mon+1), start_tm->tm_mday,
            cuser, __progname, jobid,
            (start_tm->tm_mon+1),
            start_tm->tm_mday,
            (start_tm->tm_hour*60*60 + start_tm->tm_min*60 + start_tm->tm_sec),
            logmod);
        if(ret >= (PATH_MAX-1))
        {
            /* file name was too big; squish it down */
            snprintf(logfile_name, PATH_MAX,
                "%s/id%d.darshan_partial",
                logpath, jobid);
        }
    }
    else
    {
        logfile_name[0] = '\0';
    }

    return;
}

/* record any hints used to write the darshan log in the log header */
static void darshan_log_record_hints_and_ver(struct darshan_core_runtime* job)
{
    char* hints;
    char* header_hints;
    int meta_remain = 0;
    char* m;

    /* check environment variable to see if the default MPI file hints have
     * been overridden
     */
    hints = getenv(CP_LOG_HINTS_OVERRIDE);
    if(!hints)
    {
        hints = __CP_LOG_HINTS;
    }

    if(!hints || strlen(hints) < 1)
        return;

    header_hints = strdup(hints);
    if(!header_hints)
        return;

    meta_remain = DARSHAN_JOB_METADATA_LEN -
        strlen(job->log_job.metadata) - 1;
    if(meta_remain >= (strlen(PACKAGE_VERSION) + 9))
    {
        sprintf(job->log_job.metadata, "lib_ver=%s\n", PACKAGE_VERSION);
        meta_remain -= (strlen(PACKAGE_VERSION) + 9);
    }
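    /* (the 9 above accounts for strlen("lib_ver=") plus the trailing newline) */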
    if(meta_remain >= (3 + strlen(header_hints)))
    {
        m = job->log_job.metadata + strlen(job->log_job.metadata);
        /* We have room to store the hints in the metadata portion of
         * the job header.  We just prepend an h= to the hints list.  The
         * metadata parser will ignore = characters that appear in the value
         * portion of the metadata key/value pair.
         */
        sprintf(m, "h=%s\n", header_hints);
    }
    free(header_hints);

    return;
}

static int darshan_get_shared_record_ids(struct darshan_core_runtime *job,
    darshan_record_id *shared_recs)
{
    int i;
    int ndx;
    int ret;
    struct darshan_core_record_ref *ref, *tmp;
    darshan_record_id id_array[DARSHAN_CORE_MAX_RECORDS] = {0};
    int mask_array[DARSHAN_CORE_MAX_RECORDS] = {0};     /* int to match the MPI_INT reduction below */
    int all_mask_array[DARSHAN_CORE_MAX_RECORDS] = {0};

    /* first, determine list of records root process has opened */
    if(my_rank == 0)
    {
        ndx = 0;
        HASH_ITER(hlink, job->rec_hash, ref, tmp)
        {
            id_array[ndx++] = ref->rec.id;
        }
    }

    /* broadcast root's list of records to all other processes */
    ret = DARSHAN_MPI_CALL(PMPI_Bcast)(id_array,
        (DARSHAN_CORE_MAX_RECORDS * sizeof(darshan_record_id)),
        MPI_BYTE, 0, MPI_COMM_WORLD);
    if(ret != 0)
    {
        return -1;
    }

    /* everyone looks to see if they opened the same records as root */
    for(i=0; (i<DARSHAN_CORE_MAX_RECORDS && id_array[i] != 0); i++)
    {
        HASH_ITER(hlink, job->rec_hash, ref, tmp)
        {
            if(id_array[i] == ref->rec.id)
            {
                /* we opened that record too */
                mask_array[i] = 1;
                break;
            }
        }
    }

    /* now allreduce so everyone agrees which files are shared */
    ret = DARSHAN_MPI_CALL(PMPI_Allreduce)(mask_array, all_mask_array,
        DARSHAN_CORE_MAX_RECORDS, MPI_INT, MPI_LAND, MPI_COMM_WORLD);
    if(ret != 0)
    {
        return -1;
    }

    ndx = 0;
    for(i=0; (i<DARSHAN_CORE_MAX_RECORDS && id_array[i] != 0); i++)
    {
        if(all_mask_array[i] != 0)
        {
            shared_recs[ndx++] = id_array[i];
        }
    }

    return 0;
}

/* NOTE: the map written to file may contain duplicate id->name entries if a
 *       record is opened by multiple ranks, but not all ranks
 */
static int darshan_write_record_map(struct darshan_core_runtime *job, MPI_File log_fh,
    darshan_record_id *shared_recs)
{
    int i;
    int ret;
    struct darshan_core_record_ref *ref, *tmp;
    uint32_t name_len;
    size_t record_sz;
    size_t map_buf_sz = 0;
    unsigned char *map_buf;
    unsigned char *map_buf_off;
    MPI_Status status;

    /* non-root ranks (rank 0) remove shared records from their map --
     * these records will be written by rank 0
     */
    if(my_rank > 0)
    {
        for(i=0; (i<DARSHAN_CORE_MAX_RECORDS && shared_recs[i]); i++)
        {
            HASH_FIND(hlink, job->rec_hash, &shared_recs[i], sizeof(darshan_record_id), ref);
            assert(ref); /* this id had better be in the hash ... */
            HASH_DELETE(hlink, job->rec_hash, ref);
            if(ref->rec.name) free(ref->rec.name);
            free(ref);
        }
    }

    /* allocate a buffer to store at most 64 bytes for each of a max number of records */
    /* NOTE: this buffer may be reallocated if estimate is too small */
    map_buf_sz = DARSHAN_CORE_MAX_RECORDS * 64;
    map_buf = malloc(map_buf_sz);
    if(!map_buf)
    {
        return -1;
    }

    map_buf_off = map_buf;
    HASH_ITER(hlink, job->rec_hash, ref, tmp)
    {
        name_len = strlen(ref->rec.name);
        record_sz = sizeof(darshan_record_id) + sizeof(int) + name_len;
        /* make sure there is room in the buffer for this record */
        if((map_buf_off + record_sz) > (map_buf + map_buf_sz))
        {
            unsigned char *tmp_buf;
            size_t old_buf_sz;

            /* if no room, reallocate the map buffer at twice the current size */
            old_buf_sz = map_buf_off - map_buf;
            map_buf_sz *= 2;
            tmp_buf = malloc(map_buf_sz);
            if(!tmp_buf)
            {
                free(map_buf);
                return -1;
            }

            memcpy(tmp_buf, map_buf, old_buf_sz);
            free(map_buf);
            map_buf = tmp_buf;
            map_buf_off = map_buf + old_buf_sz;
        }

        /* now serialize the record into the map buffer.
         * NOTE: darshan record map serialization method: 
         *          ... darshan_record_id | (uint32_t) path_len | path ...
         */
        *((darshan_record_id *)map_buf_off) = ref->rec.id;
        map_buf_off += sizeof(darshan_record_id);
        *((uint32_t *)map_buf_off) = name_len;
        map_buf_off += sizeof(uint32_t);
        memcpy(map_buf_off, ref->rec.name, name_len);
        map_buf_off += name_len;
    }

    /* collectively write out the record map to the darshan log */
    if(map_buf_off > map_buf)
    {
        /* we have records to contribute to the collective write of the record map */
        ret = DARSHAN_MPI_CALL(PMPI_File_write_all)(log_fh, map_buf, (map_buf_off - map_buf),
            MPI_BYTE, &status);
    }
    else
    {
        /* we have no data to write, but participate in the collective anyway */
        ret = DARSHAN_MPI_CALL(PMPI_File_write_all)(log_fh, NULL, 0,
            MPI_BYTE, &status);
    }
    if(ret != MPI_SUCCESS)
    {
        free(map_buf);
        return -1;
    }

    free(map_buf);

    return 0;
}

/* ********************************************************* */
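
/* EXAMPLE (illustrative sketch, not part of this file): a module typically
 * registers itself from its first intercepted call, along the lines of:
 *
 *     struct darshan_module_funcs funcs =
 *     {
 *         .get_output_data = &my_mod_get_output_data,
 *         .shutdown = &my_mod_shutdown,
 *     };
 *     int mem_limit;
 *
 *     darshan_core_register_module(MY_MOD_ID, "MY-MOD", &funcs, &mem_limit);
 *
 * where MY_MOD_ID and the my_mod_* functions are hypothetical placeholders;
 * mem_limit returns the module's runtime memory budget (0 if darshan is
 * disabled or the module was already registered).
 */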

void darshan_core_register_module(
    darshan_module_id id,
    char *name,
    struct darshan_module_funcs *funcs,
    int *runtime_mem_limit)
{
    struct darshan_core_module* mod;

    DARSHAN_CORE_LOCK();

    *runtime_mem_limit = 0;
    if(!darshan_core_job || (id >= DARSHAN_MAX_MODS))
    {
        DARSHAN_CORE_UNLOCK();
        return;
    }

    /* see if this module is already registered */
    if(darshan_core_job->mod_array[id])
    {
        /* if module is already registered just return */
        /* NOTE: we do not recalculate memory limit here, just set to 0 */
        DARSHAN_CORE_UNLOCK();
        return;
    }

    /* this module has not been registered yet, allocate and initialize it */
    mod = malloc(sizeof(*mod));
    if(!mod)
    {
        DARSHAN_CORE_UNLOCK();
        return;
    }
    memset(mod, 0, sizeof(*mod));

    mod->id = id;
    strncpy(mod->name, name, DARSHAN_MOD_NAME_LEN);
    mod->mod_funcs = *funcs;

    /* register module with darshan */
    darshan_core_job->mod_array[id] = mod;

    /* TODO: something smarter than just 2 MiB per module */
    *runtime_mem_limit = 2 * 1024 * 1024;

    DARSHAN_CORE_UNLOCK();

    return;
}
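
/* EXAMPLE (illustrative sketch): a module maps a record name (e.g. a file
 * path) to its darshan record id before instrumenting it:
 *
 *     darshan_record_id rec_id;
 *
 *     darshan_core_lookup_record_id(path, strlen(path), 1, &rec_id);
 *
 * where path and the len/printable_flag choices are up to the caller; len is
 * simply the number of bytes of name fed to darshan_hash(). Identical names
 * hash to identical ids on every rank, which is what lets shutdown detect
 * records shared across processes.
 */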

void darshan_core_lookup_record_id(
    void *name,
    int len,
    int printable_flag,
    darshan_record_id *id)
{
    darshan_record_id tmp_id;
    struct darshan_core_record_ref* ref;

    if(!darshan_core_job || !name)
        return;

    /* TODO: what do you do with printable flag? */

    /* hash the input name to get a unique id for this record */
    tmp_id = darshan_hash(name, len, 0);

    DARSHAN_CORE_LOCK();

    /* check to see if we've already stored the id->name mapping for this record */
    HASH_FIND(hlink, darshan_core_job->rec_hash, &tmp_id, sizeof(darshan_record_id), ref);
    if(!ref)
    {
        /* if not, add this record to the hash */
        ref = malloc(sizeof(struct darshan_core_record_ref));
        if(ref)
        {
            ref->rec.id = tmp_id;
            ref->rec.name = malloc(strlen(name) + 1);
            if(ref->rec.name)
                strcpy(ref->rec.name, name);

            HASH_ADD(hlink, darshan_core_job->rec_hash, rec.id, sizeof(darshan_record_id), ref);
        }
    }

    DARSHAN_CORE_UNLOCK();

    *id = tmp_id;
    return;
}
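
/* return seconds elapsed since darshan was initialized (0 if darshan is not
 * active); a lightweight timestamp source for modules */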

double darshan_core_wtime(void)
{
    if(!darshan_core_job)
    {
        return(0);
    }

    return(DARSHAN_MPI_CALL(PMPI_Wtime)() - darshan_core_job->wtime_offset);
}

/*
 * Local variables:
 *  c-indent-level: 4
 *  c-basic-offset: 4
 * End:
 *
 * vim: ts=8 sts=4 sw=4 expandtab
 */