/*
 * Copyright (C) 2014 University of Chicago
 * See COPYRIGHT notice in top-level directory.
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <mpi.h>
#include <ross.h>
#include <assert.h>
#include <limits.h> /* INT_MAX, used by max_threshold below */
#include "dumpi/libundumpi/bindings.h"
#include "dumpi/libundumpi/libundumpi.h"
#include "codes/codes-workload.h"
#include "codes/quickhash.h"
#include "codes/codes-jobmap.h"
#include "codes/jenkins-hash.h"
#include "codes/model-net.h"

#ifdef ENABLE_CORTEX
#include <cortex/cortex.h>
#include <cortex/datatype.h>
#include <cortex/cortex-mpich.h>
#ifdef ENABLE_CORTEX_PYTHON
#include <cortex/cortex-python.h>
#endif
#define PROFILE_TYPE cortex_dumpi_profile*
//#define UNDUMPI_OPEN cortex_undumpi_open
#define DUMPI_START_STREAM_READ cortex_dumpi_start_stream_read
#define UNDUMPI_CLOSE cortex_undumpi_close
#else
#define PROFILE_TYPE dumpi_profile*
//#define UNDUMPI_OPEN undumpi_open
#define DUMPI_START_STREAM_READ dumpi_start_stream_read
#define UNDUMPI_CLOSE undumpi_close
#endif

#define MAX_LENGTH_FILE 512
#define MAX_OPERATIONS 32768
#define DUMPI_IGNORE_DELAY 100 /* compute gaps below this many ns are ignored */
#define RANK_HASH_TABLE_SIZE 400

/* This variable is defined in src/network-workloads/model-net-mpi-replay.c */
extern struct codes_jobmap_ctx *jobmap_ctx; 

static struct qhash_table *rank_tbl = NULL;
static int rank_tbl_pop = 0;

/* cap on the number of trace operations read per rank (INT_MAX = effectively no cap) */
static unsigned int max_threshold = INT_MAX;

/* context of the MPI workload */
typedef struct rank_mpi_context
{
    PROFILE_TYPE profile;
    int my_app_id;
    // whether we've seen an init op (needed for timing correctness)
    int is_init;
    int num_reqs;
    unsigned int num_ops;
    int64_t my_rank;
    double last_op_time;
    double init_time;
    void* dumpi_mpi_array;
    struct qhash_head hash_link;

    struct rc_stack * completed_ctx;
} rank_mpi_context;

typedef struct rank_mpi_compare
{
    int app;
    int rank;
} rank_mpi_compare;

/* Holds all the data about MPI operations from the log */
typedef struct dumpi_op_data_array
{
    struct codes_workload_op* op_array;
    int64_t op_arr_ndx;
    int64_t op_arr_cnt;
} dumpi_op_data_array;

/* timing utilities */

#ifdef __GNUC__
__attribute__((unused))
#endif
static dumpi_clock timediff(
        dumpi_clock end,
        dumpi_clock start)
{
    dumpi_clock temp;
    if ((end.nsec-start.nsec)<0) {
        temp.sec = end.sec-start.sec-1;
        temp.nsec = 1000000000+end.nsec-start.nsec;
    } else {
        temp.sec = end.sec-start.sec;
        temp.nsec = end.nsec-start.nsec;
    }
    return temp;
}

/*static inline double time_to_ms_lf(dumpi_clock t){
        return (double) t.sec * 1e3 + (double) t.nsec / 1e6;
}
static inline double time_to_us_lf(dumpi_clock t){
        return (double) t.sec * 1e6 + (double) t.nsec / 1e3;
}*/
static inline double time_to_ns_lf(dumpi_clock t){
        return (double) t.sec * 1e9 + (double) t.nsec;
}

/*static int32_t get_unique_req_id(int32_t request_id)
{
    uint32_t pc = 0, pb = 0;
    bj_hashlittle2(&request_id, sizeof(int32_t), &pc, &pb);
    return pc;
}*/

/*static inline double time_to_s_lf(dumpi_clock t){
        return (double) t.sec + (double) t.nsec / 1e9;
}*/

/* load the trace */
static int dumpi_trace_nw_workload_load(const char* params, int app_id, int rank);

/* dumpi implementation of get next operation in the workload */
static void dumpi_trace_nw_workload_get_next(int app_id, int rank, struct codes_workload_op *op);

/* get number of bytes from the workload data type and count */
static uint64_t get_num_bytes(rank_mpi_context* my_ctx, dumpi_datatype dt);

/* computes the delay between MPI operations */
static void update_compute_time(const dumpi_time* time, rank_mpi_context* my_ctx);
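
/* Design note: loading is two-phase. dumpi_trace_nw_workload_load() walks
 * the whole DUMPI binary trace once, translating each MPI call into a
 * codes_workload_op appended to a per-rank dynamic array; the simulator then
 * replays those ops through dumpi_trace_nw_workload_get_next(), and
 * dumpi_roll_back_prev_op() supports reverse computation in optimistic mode. */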

/* initializes the data structures */
static void* dumpi_init_op_data();

/* removes next operations from the dynamic array */
static void dumpi_remove_next_op(void *mpi_op_array, struct codes_workload_op *mpi_op,
                                      double last_op_time);

/* resets the counters for the dynamic array once the workload is completely loaded*/
static void dumpi_finalize_mpi_op_data(void *mpi_op_array);

/* insert next operation */
static void dumpi_insert_next_op(void *mpi_op_array, struct codes_workload_op *mpi_op);

/* initialize the array data structure */
static void* dumpi_init_op_data()
{
    dumpi_op_data_array* tmp;

    tmp = malloc(sizeof(dumpi_op_data_array));
    assert(tmp);
    tmp->op_array = malloc(MAX_OPERATIONS * sizeof(struct codes_workload_op));
    assert(tmp->op_array);
    tmp->op_arr_ndx = 0;
    tmp->op_arr_cnt = MAX_OPERATIONS;

    return (void *)tmp;
}

/* inserts next operation in the array */
static void dumpi_insert_next_op(void *mpi_op_array, struct codes_workload_op *mpi_op)
{
    dumpi_op_data_array *array = (dumpi_op_data_array*)mpi_op_array;
    struct codes_workload_op *tmp;

    /* check if array is full; if so, grow it by another MAX_OPERATIONS slots */
    if (array->op_arr_ndx == array->op_arr_cnt)
    {
        tmp = malloc((array->op_arr_cnt + MAX_OPERATIONS) * sizeof(struct codes_workload_op));
        assert(tmp);
        memcpy(tmp, array->op_array, array->op_arr_cnt * sizeof(struct codes_workload_op));
        free(array->op_array);
        array->op_array = tmp;
        array->op_arr_cnt += MAX_OPERATIONS;
    }

    /* add the MPI operation to the op array */
    array->op_array[array->op_arr_ndx] = *mpi_op;
    //printf("\n insert time %f end time %f ", array->op_array[array->op_arr_ndx].start_time, array->op_array[array->op_arr_ndx].end_time);
    array->op_arr_ndx++;
    return;
}
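
/* Growth example: with MAX_OPERATIONS = 32768, inserting operation 32769
 * allocates a 65536-slot array, memcpy's the old contents, and frees the
 * old buffer, i.e. a hand-rolled realloc(). */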

/* resets the counters after the file is fully loaded */
static void dumpi_finalize_mpi_op_data(void *mpi_op_array)
{
    struct dumpi_op_data_array* array = (struct dumpi_op_data_array*)mpi_op_array;

    array->op_arr_cnt = array->op_arr_ndx;
    array->op_arr_ndx = 0;
}

/* rolls back to previous index */
static void dumpi_roll_back_prev_op(void * mpi_op_array)
{
    dumpi_op_data_array *array = (dumpi_op_data_array*)mpi_op_array;
    array->op_arr_ndx--;
    assert(array->op_arr_ndx >= 0);
}
/* removes the next operation from the array */
static void dumpi_remove_next_op(void *mpi_op_array, struct codes_workload_op *mpi_op,
                                      double last_op_time)
{
    (void)last_op_time;

    dumpi_op_data_array *array = (dumpi_op_data_array*)mpi_op_array;
    //printf("\n op array index %d array count %d ", array->op_arr_ndx, array->op_arr_cnt);
    if (array->op_arr_ndx == array->op_arr_cnt)
    {
        /* no operations left: report end of workload */
        mpi_op->op_type = CODES_WK_END;
    }
    else
    {
        struct codes_workload_op *tmp = &(array->op_array[array->op_arr_ndx]);
        *mpi_op = *tmp;
        array->op_arr_ndx++;
    }
    /*if(mpi_op->op_type == CODES_WK_END)
    {
        free(array->op_array);
        free(array);
    }*/
}

/* check for initialization and normalize reported time */
static inline void check_set_init_time(const dumpi_time *t, rank_mpi_context * my_ctx)
{
    if (!my_ctx->is_init) {
        my_ctx->is_init = 1;
        my_ctx->init_time = time_to_ns_lf(t->start);
        my_ctx->last_op_time = time_to_ns_lf(t->stop) - my_ctx->init_time;
    }
}
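
/* Example (illustrative numbers): if MPI_Init starts 123.4 s into the
 * machine's wall clock, init_time becomes 123.4e9 ns and every subsequent
 * timestamp is reported relative to it, so each rank's replay starts near
 * time zero. */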

/* introduce delay between operations: delay is the compute time NOT spent in MPI operations */
void update_compute_time(const dumpi_time* time, rank_mpi_context* my_ctx)
{
    double start = time_to_ns_lf(time->start) - my_ctx->init_time;
    double stop = time_to_ns_lf(time->stop) - my_ctx->init_time;

    if((start - my_ctx->last_op_time) > DUMPI_IGNORE_DELAY)
    {
        struct codes_workload_op wrkld_per_rank;

        wrkld_per_rank.op_type = CODES_WK_DELAY;
        wrkld_per_rank.start_time = my_ctx->last_op_time;
        wrkld_per_rank.end_time = start;
        wrkld_per_rank.u.delay.seconds = (start - my_ctx->last_op_time) / 1e9;
        dumpi_insert_next_op(my_ctx->dumpi_mpi_array, &wrkld_per_rank);
    }
    my_ctx->last_op_time = stop;
}
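
/* Worked example (illustrative numbers): if the previous call ended at
 * last_op_time = 1000 ns and the next call starts 1250 ns after init, the
 * 250 ns gap exceeds DUMPI_IGNORE_DELAY, so a CODES_WK_DELAY op of
 * 250e-9 s is inserted before the next MPI operation's own record. */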

static int handleDUMPIInit(
        const dumpi_init *prm,
        uint16_t thread,
        const dumpi_time *cpu,
        const dumpi_time *wall,
        const dumpi_perfinfo *perf,
        void *uarg)
{
    (void)prm;
    (void)thread;
    (void)cpu;
    (void)perf;

    rank_mpi_context *myctx = (rank_mpi_context*)uarg;
    check_set_init_time(wall, myctx);
    return 0;
}

int handleDUMPIError(const void* prm, uint16_t thread, const dumpi_time *cpu, const dumpi_time *wall, const dumpi_perfinfo *perf, void *uarg)
{
    (void)prm;
    (void)thread;
    (void)cpu;
    (void)wall;
    (void)perf;
    (void)uarg;

    tw_error(TW_LOC, "\n MPI operation not supported by the MPI-Sim Layer ");
    return -1; /* unreachable: tw_error aborts */
}

int handleDUMPIIgnore(const void* prm, uint16_t thread, const dumpi_time *cpu, const dumpi_time *wall, const dumpi_perfinfo *perf, void *uarg)
{
    (void)prm;
    (void)thread;
    (void)cpu;
    (void)perf;

    rank_mpi_context* myctx = (rank_mpi_context*)uarg;

    check_set_init_time(wall, myctx);
    update_compute_time(wall, myctx);

    return 0;
}

static void update_times_and_insert(
        struct codes_workload_op *op,
        const dumpi_time *t,
        rank_mpi_context *ctx)
{
    check_set_init_time(t, ctx);
    op->start_time = time_to_ns_lf(t->start) - ctx->init_time;
    op->end_time = time_to_ns_lf(t->stop) - ctx->init_time;
    update_compute_time(t, ctx);
    dumpi_insert_next_op(ctx->dumpi_mpi_array, op);
}


int handleDUMPIWait(const dumpi_wait *prm, uint16_t thread,
                    const dumpi_time *cpu, const dumpi_time *wall,
                    const dumpi_perfinfo *perf, void *userarg)
{
    (void)thread;
    (void)cpu;
    (void)perf;

    rank_mpi_context* myctx = (rank_mpi_context*)userarg;
    struct codes_workload_op wrkld_per_rank;

    wrkld_per_rank.op_type = CODES_WK_WAIT;
    wrkld_per_rank.u.wait.req_id = prm->request;

    update_times_and_insert(&wrkld_per_rank, wall, myctx);

    return 0;
}

int handleDUMPIWaitsome(const dumpi_waitsome *prm, uint16_t thread,
                    const dumpi_time *cpu, const dumpi_time *wall,
                    const dumpi_perfinfo *perf, void *userarg)
{
    (void)thread;
    (void)cpu;
    (void)perf;

    int i;
    rank_mpi_context* myctx = (rank_mpi_context*)userarg;
    struct codes_workload_op wrkld_per_rank;

    wrkld_per_rank.op_type = CODES_WK_WAITSOME;
    wrkld_per_rank.u.waits.count = prm->count;
    wrkld_per_rank.u.waits.req_ids = (int*)malloc(prm->count * sizeof(int));

    for( i = 0; i < prm->count; i++ )
        wrkld_per_rank.u.waits.req_ids[i] = prm->requests[i];

    update_times_and_insert(&wrkld_per_rank, wall, myctx);
    return 0;
}

int handleDUMPIWaitany(const dumpi_waitany *prm, uint16_t thread,
                    const dumpi_time *cpu, const dumpi_time *wall,
                    const dumpi_perfinfo *perf, void *userarg)
{
    (void)thread;
    (void)cpu;
    (void)perf;

    int i;
    rank_mpi_context* myctx = (rank_mpi_context*)userarg;
    struct codes_workload_op wrkld_per_rank;

    wrkld_per_rank.op_type = CODES_WK_WAITANY;
    wrkld_per_rank.u.waits.count = prm->count;
    wrkld_per_rank.u.waits.req_ids = (int*)malloc(prm->count * sizeof(int));

    for( i = 0; i < prm->count; i++ )
        wrkld_per_rank.u.waits.req_ids[i] = prm->requests[i];

    update_times_and_insert(&wrkld_per_rank, wall, myctx);
    return 0;
}

int handleDUMPIWaitall(const dumpi_waitall *prm, uint16_t thread,
                    const dumpi_time *cpu, const dumpi_time *wall,
                    const dumpi_perfinfo *perf, void *userarg)
{
    (void)thread;
    (void)cpu;
    (void)perf;

    int i;
    rank_mpi_context* myctx = (rank_mpi_context*)userarg;
    struct codes_workload_op wrkld_per_rank;

    wrkld_per_rank.op_type = CODES_WK_WAITALL;
    wrkld_per_rank.u.waits.count = prm->count;
    wrkld_per_rank.u.waits.req_ids = (int*)malloc(prm->count * sizeof(int));
    for( i = 0; i < prm->count; i++ )
        wrkld_per_rank.u.waits.req_ids[i] = prm->requests[i];

    update_times_and_insert(&wrkld_per_rank, wall, myctx);
    return 0;
}

int handleDUMPIISend(const dumpi_isend *prm, uint16_t thread, const dumpi_time *cpu, const dumpi_time *wall, const dumpi_perfinfo *perf, void *userarg)
{
    (void)thread;
    (void)cpu;
    (void)perf;

    rank_mpi_context* myctx = (rank_mpi_context*)userarg;
    struct codes_workload_op wrkld_per_rank;

    wrkld_per_rank.op_type = CODES_WK_ISEND;
    wrkld_per_rank.u.send.tag = prm->tag;
    wrkld_per_rank.u.send.count = prm->count;
    wrkld_per_rank.u.send.data_type = prm->datatype;
    wrkld_per_rank.u.send.num_bytes = prm->count * get_num_bytes(myctx,prm->datatype);

    assert(wrkld_per_rank.u.send.num_bytes >= 0);
    wrkld_per_rank.u.send.req_id = prm->request;
    wrkld_per_rank.u.send.dest_rank = prm->dest;
    wrkld_per_rank.u.send.source_rank = myctx->my_rank;

    update_times_and_insert(&wrkld_per_rank, wall, myctx);

    return 0;
}
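
/* Note: the isend op carries the trace's own request handle (prm->request)
 * as req_id; a later wait op recorded with the same handle is how the replay
 * layer pairs completions with their non-blocking sends and receives. */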

int handleDUMPIIRecv(const dumpi_irecv *prm, uint16_t thread, const dumpi_time *cpu, const dumpi_time *wall, const dumpi_perfinfo *perf, void *userarg)
{
    (void)thread;
    (void)cpu;
    (void)perf;

    //printf("\n irecv source %d count %d data type %d", prm->source, prm->count, prm->datatype);
    rank_mpi_context* myctx = (rank_mpi_context*)userarg;
    struct codes_workload_op wrkld_per_rank;

    wrkld_per_rank.op_type = CODES_WK_IRECV;
    wrkld_per_rank.u.recv.data_type = prm->datatype;
    wrkld_per_rank.u.recv.count = prm->count;
    wrkld_per_rank.u.recv.tag = prm->tag;
    wrkld_per_rank.u.recv.num_bytes = prm->count * get_num_bytes(myctx,prm->datatype);

    assert(wrkld_per_rank.u.recv.num_bytes >= 0);
    wrkld_per_rank.u.recv.source_rank = prm->source;
    wrkld_per_rank.u.recv.dest_rank = -1;
    wrkld_per_rank.u.recv.req_id = prm->request;

    update_times_and_insert(&wrkld_per_rank, wall, myctx);
    return 0;
}

int handleDUMPISend(const dumpi_send *prm, uint16_t thread,
                      const dumpi_time *cpu, const dumpi_time *wall,
                      const dumpi_perfinfo *perf, void *uarg)
{
    (void)thread;
    (void)cpu;
    (void)perf;

    rank_mpi_context* myctx = (rank_mpi_context*)uarg;
    struct codes_workload_op wrkld_per_rank;

    wrkld_per_rank.op_type = CODES_WK_SEND;
    wrkld_per_rank.u.send.tag = prm->tag;
    wrkld_per_rank.u.send.count = prm->count;
    wrkld_per_rank.u.send.data_type = prm->datatype;
    wrkld_per_rank.u.send.num_bytes = prm->count * get_num_bytes(myctx,prm->datatype);
    assert(wrkld_per_rank.u.send.num_bytes >= 0);
    wrkld_per_rank.u.send.dest_rank = prm->dest;
    wrkld_per_rank.u.send.source_rank = myctx->my_rank;
    /* blocking send: no request id to wait on */
    wrkld_per_rank.u.send.req_id = -1;

    update_times_and_insert(&wrkld_per_rank, wall, myctx);
    return 0;
}

int handleDUMPIRecv(const dumpi_recv *prm, uint16_t thread,
                      const dumpi_time *cpu, const dumpi_time *wall,
                      const dumpi_perfinfo *perf, void *uarg)
{
    (void)thread;
    (void)cpu;
    (void)perf;

    rank_mpi_context* myctx = (rank_mpi_context*)uarg;
    struct codes_workload_op wrkld_per_rank;

    wrkld_per_rank.op_type = CODES_WK_RECV;
    wrkld_per_rank.u.recv.tag = prm->tag;
    wrkld_per_rank.u.recv.count = prm->count;
    wrkld_per_rank.u.recv.data_type = prm->datatype;
    wrkld_per_rank.u.recv.num_bytes = prm->count * get_num_bytes(myctx,prm->datatype);
    assert(wrkld_per_rank.u.recv.num_bytes >= 0);
    wrkld_per_rank.u.recv.req_id = -1;
    wrkld_per_rank.u.recv.source_rank = prm->source;
    wrkld_per_rank.u.recv.dest_rank = -1;

    //printf("\n recv source %d count %d data type %d bytes %lld ", prm->source, prm->count, prm->datatype, wrkld_per_rank.u.recv.num_bytes);
    update_times_and_insert(&wrkld_per_rank, wall, myctx);
    return 0;
}

int handleDUMPISendrecv(const dumpi_sendrecv* prm, uint16_t thread,
			const dumpi_time *cpu, const dumpi_time *wall,
			const dumpi_perfinfo *perf, void *uarg)
{
    (void)thread;
    (void)cpu;
    (void)perf;

    rank_mpi_context* myctx = (rank_mpi_context*)uarg;

    /* Model MPI_Sendrecv as a non-blocking send plus a blocking receive,
     * followed by a wait on the send request. */

    /* Issue a non-blocking send */
    {
        struct codes_workload_op wrkld_per_rank;
        wrkld_per_rank.op_type = CODES_WK_ISEND;
        wrkld_per_rank.u.send.tag = prm->sendtag;
        wrkld_per_rank.u.send.count = prm->sendcount;
        wrkld_per_rank.u.send.data_type = prm->sendtype;
        wrkld_per_rank.u.send.num_bytes = prm->sendcount * get_num_bytes(myctx,prm->sendtype);

        assert(wrkld_per_rank.u.send.num_bytes >= 0);
        wrkld_per_rank.u.send.dest_rank = prm->dest;
        wrkld_per_rank.u.send.source_rank = myctx->my_rank;
        wrkld_per_rank.u.send.req_id = myctx->num_reqs;
        update_times_and_insert(&wrkld_per_rank, wall, myctx);
    }

    /* issue a blocking receive */
    {
        struct codes_workload_op wrkld_per_rank;
        wrkld_per_rank.op_type = CODES_WK_RECV;
        wrkld_per_rank.u.recv.tag = prm->recvtag;
        wrkld_per_rank.u.recv.count = prm->recvcount;
        wrkld_per_rank.u.recv.data_type = prm->recvtype;
        wrkld_per_rank.u.recv.num_bytes = prm->recvcount * get_num_bytes(myctx,prm->recvtype);

        assert(wrkld_per_rank.u.recv.num_bytes >= 0);
        wrkld_per_rank.u.recv.source_rank = prm->source;
        wrkld_per_rank.u.recv.dest_rank = -1;
        update_times_and_insert(&wrkld_per_rank, wall, myctx);
    }

    /* Issue a wait operation for the send */
    {
        struct codes_workload_op wrkld_per_rank;

        wrkld_per_rank.op_type = CODES_WK_WAIT;
        wrkld_per_rank.u.wait.req_id = myctx->num_reqs;

        update_times_and_insert(&wrkld_per_rank, wall, myctx);

        myctx->num_reqs++;
    }

    return 0;
}
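
/* Since MPI_Sendrecv has no user-visible request handle, the send half is
 * given a synthetic id from the per-rank num_reqs counter; note that these
 * synthesized ids share the req_id namespace with the trace's own request
 * handles. */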

int handleDUMPIBcast(const dumpi_bcast *prm, uint16_t thread,
                       const dumpi_time *cpu, const dumpi_time *wall,
                       const dumpi_perfinfo *perf, void *uarg)
{
    (void)thread;
    (void)cpu;
    (void)perf;

    rank_mpi_context* myctx = (rank_mpi_context*)uarg;
    struct codes_workload_op wrkld_per_rank;

    wrkld_per_rank.op_type = CODES_WK_BCAST;
    wrkld_per_rank.u.collective.num_bytes = prm->count * get_num_bytes(myctx,prm->datatype);
    assert(wrkld_per_rank.u.collective.num_bytes >= 0);

    update_times_and_insert(&wrkld_per_rank, wall, myctx);
    return 0;
}
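
/* The collective handlers that follow (allgather, allgatherv, alltoall,
 * alltoallv, reduce, allreduce) all use this same shape: only the payload
 * size is recorded in u.collective.num_bytes; the communication pattern
 * itself is left to the replaying network model. */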

int handleDUMPIAllgather(const dumpi_allgather *prm, uint16_t thread,
                           const dumpi_time *cpu, const dumpi_time *wall,
                           const dumpi_perfinfo *perf, void *uarg)
{
    (void)thread;
    (void)cpu;
    (void)perf;

    rank_mpi_context* myctx = (rank_mpi_context*)uarg;
    struct codes_workload_op wrkld_per_rank;

    wrkld_per_rank.op_type = CODES_WK_ALLGATHER;
    wrkld_per_rank.u.collective.num_bytes = prm->sendcount * get_num_bytes(myctx,prm->sendtype);
    assert(wrkld_per_rank.u.collective.num_bytes > 0);

    update_times_and_insert(&wrkld_per_rank, wall, myctx);
    return 0;
}

int handleDUMPIAllgatherv(const dumpi_allgatherv *prm, uint16_t thread,
                            const dumpi_time *cpu, const dumpi_time *wall,
                            const dumpi_perfinfo *perf, void *uarg)
{
    (void)thread;
    (void)cpu;
    (void)perf;

    rank_mpi_context* myctx = (rank_mpi_context*)uarg;
    struct codes_workload_op wrkld_per_rank;

    wrkld_per_rank.op_type = CODES_WK_ALLGATHERV;
    wrkld_per_rank.u.collective.num_bytes = prm->sendcount * get_num_bytes(myctx,prm->sendtype);
    assert(wrkld_per_rank.u.collective.num_bytes > 0);

    update_times_and_insert(&wrkld_per_rank, wall, myctx);
    return 0;
}

int handleDUMPIAlltoall(const dumpi_alltoall *prm, uint16_t thread,
                          const dumpi_time *cpu, const dumpi_time *wall,
                          const dumpi_perfinfo *perf, void *uarg)
{
    (void)thread;
    (void)cpu;
    (void)perf;

    rank_mpi_context* myctx = (rank_mpi_context*)uarg;
    struct codes_workload_op wrkld_per_rank;

    wrkld_per_rank.op_type = CODES_WK_ALLTOALL;
    wrkld_per_rank.u.collective.num_bytes = prm->sendcount * get_num_bytes(myctx,prm->sendtype);
    assert(wrkld_per_rank.u.collective.num_bytes > 0);

    update_times_and_insert(&wrkld_per_rank, wall, myctx);
    return 0;
}

int handleDUMPIAlltoallv(const dumpi_alltoallv *prm, uint16_t thread,
                           const dumpi_time *cpu, const dumpi_time *wall,
                           const dumpi_perfinfo *perf, void *uarg)
{
    (void)thread;
    (void)cpu;
    (void)perf;

    rank_mpi_context* myctx = (rank_mpi_context*)uarg;
    struct codes_workload_op wrkld_per_rank;

    wrkld_per_rank.op_type = CODES_WK_ALLTOALLV;
    /* approximation: uses the first send count for all destinations */
    wrkld_per_rank.u.collective.num_bytes = prm->sendcounts[0] * get_num_bytes(myctx,prm->sendtype);
    assert(wrkld_per_rank.u.collective.num_bytes > 0);

    update_times_and_insert(&wrkld_per_rank, wall, myctx);
    return 0;
}

int handleDUMPIReduce(const dumpi_reduce *prm, uint16_t thread,
                        const dumpi_time *cpu, const dumpi_time *wall,
                        const dumpi_perfinfo *perf, void *uarg)
{
    (void)thread;
    (void)cpu;
    (void)perf;

    rank_mpi_context* myctx = (rank_mpi_context*)uarg;
    struct codes_workload_op wrkld_per_rank;

    wrkld_per_rank.op_type = CODES_WK_REDUCE;
    wrkld_per_rank.u.collective.num_bytes = prm->count * get_num_bytes(myctx,prm->datatype);
    assert(wrkld_per_rank.u.collective.num_bytes > 0);

    update_times_and_insert(&wrkld_per_rank, wall, myctx);
    return 0;
}

int handleDUMPIAllreduce(const dumpi_allreduce *prm, uint16_t thread,
                           const dumpi_time *cpu, const dumpi_time *wall,
                           const dumpi_perfinfo *perf, void *uarg)
{
    (void)thread;
    (void)cpu;
    (void)perf;

    rank_mpi_context* myctx = (rank_mpi_context*)uarg;
    struct codes_workload_op wrkld_per_rank;

    wrkld_per_rank.op_type = CODES_WK_ALLREDUCE;
    wrkld_per_rank.u.collective.num_bytes = prm->count * get_num_bytes(myctx,prm->datatype);
    assert(wrkld_per_rank.u.collective.num_bytes > 0);

    update_times_and_insert(&wrkld_per_rank, wall, myctx);
    return 0;
}

int handleDUMPIFinalize(const dumpi_finalize *prm, uint16_t thread, const dumpi_time *cpu, const dumpi_time *wall, const dumpi_perfinfo *perf, void *uarg)
{
    (void)prm;
    (void)thread;
    (void)cpu;
    (void)perf;

    rank_mpi_context* myctx = (rank_mpi_context*)uarg;
    struct codes_workload_op wrkld_per_rank;

    wrkld_per_rank.op_type = CODES_WK_END;

    update_times_and_insert(&wrkld_per_rank, wall, myctx);
    return 0;
}

int handleDUMPIReqFree(const dumpi_request_free *prm, uint16_t thread, const dumpi_time *cpu, const dumpi_time *wall, const dumpi_perfinfo *perf, void *userarg)
{
    (void)thread;
    (void)cpu;
    (void)perf;

    rank_mpi_context* myctx = (rank_mpi_context*)userarg;
    struct codes_workload_op wrkld_per_rank;

    wrkld_per_rank.op_type = CODES_WK_REQ_FREE;
    wrkld_per_rank.u.free.req_id = prm->request;

    update_times_and_insert(&wrkld_per_rank, wall, myctx);
    return 0;
}

static int hash_rank_compare(void *key, struct qhash_head *link)
{
    rank_mpi_compare *in = key;
    rank_mpi_context *tmp;

    tmp = qhash_entry(link, rank_mpi_context, hash_link);
    if (tmp->my_rank == in->rank && tmp->my_app_id == in->app)
        return 1;
    return 0;
}

int dumpi_trace_nw_workload_load(const char* params, int app_id, int rank)
{
    libundumpi_callbacks callbacks;
    libundumpi_cbpair callarr[DUMPI_END_OF_STREAM];
#ifdef ENABLE_CORTEX
    libundumpi_cbpair transarr[DUMPI_END_OF_STREAM];
#endif
    PROFILE_TYPE profile;
    dumpi_trace_params* dumpi_params = (dumpi_trace_params*)params;
    char file_name[MAX_LENGTH_FILE];

    if(rank >= dumpi_params->num_net_traces)
        return -1;

    if(!rank_tbl)
    {
        rank_tbl = qhash_init(hash_rank_compare, quickhash_64bit_hash, RANK_HASH_TABLE_SIZE);
        if(!rank_tbl)
            return -1;
    }

    rank_mpi_context *my_ctx;
    my_ctx = malloc(sizeof(rank_mpi_context));
    assert(my_ctx);
    my_ctx->my_rank = rank;
    my_ctx->my_app_id = app_id;
    my_ctx->last_op_time = 0.0;
    my_ctx->is_init = 0;
    my_ctx->num_reqs = 0;
    my_ctx->dumpi_mpi_array = dumpi_init_op_data();
    my_ctx->num_ops = 0;

    /* per-rank trace files carry a zero-padded 4-digit rank suffix,
     * e.g. <prefix>0003.bin for rank 3 */
    snprintf(file_name, MAX_LENGTH_FILE, "%s%04d.bin", dumpi_params->file_name, rank);
#ifdef ENABLE_CORTEX
    if(strcmp(dumpi_params->file_name,"none") == 0) {
        profile = cortex_undumpi_open(NULL, app_id, dumpi_params->num_net_traces, rank);
    } else {
        profile = cortex_undumpi_open(file_name, app_id, dumpi_params->num_net_traces, rank);
    }

    /* tell cortex where each trace rank is placed in the simulated network */
    { int i;
    for(i=0; i < dumpi_params->num_net_traces; i++) {
        struct codes_jobmap_id id = {
            .job = app_id,
            .rank = i
        };
        uint32_t cn_id;
        if(jobmap_ctx) {
            cn_id = codes_jobmap_to_global_id(id, jobmap_ctx);
        } else {
            cn_id = i;
        }
        cortex_placement_set(profile, i, cn_id);
    }
    }

    cortex_topology_set(profile,&model_net_topology);
#else
    profile = undumpi_open(file_name);
#endif
    my_ctx->profile = profile;
    if(NULL == profile) {
        printf("Error: unable to open DUMPI trace: %s\n", file_name);
        exit(-1);
    }

    memset(&callbacks, 0, sizeof(libundumpi_callbacks));
    memset(&callarr, 0, sizeof(libundumpi_cbpair) * DUMPI_END_OF_STREAM);
#ifdef ENABLE_CORTEX
    memset(&transarr, 0, sizeof(libundumpi_cbpair) * DUMPI_END_OF_STREAM);
#endif

    /* handle MPI function calls */
    callbacks.on_init = handleDUMPIInit;
    callbacks.on_send = (dumpi_send_call)handleDUMPISend;
    callbacks.on_recv = (dumpi_recv_call)handleDUMPIRecv;
    callbacks.on_isend = (dumpi_isend_call)handleDUMPIISend;
    callbacks.on_irecv = (dumpi_irecv_call)handleDUMPIIRecv;
    callbacks.on_allreduce = (dumpi_allreduce_call)handleDUMPIAllreduce;
    callbacks.on_bcast = (dumpi_bcast_call)handleDUMPIBcast;
    callbacks.on_get_count = (dumpi_get_count_call)handleDUMPIIgnore;
    callbacks.on_bsend = (dumpi_bsend_call)handleDUMPIIgnore;
    callbacks.on_ssend = (dumpi_ssend_call)handleDUMPIIgnore;
    callbacks.on_rsend = (dumpi_rsend_call)handleDUMPIIgnore;
    callbacks.on_buffer_attach = (dumpi_buffer_attach_call)handleDUMPIIgnore;
    callbacks.on_buffer_detach = (dumpi_buffer_detach_call)handleDUMPIIgnore;
    callbacks.on_ibsend = (dumpi_ibsend_call)handleDUMPIIgnore;
    callbacks.on_issend = (dumpi_issend_call)handleDUMPIIgnore;
    callbacks.on_irsend = (dumpi_irsend_call)handleDUMPIIgnore;
    callbacks.on_wait = (dumpi_wait_call)handleDUMPIWait;
    callbacks.on_test = (dumpi_test_call)handleDUMPIIgnore;
    callbacks.on_request_free = (dumpi_request_free_call)handleDUMPIReqFree;
    callbacks.on_waitany = (dumpi_waitany_call)handleDUMPIWaitany;
    callbacks.on_testany = (dumpi_testany_call)handleDUMPIIgnore;
    callbacks.on_waitall = (dumpi_waitall_call)handleDUMPIWaitall;
    callbacks.on_testall = (dumpi_testall_call)handleDUMPIIgnore;
    callbacks.on_waitsome = (dumpi_waitsome_call)handleDUMPIWaitsome;
    callbacks.on_testsome = (dumpi_testsome_call)handleDUMPIIgnore;
    callbacks.on_iprobe = (dumpi_iprobe_call)handleDUMPIIgnore;
    callbacks.on_probe = (dumpi_probe_call)handleDUMPIIgnore;
    callbacks.on_cancel = (dumpi_cancel_call)handleDUMPIIgnore;
    callbacks.on_test_cancelled = (dumpi_test_cancelled_call)handleDUMPIIgnore;
    callbacks.on_send_init = (dumpi_send_init_call)handleDUMPIIgnore;
    callbacks.on_bsend_init = (dumpi_bsend_init_call)handleDUMPIIgnore;
    callbacks.on_ssend_init = (dumpi_ssend_init_call)handleDUMPIIgnore;
    callbacks.on_rsend_init = (dumpi_rsend_init_call)handleDUMPIIgnore;
    callbacks.on_recv_init = (dumpi_recv_init_call)handleDUMPIIgnore;
    callbacks.on_start = (dumpi_start_call)handleDUMPIIgnore;
    callbacks.on_startall = (dumpi_startall_call)handleDUMPIIgnore;
    callbacks.on_sendrecv = (dumpi_sendrecv_call)handleDUMPISendrecv;
    callbacks.on_sendrecv_replace = (dumpi_sendrecv_replace_call)handleDUMPIIgnore;
    callbacks.on_type_contiguous = (dumpi_type_contiguous_call)handleDUMPIIgnore;
    callbacks.on_barrier = (dumpi_barrier_call)handleDUMPIIgnore;
    callbacks.on_gather = (dumpi_gather_call)handleDUMPIIgnore;
    callbacks.on_gatherv = (dumpi_gatherv_call)handleDUMPIIgnore;
    callbacks.on_scatter = (dumpi_scatter_call)handleDUMPIIgnore;
    callbacks.on_scatterv = (dumpi_scatterv_call)handleDUMPIIgnore;
    callbacks.on_allgather = (dumpi_allgather_call)handleDUMPIIgnore;
    callbacks.on_allgatherv = (dumpi_allgatherv_call)handleDUMPIIgnore;
    callbacks.on_alltoall = (dumpi_alltoall_call)handleDUMPIIgnore;
    callbacks.on_alltoallv = (dumpi_alltoallv_call)handleDUMPIIgnore;
    callbacks.on_alltoallw = (dumpi_alltoallw_call)handleDUMPIIgnore;
    callbacks.on_reduce = (dumpi_reduce_call)handleDUMPIIgnore;
    callbacks.on_reduce_scatter = (dumpi_reduce_scatter_call)handleDUMPIIgnore;
    callbacks.on_group_size = (dumpi_group_size_call)handleDUMPIIgnore;
    callbacks.on_group_rank = (dumpi_group_rank_call)handleDUMPIIgnore;
    callbacks.on_comm_size = (dumpi_comm_size_call)handleDUMPIIgnore;
    callbacks.on_comm_rank = (dumpi_comm_rank_call)handleDUMPIIgnore;
    callbacks.on_comm_get_attr = (dumpi_comm_get_attr_call)handleDUMPIIgnore;
    callbacks.on_comm_dup = (dumpi_comm_dup_call)handleDUMPIError;
    callbacks.on_comm_create = (dumpi_comm_create_call)handleDUMPIError;
    callbacks.on_wtime = (dumpi_wtime_call)handleDUMPIIgnore;
    callbacks.on_finalize = (dumpi_finalize_call)handleDUMPIFinalize;

    libundumpi_populate_callbacks(&callbacks, callarr);

#ifdef ENABLE_CORTEX
#ifdef ENABLE_CORTEX_PYTHON
    if(dumpi_params->cortex_script[0] != 0) {
        libundumpi_populate_callbacks(CORTEX_PYTHON_TRANSLATION, transarr);
    } else {
        libundumpi_populate_callbacks(CORTEX_MPICH_TRANSLATION, transarr);
    }
#else
    libundumpi_populate_callbacks(CORTEX_MPICH_TRANSLATION, transarr);
#endif
#endif
    DUMPI_START_STREAM_READ(profile);
    //dumpi_header* trace_header = undumpi_read_header(profile);
    //dumpi_free_header(trace_header);

#ifdef ENABLE_CORTEX_PYTHON
    if(dumpi_params->cortex_script[0] != 0) {
        if(dumpi_params->cortex_class[0] != 0) {
            cortex_python_set_module(dumpi_params->cortex_script, dumpi_params->cortex_class);
        } else {
            cortex_python_set_module(dumpi_params->cortex_script, NULL);
        }
        if(dumpi_params->cortex_gen[0] != 0) {
            cortex_python_call_generator(profile, dumpi_params->cortex_gen);
        }
    }
#endif

    int finalize_reached = 0;
    int active = 1;
    int num_calls = 0;
    while(active && !finalize_reached)
    {
        num_calls++;
        my_ctx->num_ops++;
#ifdef ENABLE_CORTEX
        if(my_ctx->num_ops < max_threshold)
            active = cortex_undumpi_read_single_call(profile, callarr, transarr, (void*)my_ctx, &finalize_reached);
        else
        {
            /* hit the operation cap: append an end op and stop reading */
            struct codes_workload_op op;
            op.op_type = CODES_WK_END;

            op.start_time = my_ctx->last_op_time;
            op.end_time = my_ctx->last_op_time + 1;
            dumpi_insert_next_op(my_ctx->dumpi_mpi_array, &op);
            break;
        }
#else
        active = undumpi_read_single_call(profile, callarr, (void*)my_ctx, &finalize_reached);
#endif
    }
    UNDUMPI_CLOSE(profile);
    dumpi_finalize_mpi_op_data(my_ctx->dumpi_mpi_array);

    /* add this rank context to the hash table */
    rank_mpi_compare cmp;
    cmp.app = my_ctx->my_app_id;
    cmp.rank = my_ctx->my_rank;
    qhash_add(rank_tbl, &cmp, &(my_ctx->hash_link));
    rank_tbl_pop++;

    return 0;
}
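
/* Usage sketch (illustrative; the prefix, rank count, and variable names are
 * hypothetical, and the codes-workload API is assumed from codes-workload.h).
 * Models normally reach this loader through the generic workload API rather
 * than calling it directly:
 *
 *     dumpi_trace_params p;
 *     strcpy(p.file_name, "my-trace-dir/dumpi-2014.03.03.14.55.50-");
 *     p.num_net_traces = 27;
 *     int wid = codes_workload_load("dumpi-trace-workload",
 *                                   (char*)&p, 0, my_rank);   (0 = app id)
 *     struct codes_workload_op op;
 *     do {
 *         codes_workload_get_next(wid, 0, my_rank, &op);
 *         ... map op.op_type onto simulation events ...
 *     } while(op.op_type != CODES_WK_END);
 */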
/* Data type sizes assume a 64-bit arch. Source:
 * https://www.tutorialspoint.com/cprogramming/c_data_types.htm
 * */
static uint64_t get_num_bytes(rank_mpi_context* myctx, dumpi_datatype dt)
{
#ifdef ENABLE_CORTEX
    return cortex_datatype_get_size(myctx->profile,dt);
#else
    (void)myctx;
    switch(dt)
    {
        case DUMPI_DATATYPE_ERROR:
        case DUMPI_DATATYPE_NULL:
            tw_error(TW_LOC, "\n data type error");
            break;

        case DUMPI_CHAR:
        case DUMPI_UNSIGNED_CHAR:
        case DUMPI_SIGNED_CHAR:
        case DUMPI_BYTE:
            return 1; /* 1 byte for char */

        case DUMPI_WCHAR:
            return 4; /* 4 bytes on a 64-bit platform */

        case DUMPI_SHORT:
        case DUMPI_SHORT_INT:
        case DUMPI_UNSIGNED_SHORT:
            return 2;

        case DUMPI_INT:
        case DUMPI_UNSIGNED:
        case DUMPI_FLOAT:
        case DUMPI_FLOAT_INT:
            return 4;

        case DUMPI_DOUBLE:
        case DUMPI_LONG:
        case DUMPI_LONG_INT:
        case DUMPI_UNSIGNED_LONG:
        case DUMPI_LONG_LONG_INT:
        case DUMPI_UNSIGNED_LONG_LONG:
        case DUMPI_LONG_LONG:
        case DUMPI_DOUBLE_INT:
            return 8;

        case DUMPI_LONG_DOUBLE_INT:
        case DUMPI_LONG_DOUBLE:
            return 10;

        default:
            tw_error(TW_LOC, "\n undefined data type");
            return 0;
    }
    return 0;
#endif
}
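
/* Example: a send of count = 1024 with DUMPI_DOUBLE yields
 * num_bytes = 1024 * 8 = 8192 in the recorded operation. */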

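/* reverse handler for get_next: rolls the per-rank op array index back one
 * slot so optimistic (reversible) simulation can re-issue the operation */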
void dumpi_trace_nw_workload_get_next_rc2(int app_id, int rank)
{
    rank_mpi_context* temp_data; 
    struct qhash_head *hash_link = NULL;  
    rank_mpi_compare cmp;  
    cmp.rank = rank;
    cmp.app = app_id;

    hash_link = qhash_search(rank_tbl, &cmp);

    assert(hash_link);
    temp_data = qhash_entry(hash_link, rank_mpi_context, hash_link); 
    assert(temp_data);

    dumpi_roll_back_prev_op(temp_data->dumpi_mpi_array);
}
void dumpi_trace_nw_workload_get_next(int app_id, int rank, struct codes_workload_op *op)
{
    rank_mpi_context* temp_data;
    struct qhash_head *hash_link = NULL;
    rank_mpi_compare cmp;
    cmp.rank = rank;
    cmp.app = app_id;
    hash_link = qhash_search(rank_tbl, &cmp);
    if(!hash_link)
    {
        printf("\n not found for rank id %d , %d", rank, app_id);
        op->op_type = CODES_WK_END;
        return;
    }
    temp_data = qhash_entry(hash_link, rank_mpi_context, hash_link);
    assert(temp_data);

    struct codes_workload_op mpi_op;
    dumpi_remove_next_op(temp_data->dumpi_mpi_array, &mpi_op, temp_data->last_op_time);
    *op = mpi_op;
    /*if( mpi_op.op_type == CODES_WK_END)
    {
        qhash_del(hash_link);
        free(temp_data);

        rank_tbl_pop--;
        if (!rank_tbl_pop)
        {
            qhash_finalize(rank_tbl);
            rank_tbl = NULL;
        }
    }*/
    return;
}

/* implements the codes workload method */
struct codes_workload_method dumpi_trace_workload_method =
{
    .method_name = "dumpi-trace-workload",
    .codes_workload_read_config = NULL,
    .codes_workload_load = dumpi_trace_nw_workload_load,
    .codes_workload_get_next = dumpi_trace_nw_workload_get_next,
    .codes_workload_get_next_rc2 = dumpi_trace_nw_workload_get_next_rc2,
};

/*
 * Local variables:
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  indent-tabs-mode: nil
 * End:
 *
 * vim: ft=c ts=8 sts=4 sw=4 expandtab
 */