/*
 * Copyright (C) 2014 University of Chicago.
 * See COPYRIGHT notice in top-level directory.
 *
 */
#include <ross.h>
#include <inttypes.h>
#include <sys/stat.h>

#include "codes/codes-workload.h"
#include "codes/codes.h"
#include "codes/configuration.h"
#include "codes/codes_mapping.h"
#include "codes/model-net.h"
#include "codes/rc-stack.h"
#include "codes/quicklist.h"
#include "codes/quickhash.h"
#include "codes/codes-jobmap.h"

/* turning on track lp will generate a lot of output messages */
#define MN_LP_NM "modelnet_dragonfly_custom"
#define CONTROL_MSG_SZ 64
#define TRACE -1
#define MAX_WAIT_REQS 512
#define CS_LP_DBG 1
#define RANK_HASH_TABLE_SZ 2000
#define NW_LP_NM "nw-lp"
#define lprintf(_fmt, ...) \
        do {if (CS_LP_DBG) printf(_fmt, __VA_ARGS__);} while (0)
#define MAX_STATS 65536

static int msg_size_hash_compare(
            void *key, struct qhash_head *link);

/* NOTE: Message tracking works in sequential mode only! */
static int debug_cols = 0;
/* Turning on this option slows down optimistic mode substantially. Only turn
 * on if you get issues with wait-all completion with traces. */
static int preserve_wait_ordering = 0;
static int enable_msg_tracking = 0;
static int is_synthetic = 0;
tw_lpid TRACK_LP = -1;
int nprocs = 0;
static double total_syn_data = 0;
static int unmatched = 0;
char workload_type[128];
char workload_name[128];
char workload_file[8192];
char offset_file[8192];
static int wrkld_id;
static int num_net_traces = 0;
static int num_dumpi_traces = 0;
static int64_t EAGER_THRESHOLD = 8192;
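/* Note (illustrative): in rm_matching_rcv()/rm_matching_send() below, messages
 * smaller than EAGER_THRESHOLD complete eagerly once a match is found, while
 * messages of EAGER_THRESHOLD bytes or more go through a rendezvous handshake
 * (send_ack_back() and the MPI_REND_ARRIVED / MPI_REND_ACK_ARRIVED events). */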

static int alloc_spec = 0;
static tw_stime self_overhead = 10.0;
static tw_stime mean_interval = 100000;
static int payload_sz = 1024;

/* Doing LP IO */
static char * params = NULL;
static char lp_io_dir[256] = {'\0'};
static char sampling_dir[32] = {'\0'};
static char mpi_msg_dir[32] = {'\0'};
static lp_io_handle io_handle;
static unsigned int lp_io_use_suffix = 0;
static int do_lp_io = 0;

/* variables for loading multiple applications */
char workloads_conf_file[8192];
char alloc_file[8192];
int num_traces_of_job[5];
tw_stime soft_delay_mpi = 2500;
tw_stime nic_delay = 1000;
tw_stime copy_per_byte_eager = 0.55;
char file_name_of_job[5][8192];

struct codes_jobmap_ctx *jobmap_ctx;
struct codes_jobmap_params_list jobmap_p;

/* Variables for Cortex Support */
/* Matthieu's additions start */
#ifdef ENABLE_CORTEX_PYTHON
static char cortex_file[512] = "\0";
static char cortex_class[512] = "\0";
static char cortex_gen[512] = "\0";
#endif
/* Matthieu's additions end */

typedef struct nw_state nw_state;
typedef struct nw_message nw_message;
typedef unsigned int dumpi_req_id;

static int net_id = 0;
static float noise = 1.0;
static int num_nw_lps = 0, num_mpi_lps = 0;

static int num_syn_clients;
static int syn_type = 0;

FILE * workload_log = NULL;
FILE * msg_size_log = NULL;
FILE * workload_agg_log = NULL;
FILE * workload_meta_log = NULL;

static uint64_t sample_bytes_written = 0;

long long num_bytes_sent=0;
long long num_bytes_recvd=0;

long long num_syn_bytes_sent = 0;
long long num_syn_bytes_recvd = 0;

double max_time = 0,  max_comm_time = 0, max_wait_time = 0, max_send_time = 0, max_recv_time = 0;
double avg_time = 0, avg_comm_time = 0, avg_wait_time = 0, avg_send_time = 0, avg_recv_time = 0;


/* runtime option for disabling computation time simulation */
static int disable_delay = 0;
static int enable_sampling = 0;
static double sampling_interval = 5000000;
static double sampling_end_time = 3000000000;
static int enable_debug = 0;

/* set group context */
struct codes_mctx group_ratio;

/* MPI_OP_GET_NEXT is for getting the next MPI operation when the previous operation completes.
 * MPI_SEND_ARRIVED is issued when an MPI message arrives at its destination (the message is
 * transported by model-net and an event is invoked when it arrives).
 * MPI_SEND_POSTED is issued when an MPI message has left the source LP (message is transported via model-net). */
enum MPI_NW_EVENTS
{
    MPI_OP_GET_NEXT=1,
    MPI_SEND_ARRIVED,
    MPI_SEND_ARRIVED_CB, // for tracking message times on the sender
    MPI_SEND_POSTED,
    MPI_REND_ARRIVED,
    MPI_REND_ACK_ARRIVED,
    CLI_BCKGND_FIN,
    CLI_BCKGND_ARRIVE,
    CLI_BCKGND_GEN,
    CLI_NBR_FINISH,
};

/* type of synthetic traffic */
enum TRAFFIC
{
    UNIFORM = 1, /* sends message to a randomly selected node */
    NEAREST_NEIGHBOR = 2, /* sends message to the next node (potentially connected to the same router) */
    ALLTOALL = 3, /* sends message to all other nodes */
    STENCIL = 4  /* sends message to 4 nearby neighbors */
};
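/* Example (illustrative): with num_clients = 8, rank 3 sends to rank 4 under
 * NEAREST_NEIGHBOR, to every other rank under ALLTOALL, and to its four 2D
 * grid neighbors under STENCIL (see gen_synthetic_tr() below). */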
struct mpi_workload_sample
{
    /* Sampling data */
    int nw_id;
    int app_id;
    unsigned long num_sends_sample;
    unsigned long num_bytes_sample;
    unsigned long num_waits_sample;
    double sample_end_time;
};
/* stores pointers of pending MPI operations to be matched with their respective sends/receives. */
struct mpi_msgs_queue
{
    int op_type;
    int tag;
    int source_rank;
    int dest_rank;
    int64_t num_bytes;
    int64_t seq_id;
    tw_stime req_init_time;
	dumpi_req_id req_id;
    struct qlist_head ql;
};
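/* Illustrative note: entries of this type sit on nw_state.arrival_queue (sends
 * that have arrived) or nw_state.pending_recvs_queue (receives that have been
 * posted) until rm_matching_send()/rm_matching_rcv() pairs them up. */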

/* stores request IDs of completed MPI operations (Isends or Irecvs) */
struct completed_requests
{
    unsigned int req_id;
    struct qlist_head ql;
    int index;
};

/* for wait operations, store the pending operation and number of completed waits so far. */
struct pending_waits
{
    int op_type;
    unsigned int req_ids[MAX_WAIT_REQS];
    int num_completed;
    int count;
    tw_stime start_time;
    struct qlist_head ql;
};
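/* Illustrative note: a pending wait is satisfied once num_completed reaches count;
 * notify_posted_wait() increments num_completed as matching request IDs complete. */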

struct msg_size_info
{
    int64_t msg_size;
    int num_msgs;
    tw_stime agg_latency;
    tw_stime avg_latency;
    struct qhash_head  hash_link;
    struct qlist_head ql; 
};
typedef struct mpi_msgs_queue mpi_msgs_queue;
typedef struct completed_requests completed_requests;
typedef struct pending_waits pending_waits;

/* state of the network LP. It contains the pointers to send/receive lists */
struct nw_state
{
	long num_events_per_lp;
	tw_lpid nw_id;
	short wrkld_end;
    int app_id;
    int local_rank;

    int synthetic_pattern;
    int is_finished;
    int neighbor_completed;

    struct rc_stack * processed_ops;
    struct rc_stack * processed_wait_op;
    struct rc_stack * matched_reqs;
//    struct rc_stack * indices;

    /* count of sends, receives, collectives and delays */
	unsigned long num_sends;
	unsigned long num_recvs;
	unsigned long num_cols;
	unsigned long num_delays;
	unsigned long num_wait;
	unsigned long num_waitall;
	unsigned long num_waitsome;


    /* time spent by the LP in executing the app trace*/
    double start_time;

    double col_time;

    double reduce_time;
    int num_reduce;

    double all_reduce_time;
    int num_all_reduce;

	double elapsed_time;
	/* time spent in compute operations */
	double compute_time;
	/* time spent in message send/isend */
	double send_time;
	/* time spent in message receive */
	double recv_time;
	/* time spent in wait operation */
	double wait_time;
	/* FIFO for isend messages arrived on destination */
	struct qlist_head arrival_queue;
	/* FIFO for irecv messages posted but not yet matched with send operations */
	struct qlist_head pending_recvs_queue;
	/* List of completed send/receive requests */
	struct qlist_head completed_reqs;

    tw_stime cur_interval_end;

    /* Pending wait operation */
    struct pending_waits * wait_op;

    /* Message size latency information */
    struct qhash_table * msg_sz_table;
    struct qlist_head msg_sz_list;

    /* quick hash for maintaining message latencies */

    unsigned long num_bytes_sent;
    unsigned long num_bytes_recvd;

    unsigned long syn_data;
    unsigned long gen_data;

    /* For sampling data */
    int sampling_indx;
    int max_arr_size;
    struct mpi_workload_sample * mpi_wkld_samples;
    char output_buf[512];
    char col_stats[64];
};
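/* Illustrative note: the rc_stack fields above (processed_ops, processed_wait_op,
 * matched_reqs) defer freeing of dequeued items so that reverse handlers such as
 * add_completed_reqs() can restore them during optimistic rollback. */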

/* data for handling reverse computation.
 * saved_matched_req holds the request ID of matched receives/sends for wait operations.
 * ptr_match_op holds the matched MPI operation, which is removed from the queues when a send
 * is matched with a receive in the forward event handler.
 * op is the MPI operation issued by the network workloads API. rv_data holds the data for
 * reverse computation (TODO: fill this data structure only when the simulation runs in
 * optimistic mode). */
struct nw_message
{
   // forward message handler
   int msg_type;
   int op_type;
   model_net_event_return event_rc;
   struct codes_workload_op * mpi_op;

   struct
   {
       tw_lpid src_rank;
       int dest_rank;
       int64_t num_bytes;
       int num_matched;
       int data_type;
       double sim_start_time;
       // for callbacks - time message was received
       double msg_send_time;
       unsigned int req_id;
       int matched_req;
       int tag;
       int app_id;
       int found_match;
       short wait_completed;
       short rend_send;
   } fwd;
   struct
   {
       double saved_send_time;
       double saved_recv_time;
       double saved_wait_time;
       double saved_delay;
       int64_t saved_num_bytes;
       int saved_syn_length;
   } rc;
};

static void send_ack_back(nw_state* s, tw_bf * bf, nw_message * m, tw_lp * lp, mpi_msgs_queue * mpi_op, int matched_req);

static void send_ack_back_rc(nw_state* s, tw_bf * bf, nw_message * m, tw_lp * lp);
/* executes MPI isend and send operations */
static void codes_exec_mpi_send(
        nw_state* s, tw_bf * bf, nw_message * m, tw_lp* lp, struct codes_workload_op * mpi_op, int is_rend);
/* executes MPI irecv operation */
static void codes_exec_mpi_recv(
        nw_state* s, tw_bf * bf, nw_message * m, tw_lp * lp, struct codes_workload_op * mpi_op);
/* reverse of the MPI recv function */
static void codes_exec_mpi_recv_rc(
        nw_state* s, tw_bf * bf, nw_message* m, tw_lp* lp);
/* executes the computational delay */
static void codes_exec_comp_delay(
        nw_state* s, nw_message * m, tw_lp* lp, struct codes_workload_op * mpi_op);
/* gets the next MPI operation from the network-workloads API. */
static void get_next_mpi_operation(
        nw_state* s, tw_bf * bf, nw_message * m, tw_lp * lp);
/* reverse handler of get next mpi operation. */
static void get_next_mpi_operation_rc(
        nw_state* s, tw_bf * bf, nw_message * m, tw_lp * lp);
/* Makes a call to get_next_mpi_operation. */
static void codes_issue_next_event(tw_lp* lp);
/* reverse handler of next operation */
static void codes_issue_next_event_rc(tw_lp* lp);


///////////////////// HELPER FUNCTIONS FOR MPI MESSAGE QUEUE HANDLING ///////////////
/* upon arrival of local completion message, inserts operation in completed send queue */
/* upon arrival of an isend operation, updates the arrival queue of the network */
static void update_completed_queue(
        nw_state * s, tw_bf * bf, nw_message * m, tw_lp * lp, dumpi_req_id req_id);
/* reverse of the above function */
static void update_completed_queue_rc(
        nw_state*s,
        tw_bf * bf,
        nw_message * m,
        tw_lp * lp);
static void update_arrival_queue(
        nw_state*s, tw_bf* bf, nw_message* m, tw_lp * lp);
/* reverse of the above function */
static void update_arrival_queue_rc(
        nw_state*s, tw_bf* bf, nw_message* m, tw_lp * lp);
/* callback to a message sender for computing message time */
static void update_message_time(
        nw_state*s, tw_bf* bf, nw_message* m, tw_lp * lp);
/* reverse for computing message time */
static void update_message_time_rc(
        nw_state*s, tw_bf* bf, nw_message* m, tw_lp * lp);

/* conversion from seconds to nanoseconds */
static tw_stime s_to_ns(tw_stime ns);

/*static void update_message_size_rc(
        struct nw_state * ns,
        tw_lp * lp,
        tw_bf * bf,
        struct nw_message * m)
{*/
/*TODO: Complete reverse handler */
/*    (void)ns;
    (void)lp;
    (void)bf;
    (void)m;
}*/
/* update the message size */
static void update_message_size(
        struct nw_state * ns,
        tw_lp * lp,
        tw_bf * bf,
        struct nw_message * m,
        mpi_msgs_queue * qitem,
        int is_eager,
        int is_send)
{
            (void)bf;
            (void)is_eager;

            struct qhash_head * hash_link = NULL;
            tw_stime msg_init_time = qitem->req_init_time;

            if(ns->msg_sz_table == NULL)
                ns->msg_sz_table = qhash_init(msg_size_hash_compare, quickhash_64bit_hash, RANK_HASH_TABLE_SZ);

            hash_link = qhash_search(ns->msg_sz_table, &(qitem->num_bytes));

            if(is_send)
                msg_init_time = m->fwd.sim_start_time;

            /* update hash table */
            if(!hash_link)
            {
                struct msg_size_info * msg_info = (struct msg_size_info*)malloc(sizeof(struct msg_size_info));
                msg_info->msg_size = qitem->num_bytes;
                msg_info->num_msgs = 1;
                msg_info->agg_latency = tw_now(lp) - msg_init_time;
                msg_info->avg_latency = msg_info->agg_latency;
                qhash_add(ns->msg_sz_table, &(msg_info->msg_size), &(msg_info->hash_link));
                qlist_add(&msg_info->ql, &ns->msg_sz_list);
                //printf("\n Msg size %d aggregate latency %f num messages %d ", m->fwd.num_bytes, msg_info->agg_latency, msg_info->num_msgs);
            }
            else
            {
                struct msg_size_info * tmp = qhash_entry(hash_link, struct msg_size_info, hash_link);
                tmp->num_msgs++;
                tmp->agg_latency += tw_now(lp) - msg_init_time;
                tmp->avg_latency = (tmp->agg_latency / tmp->num_msgs);
//                printf("\n Msg size %d aggregate latency %f num messages %d ", qitem->num_bytes, tmp->agg_latency, tmp->num_msgs);
            }
}
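/* Illustrative note: update_message_size() keeps one msg_size_info entry per distinct
 * message size in msg_sz_table, accumulating num_msgs and agg_latency so that
 * avg_latency is simply agg_latency / num_msgs for that size. */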
static void notify_background_traffic_rc(
	    struct nw_state * ns,
        tw_lp * lp,
        tw_bf * bf,
        struct nw_message * m)
{
    (void)ns;
    (void)bf;
    (void)m;
        
    int num_jobs = codes_jobmap_get_num_jobs(jobmap_ctx); 
    
    for(int i = 0; i < num_jobs - 1; i++)
        tw_rand_reverse_unif(lp->rng); 
}

static void notify_background_traffic(
	    struct nw_state * ns,
        tw_lp * lp,
        tw_bf * bf,
        struct nw_message * m)
{
        (void)bf;
        (void)m;

        struct codes_jobmap_id jid; 
        jid = codes_jobmap_to_local_id(ns->nw_id, jobmap_ctx);
        
        int num_jobs = codes_jobmap_get_num_jobs(jobmap_ctx); 
        
        for(int other_id = 0; other_id < num_jobs; other_id++)
        {
            if(other_id == jid.job)
                continue;

            struct codes_jobmap_id other_jid;
            other_jid.job = other_id;

            int num_other_ranks = codes_jobmap_get_num_ranks(other_id, jobmap_ctx);

            lprintf("\n Other ranks %d ", num_other_ranks);
            tw_stime ts = (1.1 * g_tw_lookahead) + tw_rand_exponential(lp->rng, mean_interval/10000);
            tw_lpid global_dest_id;
     
            for(int k = 0; k < num_other_ranks; k++)    
            {
                other_jid.rank = k;
                int intm_dest_id = codes_jobmap_to_global_id(other_jid, jobmap_ctx); 
                global_dest_id = codes_mapping_get_lpid_from_relative(intm_dest_id, NULL, NW_LP_NM, NULL, 0);

                tw_event * e;
                struct nw_message * m_new;  
                e = tw_event_new(global_dest_id, ts, lp);
                m_new = (struct nw_message*)tw_event_data(e);
                m_new->msg_type = CLI_BCKGND_FIN;
                tw_event_send(e);   
            }
        }
        return;
}
static void notify_neighbor_rc(
	    struct nw_state * ns,
        tw_lp * lp,
        tw_bf * bf,
        struct nw_message * m)
{
       if(bf->c0)
       {
            notify_background_traffic_rc(ns, lp, bf, m);
            return;
       }
   
       if(bf->c1)
       {
          tw_rand_reverse_unif(lp->rng); 
       }
} 
static void notify_neighbor(
	    struct nw_state * ns,
        tw_lp * lp,
        tw_bf * bf,
        struct nw_message * m)
{
    if(ns->local_rank == num_dumpi_traces - 1 
            && ns->is_finished == 1
            && ns->neighbor_completed == 1)
    {
//        printf("\n All workloads completed, notifying background traffic ");
        bf->c0 = 1;
        notify_background_traffic(ns, lp, bf, m);
        return;
    }
    
    struct codes_jobmap_id nbr_jid;
    nbr_jid.job = ns->app_id;
    tw_lpid global_dest_id;

    if(ns->is_finished == 1 && (ns->neighbor_completed == 1 || ns->local_rank == 0))
    {
        bf->c1 = 1;

//        printf("\n Local rank %d notifying neighbor %d ", ns->local_rank, ns->local_rank+1);
        tw_stime ts = (1.1 * g_tw_lookahead) + tw_rand_exponential(lp->rng, mean_interval/10000);
        nbr_jid.rank = ns->local_rank + 1;
        
        /* Send a notification to the neighbor about completion */
        int intm_dest_id = codes_jobmap_to_global_id(nbr_jid, jobmap_ctx); 
        global_dest_id = codes_mapping_get_lpid_from_relative(intm_dest_id, NULL, NW_LP_NM, NULL, 0);
       
        tw_event * e;
        struct nw_message * m_new;  
        e = tw_event_new(global_dest_id, ts, lp);
        m_new = (struct nw_message*)tw_event_data(e);
        m_new->msg_type = CLI_NBR_FINISH;
        tw_event_send(e);   
    }
}
void finish_bckgnd_traffic_rc(
    struct nw_state * ns,
    tw_bf * b,
    struct nw_message * msg,
    tw_lp * lp)
{
        (void)b;
        (void)msg;
        (void)lp;

        ns->is_finished = 0;
        return;
}
void finish_bckgnd_traffic(
    struct nw_state * ns,
    tw_bf * b,
    struct nw_message * msg,
    tw_lp * lp)
{
        (void)b;
        (void)msg;
        ns->is_finished = 1;
        lprintf("\n LP %llu completed sending data %lu at time %lf ", LLU(lp->gid), ns->gen_data, tw_now(lp));
        return;
}

void finish_nbr_wkld_rc(
    struct nw_state * ns,
    tw_bf * b,
    struct nw_message * msg,
    tw_lp * lp)
{
    ns->neighbor_completed = 0;
    
    notify_neighbor_rc(ns, lp, b, msg);
}

void finish_nbr_wkld(
    struct nw_state * ns,
    tw_bf * b,
    struct nw_message * msg,
    tw_lp * lp)
{
    ns->neighbor_completed = 1;

    notify_neighbor(ns, lp, b, msg);
}
static void gen_synthetic_tr_rc(nw_state * s, tw_bf * bf, nw_message * m, tw_lp * lp)
{
    if(bf->c0)
        return;

    int i;
    for (i=0; i < m->rc.saved_syn_length; i++){
        model_net_event_rc2(lp, &m->event_rc);
        s->gen_data -= payload_sz;
        num_syn_bytes_sent -= payload_sz;
    }
    tw_rand_reverse_unif(lp->rng);

}

/* generate synthetic traffic */
static void gen_synthetic_tr(nw_state * s, tw_bf * bf, nw_message * m, tw_lp * lp)
{
    if(s->is_finished == 1)
    {
        bf->c0 = 1;
        return;
    }

    /* Get job information */
    tw_lpid global_dest_id;
    int intm_dest_id;
    nw_message remote_m;

    struct codes_jobmap_id jid;
    jid = codes_jobmap_to_local_id(s->nw_id, jobmap_ctx); 

    int num_clients = codes_jobmap_get_num_ranks(jid.job, jobmap_ctx);

    /* Find destination */
    int* dest_svr = NULL; 
    int i, length=0;
    switch(s->synthetic_pattern)
    {
        case UNIFORM:
        {
            length = 1;
            dest_svr = (int*) calloc(1, sizeof(int));
            dest_svr[0] = tw_rand_integer(lp->rng, 0, num_clients - 1);
            if(dest_svr[0] == s->local_rank)
                dest_svr[0] = (s->local_rank + 1) % num_clients;
        }
        break;
        case NEAREST_NEIGHBOR:
        {
            length = 1;
            dest_svr = (int*) calloc(1, sizeof(int));
            dest_svr[0] = (s->local_rank + 1) % num_clients;
        }
        break;
        case ALLTOALL:
        {
            dest_svr = (int*) calloc(num_clients-1, sizeof(int));
            int index = 0;
            for (i=0;i<num_clients;i++)
            {
                if(i!=s->local_rank) 
                {
                    dest_svr[index] = i;
                    index++;
                    length++;
                }
            }
        }
        break;
        case STENCIL:  //2D 4-point stencil
        {
            /* I think this code snippet is coming from the LLNL stencil patterns. */
            int digits, x=1, y=1, row, col, temp=num_clients;
            length = 4;
            dest_svr = (int*) calloc(4, sizeof(int));
            for (digits = 0; temp > 0; temp >>= 1)
                digits++;
            digits = digits/2;
            for (i = 0; i < digits; i++)
                x = x * 2;
            y = num_clients / x;
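            /* Illustrative example: x is a power of two roughly equal to
             * sqrt(num_clients) and y = num_clients / x, giving an x-by-y grid;
             * with num_clients = 16 this yields x = 4, y = 4, and rank 5
             * (row 1, col 1) sends to ranks 4, 6, 1 and 9. */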
            //printf("\nStencil Syn: x=%d, y=%d", x, y);
            row = s->local_rank / y;
            col = s->local_rank % y;

            dest_svr[0] = row * y + ((col-1+y)%y);   /* left neighbor */
            dest_svr[1] = row * y + ((col+1+y)%y);   /* right neighbor */
            dest_svr[2] = ((row-1+x)%x) * y + col;   /* bottom neighbor */
            dest_svr[3] = ((row+1+x)%x) * y + col;   /* up neighbor */
        }
        break;
        default:
            tw_error(TW_LOC, "Undefined traffic pattern");
    }   
    /* Record length for reverse handler*/
    m->rc.saved_syn_length = length;

    if(length > 0)
    {
        // m->event_array_rc = (model_net_event_return) malloc(length * sizeof(model_net_event_return));
        //printf("\nRANK %d Dests %d", s->local_rank, length);
        for (i = 0; i < length; i++)
        {
            /* Generate synthetic traffic */
            jid.rank = dest_svr[i];
            intm_dest_id = codes_jobmap_to_global_id(jid, jobmap_ctx); 
            global_dest_id = codes_mapping_get_lpid_from_relative(intm_dest_id, NULL, NW_LP_NM, NULL, 0);

            remote_m.fwd.sim_start_time = tw_now(lp);
            remote_m.fwd.dest_rank = dest_svr[i];
            remote_m.msg_type = CLI_BCKGND_ARRIVE;
            remote_m.fwd.num_bytes = payload_sz;
            remote_m.fwd.app_id = s->app_id;
            remote_m.fwd.src_rank = s->local_rank;

            // printf("\nAPP %d SRC %d Dest %d (twid %llu)", jid.job, s->local_rank, dest_svr[i], global_dest_id);
            m->event_rc = model_net_event(net_id, "synthetic-tr", global_dest_id, payload_sz, 0.0, 
                    sizeof(nw_message), (const void*)&remote_m, 
                    0, NULL, lp);
            
            s->gen_data += payload_sz;
            num_syn_bytes_sent += payload_sz; 
        }
    }
    /* New event after MEAN_INTERVAL */  
    tw_stime ts = mean_interval  + tw_rand_exponential(lp->rng, noise); 
    tw_event * e;
    nw_message * m_new;
    e = tw_event_new(lp->gid, ts, lp);
    m_new = (struct nw_message*)tw_event_data(e);
    m_new->msg_type = CLI_BCKGND_GEN;
    tw_event_send(e);

    free(dest_svr);
}

void arrive_syn_tr_rc(nw_state * s, tw_bf * bf, nw_message * m, tw_lp * lp)
{
    (void)bf;
    (void)m;
    (void)lp;
//    printf("\n Data arrived %d total data %ld ", m->fwd.num_bytes, s->syn_data);
    int data = m->fwd.num_bytes;
    s->syn_data -= data;
    num_syn_bytes_recvd -= data;
}
void arrive_syn_tr(nw_state * s, tw_bf * bf, nw_message * m, tw_lp * lp)
{
    (void)bf;
    (void)lp;

//    printf("\n Data arrived %d total data %ld ", m->fwd.num_bytes, s->syn_data);
    if(s->local_rank == 0)
     {
    	printf("\n Data arrived %"PRId64" rank %llu total data %lu ", m->fwd.num_bytes, LLU(s->nw_id), s->syn_data);
/*	if(s->syn_data > upper_threshold)
	{ 
        	struct rusage mem_usage;
		int who = RUSAGE_SELF;
		int err = getrusage(who, &mem_usage);
		printf("\n Memory usage %lf gigabytes", ((double)mem_usage.ru_maxrss / (1024.0 * 1024.0)));
		upper_threshold += 1048576;
	}*/
     }
    int data = m->fwd.num_bytes;
    s->syn_data += data;
    num_syn_bytes_recvd += data;
}
/* Debugging functions, may generate unused function warning */
/*static void print_waiting_reqs(uint32_t * reqs, int count)
{
    lprintf("\n Waiting reqs: %d count", count);
    int i;
    for(i = 0; i < count; i++ )
        lprintf(" %d ", reqs[i]);
}*/
static void print_msgs_queue(struct qlist_head * head, int is_send)
{
    if(is_send)
        printf("\n Send msgs queue: ");
    else
        printf("\n Recv msgs queue: ");

    struct qlist_head * ent = NULL;
    mpi_msgs_queue * current = NULL;
    qlist_for_each(ent, head)
       {
            current = qlist_entry(ent, mpi_msgs_queue, ql);
            printf(" \n Source %d Dest %d bytes %"PRId64" tag %d ", current->source_rank, current->dest_rank, current->num_bytes, current->tag);
       }
}
static void print_completed_queue(tw_lp * lp, struct qlist_head * head)
{
//    printf("\n Completed queue: ");
      struct qlist_head * ent = NULL;
      struct completed_requests* current = NULL;
      tw_output(lp, "\n");
      qlist_for_each(ent, head)
       {
            current = qlist_entry(ent, completed_requests, ql);
            tw_output(lp, " %llu ", LLU(current->req_id));
       }
}
static int clear_completed_reqs(nw_state * s,
        tw_lp * lp,
        unsigned int * reqs, int count)
{
    (void)s;
    (void)lp;

    int i, matched = 0;

    for( i = 0; i < count; i++)
    {
      struct qlist_head * ent = NULL;
      struct completed_requests * current = NULL;
      struct completed_requests * prev = NULL;

      int index = 0;
      qlist_for_each(ent, &s->completed_reqs)
       {
           if(prev)
           {
              rc_stack_push(lp, prev, free, s->matched_reqs);
              prev = NULL;
           }

           current = qlist_entry(ent, completed_requests, ql);
           current->index = index;
           if(current->req_id == reqs[i])
           {
                ++matched;
                qlist_del(&current->ql);
                prev = current;
           }
           ++index;
       }

      if(prev)
      {
         rc_stack_push(lp, prev, free, s->matched_reqs);
         prev = NULL;
      }
    }
    return matched;
}
static void add_completed_reqs(nw_state * s,
        tw_lp * lp,
        int count)
{
    (void)lp;
    for(int i = 0; i < count; i++)
    {
       struct completed_requests * req = (struct completed_requests*)rc_stack_pop(s->matched_reqs);
       // turn on only if wait-all unmatched error arises in optimistic mode.
       qlist_add(&req->ql, &s->completed_reqs);
    }//end for
}

/* helper function - maps an MPI rank to an LP id */
static tw_lpid rank_to_lpid(int rank)
{
    return codes_mapping_get_lpid_from_relative(rank, NULL, "nw-lp", NULL, 0);
}

static int notify_posted_wait(nw_state* s,
        tw_bf * bf, nw_message * m, tw_lp * lp,
        unsigned int completed_req)
{
    (void)bf;

    struct pending_waits* wait_elem = s->wait_op;
    int wait_completed = 0;

    m->fwd.wait_completed = 0;

    if(!wait_elem)
        return 0;

    int op_type = wait_elem->op_type;

    if(op_type == CODES_WK_WAIT &&
            (wait_elem->req_ids[0] == completed_req))
    {
            m->fwd.wait_completed = 1;
            wait_completed = 1;
    }
    else if(op_type == CODES_WK_WAITALL
            || op_type == CODES_WK_WAITANY
            || op_type == CODES_WK_WAITSOME)
    {
        int i;
        for(i = 0; i < wait_elem->count; i++)
        {
            if(wait_elem->req_ids[i] == completed_req)
            {
                wait_elem->num_completed++;
                if(wait_elem->num_completed > wait_elem->count)
                    printf("\n Num completed %d count %d LP %llu ",
                            wait_elem->num_completed,
                            wait_elem->count,
                            LLU(lp->gid));
//                if(wait_elem->num_completed > wait_elem->count)
//                    tw_lp_suspend(lp, 1, 0);

                if(wait_elem->num_completed >= wait_elem->count)
                {
                    if(enable_debug)
                        fprintf(workload_log, "\n(%lf) APP ID %d MPI WAITALL COMPLETED AT %llu ", tw_now(lp), s->app_id, LLU(s->nw_id));
                    wait_completed = 1;
                }

                m->fwd.wait_completed = 1;
            }
        }
    }
    return wait_completed;
}

/* reverse handler of MPI wait operation */
static void codes_exec_mpi_wait_rc(nw_state* s, tw_bf * bf, tw_lp* lp, nw_message * m)
{
   if(bf->c1)
    {
        completed_requests * qi = (completed_requests*)rc_stack_pop(s->processed_ops);
        if(m->fwd.found_match == 0)
        {
            qlist_add(&qi->ql, &s->completed_reqs);
        }
        else
        {
           int index = 1;
           struct qlist_head * ent = NULL;
           qlist_for_each(ent, &s->completed_reqs)
           {
                if(index == m->fwd.found_match)
                {
                    qlist_add(&qi->ql, ent);
                    break;
                }
                index++;
           }
        }
        codes_issue_next_event_rc(lp);
        return;
    }
    struct pending_waits * wait_op = s->wait_op;
    free(wait_op);
    s->wait_op = NULL;
}

/* execute MPI wait operation */
static void codes_exec_mpi_wait(nw_state* s, tw_bf * bf, nw_message * m, tw_lp* lp, struct codes_workload_op * mpi_op)
{
    /* check in the completed receives queue if the request ID has already been completed. */
//    printf("\n Wait posted rank id %d ", s->nw_id);
    assert(!s->wait_op);
    unsigned int req_id = mpi_op->u.wait.req_id;

    struct completed_requests* current = NULL;

    struct qlist_head * ent = NULL;
    int index = 0;
    qlist_for_each(ent, &s->completed_reqs)
    {
        current = qlist_entry(ent, completed_requests, ql);
        if(current->req_id == req_id)
        {
            bf->c1=1;
            qlist_del(&current->ql);
            rc_stack_push(lp, current, free, s->processed_ops);
            codes_issue_next_event(lp);
            m->fwd.found_match = index;
            if(s->nw_id == (tw_lpid)TRACK_LP)
            {
                tw_output(lp, "\n wait matched at post %d ", req_id);
                print_completed_queue(lp, &s->completed_reqs);
            }
            return;
        }
        ++index;
    }

    /*if(s->nw_id == (tw_lpid)TRACK_LP)
    {
        tw_output(lp, "\n wait posted %llu ", req_id);
        print_completed_queue(lp, &s->completed_reqs);
    }*/
    /* If not, add the wait operation in the pending 'waits' list. */
    struct pending_waits* wait_op = (struct pending_waits*)malloc(sizeof(struct pending_waits));
    wait_op->op_type = mpi_op->op_type;
    wait_op->req_ids[0] = req_id;
    wait_op->count = 1;
    wait_op->num_completed = 0;
    wait_op->start_time = tw_now(lp);
    s->wait_op = wait_op;

    return;
}

static void codes_exec_mpi_wait_all_rc(
        nw_state* s,
        tw_bf * bf,
        nw_message * m,
        tw_lp* lp)
{
  if(bf->c1)
  {
    int sampling_indx = s->sampling_indx;
    s->mpi_wkld_samples[sampling_indx].num_waits_sample--;

    if(bf->c2)
    {
        s->cur_interval_end -= sampling_interval;
        s->sampling_indx--;
    }
  }
  if(s->wait_op)
  {
      struct pending_waits * wait_op = s->wait_op;
      free(wait_op);
      s->wait_op = NULL;
  }
  else
  {
      add_completed_reqs(s, lp, m->fwd.num_matched);
      codes_issue_next_event_rc(lp);
  }
  return;
}
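/* Illustrative note: codes_exec_mpi_wait_all() below completes immediately when every
 * request ID in the wait-all is already in completed_reqs; otherwise the wait is
 * parked in s->wait_op and finished later via notify_posted_wait(). */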

static void codes_exec_mpi_wait_all(
        nw_state* s,
        tw_bf * bf,
        nw_message * m,
        tw_lp* lp,
        struct codes_workload_op * mpi_op)
{
  if(enable_debug)
    fprintf(workload_log, "\n MPI WAITALL POSTED AT %llu ", LLU(s->nw_id));

  if(enable_sampling)
  {
    bf->c1 = 1;
    if(tw_now(lp) >= s->cur_interval_end)
    {
        bf->c2 = 1;
        int indx = s->sampling_indx;
        s->mpi_wkld_samples[indx].nw_id = s->nw_id;
        s->mpi_wkld_samples[indx].app_id = s->app_id;
        s->mpi_wkld_samples[indx].sample_end_time = s->cur_interval_end;
        s->cur_interval_end += sampling_interval;
        s->sampling_indx++;
    }
    if(s->sampling_indx >= MAX_STATS)
    {
        struct mpi_workload_sample * tmp = (struct mpi_workload_sample*)calloc((MAX_STATS + s->max_arr_size), sizeof(struct mpi_workload_sample));
        memcpy(tmp, s->mpi_wkld_samples, s->sampling_indx * sizeof(struct mpi_workload_sample));
        free(s->mpi_wkld_samples);
        s->mpi_wkld_samples = tmp;
        s->max_arr_size += MAX_STATS;
    }
    int indx = s->sampling_indx;
    s->mpi_wkld_samples[indx].num_waits_sample++;
  }
  int count = mpi_op->u.waits.count;
  /* If the count reaches the maximum number of wait requests then stop */
  assert(count < MAX_WAIT_REQS);

  int i = 0, num_matched = 0;
  m->fwd.num_matched = 0;

  /*if(lp->gid == TRACK_LP)
  {
      printf("\n MPI Wait all posted ");
      print_waiting_reqs(mpi_op->u.waits.req_ids, count);
      print_completed_queue(lp, &s->completed_reqs);
  }*/
  /* check the number of completed irecvs in the completion queue */
  for(i = 0; i < count; i++)
  {
      unsigned int req_id = mpi_op->u.waits.req_ids[i];
      struct qlist_head * ent = NULL;
      struct completed_requests* current = NULL;
      qlist_for_each(ent, &s->completed_reqs)
       {
            current = qlist_entry(ent, struct completed_requests, ql);
            if(current->req_id == req_id)
                num_matched++;
       }
  }

  m->fwd.found_match = num_matched;
  if(num_matched == count)
  {
      /* No need to post an MPI Wait-all then; issue the next event.
       * Remove all completed requests from the list. */
      m->fwd.num_matched = clear_completed_reqs(s, lp, mpi_op->u.waits.req_ids, count);
      struct pending_waits* wait_op = s->wait_op;
      free(wait_op);
      s->wait_op = NULL;
      codes_issue_next_event(lp);
  }
  else
  {
      /* If not, add the wait operation in the pending 'waits' list. */
      struct pending_waits* wait_op = (struct pending_waits*)malloc(sizeof(struct pending_waits));
      wait_op->count = count;
      wait_op->op_type = mpi_op->op_type;
      assert(count < MAX_WAIT_REQS);

      for(i = 0; i < count; i++)
          wait_op->req_ids[i] =  mpi_op->u.waits.req_ids[i];

      wait_op->num_completed = num_matched;
      wait_op->start_time = tw_now(lp);
      s->wait_op = wait_op;
  }
  return;
}

/* search for a matching mpi operation and remove it from the list.
 * Record the index in the list from where the element got deleted.
 * The index is used for inserting the element once again in the queue for reverse computation. */
static int rm_matching_rcv(nw_state * ns,
        tw_bf * bf,
        nw_message * m,
        tw_lp * lp,
        mpi_msgs_queue * qitem)
{
    int matched = 0;
    int index = 0;
    int is_rend = 0;
    struct qlist_head *ent = NULL;
    mpi_msgs_queue * qi = NULL;

    qlist_for_each(ent, &ns->pending_recvs_queue){
        qi = qlist_entry(ent, mpi_msgs_queue, ql);
        if(//(qi->num_bytes == qitem->num_bytes)
                //&&
               ((qi->tag == qitem->tag) || qi->tag == -1)
                && ((qi->source_rank == qitem->source_rank) || qi->source_rank == -1))
        {
            matched = 1;
            qi->num_bytes = qitem->num_bytes;
            break;
        }
        ++index;
    }

    if(matched)
    {
        if(enable_msg_tracking && qitem->num_bytes < EAGER_THRESHOLD)
        {
            update_message_size(ns, lp, bf, m, qitem, 1, 1);
        }
        if(qitem->num_bytes >= EAGER_THRESHOLD)
        {
            /* Matching receive found, need to notify the sender to transmit
             * the data (only works in sequential mode) */
            bf->c10 = 1;
            is_rend = 1;
            send_ack_back(ns, bf, m, lp, qitem, qi->req_id);
        }
        else
        {
            bf->c12 = 1;
            m->rc.saved_recv_time = ns->recv_time;
            ns->recv_time += (tw_now(lp) - m->fwd.sim_start_time);
        }
        if(qi->op_type == CODES_WK_IRECV && !is_rend)
        {
            bf->c9 = 1;
            /*if(ns->nw_id == (tw_lpid)TRACK_LP)
            {
                printf("\n Completed irecv req id %d ", qi->req_id);
            }*/
            update_completed_queue(ns, bf, m, lp, qi->req_id);
        }
        else if(qi->op_type == CODES_WK_RECV && !is_rend)
        {
            bf->c8 = 1;
            codes_issue_next_event(lp);
        }

        qlist_del(&qi->ql);

        rc_stack_push(lp, qi, free, ns->processed_ops);
        return index;
    }
    return -1;
}
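/* Illustrative note on rm_matching_rcv() above and rm_matching_send() below: messages
 * under EAGER_THRESHOLD complete at the receiver as soon as a match is found, while
 * larger ones only trigger send_ack_back(), deferring completion until the rendezvous
 * data arrives (MPI_REND_ARRIVED). */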

static int rm_matching_send(nw_state * ns,
        tw_bf * bf,
        nw_message * m,
        tw_lp * lp, mpi_msgs_queue * qitem)
{
    int matched = 0;
    struct qlist_head *ent = NULL;
    mpi_msgs_queue * qi = NULL;

    int index = 0;
    qlist_for_each(ent, &ns->arrival_queue){
        qi = qlist_entry(ent, mpi_msgs_queue, ql);
        if(//(qi->num_bytes == qitem->num_bytes) // it is not a requirement in MPI that the send and receive sizes match
                // &&
                (qi->tag == qitem->tag || qitem->tag == -1)
                && ((qi->source_rank == qitem->source_rank) || qitem->source_rank == -1))
        {
            qitem->num_bytes = qi->num_bytes;
            matched = 1;
            break;
        }
        ++index;
    }

    if(matched)
    {
        if(enable_msg_tracking && (qi->num_bytes < EAGER_THRESHOLD))
            update_message_size(ns, lp, bf, m, qi, 1, 0);

        m->fwd.matched_req = qitem->req_id;
        int is_rend = 0;
        if(qitem->num_bytes >= EAGER_THRESHOLD)
        {
            /* Matching receive found, need to notify the sender to transmit
             * the data */
            bf->c10 = 1;
            is_rend = 1;
            send_ack_back(ns, bf, m, lp, qi, qitem->req_id);
        }

        m->rc.saved_recv_time = ns->recv_time;
        ns->recv_time += (tw_now(lp) - qitem->req_init_time);

        /*if(ns->nw_id == (tw_lpid)TRACK_LP && qitem->op_type == CODES_WK_IRECV)
        {
            tw_output(lp, "\n Completed recv req id %d ", qitem->req_id);
            print_completed_queue(lp, &ns->completed_reqs);
        }*/

        if(qitem->op_type == CODES_WK_IRECV && !is_rend)
        {
            bf->c9 = 1;
            update_completed_queue(ns, bf, m, lp, qitem->req_id);
        }
        else if(qitem->op_type == CODES_WK_RECV && !is_rend)
        {
            bf->c6 = 1;
            codes_issue_next_event(lp);
        }

        qlist_del(&qi->ql);

        rc_stack_push(lp, qi, free, ns->processed_ops);
        return index;
    }
    return -1;
}
static void codes_issue_next_event_rc(tw_lp * lp)
{
    tw_rand_reverse_unif(lp->rng);
}

/* Trigger getting next event at LP */
static void codes_issue_next_event(tw_lp* lp)
{
   tw_event *e;
   nw_message* msg;

   tw_stime ts;

   ts = g_tw_lookahead + 0.1 + tw_rand_exponential(lp->rng, noise);
   assert(ts > 0);
   e = tw_event_new( lp->gid, ts, lp );
   msg = (nw_message*)tw_event_data(e);

   msg->msg_type = MPI_OP_GET_NEXT;
   tw_event_send(e);
}
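/* Illustrative note: the exponential jitter (mean `noise`, 1.0 by default) added to ts
 * above keeps self-scheduled MPI_OP_GET_NEXT events from carrying identical timestamps,
 * which presumably helps avoid event ties in optimistic mode. */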

/* Simulate delays between MPI operations */
static void codes_exec_comp_delay(
        nw_state* s, nw_message * m, tw_lp* lp, struct codes_workload_op * mpi_op)
{
	tw_event* e;
	tw_stime ts;
	nw_message* msg;

    m->rc.saved_delay = s->compute_time;