/*
 * Copyright (C) 2014 University of Chicago.
 * See COPYRIGHT notice in top-level directory.
 *
 */
#include <ross.h>
#include <inttypes.h>
#include <sys/stat.h>

#include "codes/codes-workload.h"
#include "codes/codes.h"
#include "codes/configuration.h"
#include "codes/codes_mapping.h"
#include "codes/model-net.h"
#include "codes/rc-stack.h"
#include "codes/quicklist.h"
#include "codes/quickhash.h"
#include "codes/codes-jobmap.h"

/* turning on track lp will generate a lot of output messages */
#define MN_LP_NM "modelnet_dragonfly_custom"
#define CONTROL_MSG_SZ 64
#define TRACE -1
#define MAX_WAIT_REQS 512
#define CS_LP_DBG 1
#define EAGER_THRESHOLD 8192000
#define RANK_HASH_TABLE_SZ 2000
#define NOISE 3.0
#define NW_LP_NM "nw-lp"
#define lprintf(_fmt, ...) \
        do {if (CS_LP_DBG) printf(_fmt, __VA_ARGS__);} while (0)
#define MAX_STATS 65536
#define PAYLOAD_SZ 1024
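
/* A sketch (my reading of the constants above, not code from this file) of how
 * the send path below picks a protocol for a message of num_bytes:
 *
 *   if (num_bytes < EAGER_THRESHOLD) {
 *       // eager: the payload is sent immediately; the sender also pays an
 *       // extra per-byte copy cost (copy_per_byte_eager * num_bytes)
 *   } else {
 *       // rendezvous: a CONTROL_MSG_SZ-byte control message is sent first and
 *       // the payload moves only after the receiver acks the matched receive
 *   }
 */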

static int msg_size_hash_compare(
            void *key, struct qhash_head *link);

int enable_msg_tracking = 0;
tw_lpid TRACK_LP = -1;

int unmatched = 0;
char workload_type[128];
char workload_file[8192];
char offset_file[8192];
static int wrkld_id;
static int num_net_traces = 0;
static int num_dumpi_traces = 0;

static int alloc_spec = 0;
static tw_stime self_overhead = 10.0;
static tw_stime mean_interval = 100000;

/* Doing LP IO*/
static char lp_io_dir[256] = {'\0'};
static char sampling_dir[32] = {'\0'};
static lp_io_handle io_handle;
static unsigned int lp_io_use_suffix = 0;
static int do_lp_io = 0;

/* variables for loading multiple applications */
char workloads_conf_file[8192];
char alloc_file[8192];
int num_traces_of_job[5];
tw_stime soft_delay_mpi = 2500;
tw_stime nic_delay = 1000;
tw_stime copy_per_byte_eager = 0.55;
char file_name_of_job[5][8192];

struct codes_jobmap_ctx *jobmap_ctx;
struct codes_jobmap_params_list jobmap_p;

/* Variables for Cortex Support */
/* Matthieu's additions start */
#ifdef ENABLE_CORTEX_PYTHON
static char cortex_file[512] = "\0";
static char cortex_class[512] = "\0";
static char cortex_gen[512] = "\0";
#endif
/* Matthieu's additions end */

typedef struct nw_state nw_state;
typedef struct nw_message nw_message;
typedef int32_t dumpi_req_id;

static int net_id = 0;
static float noise = 5.0;
static int num_nw_lps = 0, num_mpi_lps = 0;

static int num_syn_clients;

FILE * workload_log = NULL;
FILE * msg_size_log = NULL;
FILE * workload_agg_log = NULL;
FILE * workload_meta_log = NULL;

static uint64_t sample_bytes_written = 0;

long long num_bytes_sent=0;
long long num_bytes_recvd=0;

long long num_syn_bytes_sent = 0;
long long num_syn_bytes_recvd = 0;

double max_time = 0,  max_comm_time = 0, max_wait_time = 0, max_send_time = 0, max_recv_time = 0;
double avg_time = 0, avg_comm_time = 0, avg_wait_time = 0, avg_send_time = 0, avg_recv_time = 0;


/* runtime option for disabling computation time simulation */
static int disable_delay = 0;
static int enable_sampling = 0;
static double sampling_interval = 5000000;
static double sampling_end_time = 3000000000;
static int enable_debug = 0;

/* set group context */
struct codes_mctx group_ratio;

/* MPI_OP_GET_NEXT is for getting the next MPI operation when the previous operation completes.
* MPI_SEND_ARRIVED is issued when an MPI message arrives at its destination (the message is transported by model-net and an event is invoked when it arrives).
* MPI_SEND_POSTED is issued when an MPI message has left the source LP (message is transported via model-net). */
enum MPI_NW_EVENTS
{
	MPI_OP_GET_NEXT=1,
	MPI_SEND_ARRIVED,
    MPI_SEND_ARRIVED_CB, // for tracking message times on sender
	MPI_SEND_POSTED,
    MPI_REND_ARRIVED,
    MPI_REND_ACK_ARRIVED,
    CLI_BCKGND_FIN,
    CLI_BCKGND_ARRIVE,
    CLI_BCKGND_GEN,
    CLI_NBR_FINISH,
};

struct mpi_workload_sample
{
    /* Sampling data */
    int nw_id;
    int app_id;
    unsigned long num_sends_sample;
    unsigned long num_bytes_sample;
    unsigned long num_waits_sample;
    double sample_end_time;
};
/* stores pending MPI operations to be matched with their respective sends/receives. */
struct mpi_msgs_queue
{
    int op_type;
    int tag;
    int source_rank;
    int dest_rank;
    uint64_t num_bytes;
    tw_stime req_init_time;
	dumpi_req_id req_id;
    struct qlist_head ql;
};

/* stores request IDs of completed MPI operations (Isends or Irecvs) */
struct completed_requests
{
	dumpi_req_id req_id;
    struct qlist_head ql;
};

/* for wait operations, store the pending operation and number of completed waits so far. */
struct pending_waits
{
    int op_type;
    int32_t req_ids[MAX_WAIT_REQS];
	int num_completed;
	int count;
    tw_stime start_time;
    struct qlist_head ql;
};
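
/* Illustrative completion rule encoded by pending_waits (my reading of
 * notify_posted_wait() below, not code from this file):
 *
 *   int wait_is_done(const struct pending_waits *w) {
 *       if (w->op_type == CODES_WK_WAIT)
 *           return w->num_completed == 1;    // single request matched
 *       return w->num_completed == w->count; // WAITALL-style operations
 *   }
 *
 * WAITANY/WAITSOME are tracked with the same counters in this file. */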

struct msg_size_info
{
    int64_t msg_size;
    int num_msgs;
    tw_stime agg_latency;
    tw_stime avg_latency;
    struct qhash_head * hash_link;
    struct qlist_head ql; 
};
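
/* Worked example for the fields above: agg_latency accumulates and
 * avg_latency = agg_latency / num_msgs, so three messages of this size with
 * latencies 10, 20 and 30 ns give agg_latency = 60 ns, avg_latency = 20 ns. */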
typedef struct mpi_msgs_queue mpi_msgs_queue;
typedef struct completed_requests completed_requests;
typedef struct pending_waits pending_waits;

/* state of the network LP. It contains the pointers to send/receive lists */
struct nw_state
{
	long num_events_per_lp;
	tw_lpid nw_id;
	short wrkld_end;
    int app_id;
    int local_rank;

    int is_finished;
    int neighbor_completed;

    struct rc_stack * processed_ops;
    struct rc_stack * matched_reqs;

    /* count of sends, receives, collectives and delays */
	unsigned long num_sends;
	unsigned long num_recvs;
	unsigned long num_cols;
	unsigned long num_delays;
	unsigned long num_wait;
	unsigned long num_waitall;
	unsigned long num_waitsome;


	/* time spent by the LP in executing the app trace*/
	double start_time;
	double elapsed_time;
	/* time spent in compute operations */
	double compute_time;
	/* time spent in message send/isend */
	double send_time;
	/* time spent in message receive */
	double recv_time;
	/* time spent in wait operation */
	double wait_time;
	/* FIFO for isend messages arrived on destination */
	struct qlist_head arrival_queue;
	/* FIFO for irecv messages posted but not yet matched with send operations */
	struct qlist_head pending_recvs_queue;
	/* List of completed send/receive requests */
	struct qlist_head completed_reqs;

    tw_stime cur_interval_end;

    /* Pending wait operation */
    struct pending_waits * wait_op;

    /* Message size latency information */
    struct qhash_table * msg_sz_table;
    struct qlist_head msg_sz_list;

    /* quick hash for maintaining message latencies */

    unsigned long num_bytes_sent;
    unsigned long num_bytes_recvd;

    unsigned long syn_data;
    unsigned long gen_data;
    
    /* For sampling data */
    int sampling_indx;
    int max_arr_size;
    struct mpi_workload_sample * mpi_wkld_samples;
    char output_buf[512];
};

/* data for handling reverse computation.
* saved_matched_req holds the request ID of matched receives/sends for wait operations.
* ptr_match_op holds the matched MPI operation, which is removed from the queues when a send is matched with a receive in the forward event handler.
* op is the MPI operation issued by the network workloads API. rv_data holds the data for reverse computation (TODO: fill this data structure only when the simulation runs in optimistic mode). */
struct nw_message
{
   // forward message handler
   int msg_type;
   int op_type;
   model_net_event_return event_rc;

   struct
   {
       tw_lpid src_rank;
       int dest_rank;
       int64_t num_bytes;
       int num_matched;
       int data_type;
       double sim_start_time;
       // for callbacks - time message was received
       double msg_send_time;
       int32_t req_id; /* matches dumpi_req_id; int16_t would truncate request IDs */
       int tag;
       int app_id;
       int found_match;
       short wait_completed;
   } fwd;
   struct
   {
       double saved_send_time;
       double saved_recv_time;
       double saved_wait_time;
       double saved_delay;
       int64_t saved_num_bytes; /* matches fwd.num_bytes; int16_t would truncate large messages */
       struct codes_workload_op * saved_op;
   } rc;
};
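
/* General ROSS forward/reverse pattern these fields support (an illustration,
 * not code from this file): the forward handler saves whatever it overwrites
 * in m->rc so the reverse handler can restore it, e.g.
 *
 *   // forward: m->rc.saved_recv_time = s->recv_time; s->recv_time += delta;
 *   // reverse: s->recv_time = m->rc.saved_recv_time;
 *
 * event_rc stores the token returned by model_net_event() so that
 * model_net_event_rc2() can undo the network event on rollback. */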

static void send_ack_back(nw_state* s, tw_bf * bf, nw_message * m, tw_lp * lp, mpi_msgs_queue * mpi_op);

static void send_ack_back_rc(nw_state* s, tw_bf * bf, nw_message * m, tw_lp * lp);
/* executes MPI isend and send operations */
static void codes_exec_mpi_send(
        nw_state* s, tw_bf * bf, nw_message * m, tw_lp* lp, struct codes_workload_op * mpi_op, int is_rend);
/* execute MPI irecv operation */
static void codes_exec_mpi_recv(
        nw_state* s, tw_bf * bf, nw_message * m, tw_lp * lp, struct codes_workload_op * mpi_op);
/* reverse of mpi recv function. */
static void codes_exec_mpi_recv_rc(
        nw_state* s, tw_bf * bf, nw_message* m, tw_lp* lp);
/* execute the computational delay */
static void codes_exec_comp_delay(
        nw_state* s, nw_message * m, tw_lp* lp, struct codes_workload_op * mpi_op);
/* gets the next MPI operation from the network-workloads API. */
static void get_next_mpi_operation(
        nw_state* s, tw_bf * bf, nw_message * m, tw_lp * lp);
/* reverse handler of get next mpi operation. */
static void get_next_mpi_operation_rc(
        nw_state* s, tw_bf * bf, nw_message * m, tw_lp * lp);
/* Makes a call to get_next_mpi_operation. */
static void codes_issue_next_event(tw_lp* lp);
/* reverse handler of next operation */
static void codes_issue_next_event_rc(tw_lp* lp);


///////////////////// HELPER FUNCTIONS FOR MPI MESSAGE QUEUE HANDLING ///////////////
/* upon arrival of local completion message, inserts operation in completed send queue */
/* upon arrival of an isend operation, updates the arrival queue of the network */
static void update_completed_queue(
        nw_state * s, tw_bf * bf, nw_message * m, tw_lp * lp, dumpi_req_id req_id);
/* reverse of the above function */
static void update_completed_queue_rc(
        nw_state*s,
        tw_bf * bf,
        nw_message * m,
        tw_lp * lp);
static void update_arrival_queue(
        nw_state*s, tw_bf* bf, nw_message* m, tw_lp * lp);
/* reverse of the above function */
static void update_arrival_queue_rc(
        nw_state*s, tw_bf* bf, nw_message* m, tw_lp * lp);
/* callback to a message sender for computing message time */
static void update_message_time(
        nw_state*s, tw_bf* bf, nw_message* m, tw_lp * lp);
/* reverse for computing message time */
static void update_message_time_rc(
        nw_state*s, tw_bf* bf, nw_message* m, tw_lp * lp);

/* conversion from seconds to nanoseconds */
static tw_stime s_to_ns(tw_stime ns);

static void update_message_size_rc(
        struct nw_state * ns,
        tw_lp * lp,
        tw_bf * bf,
        struct nw_message * m)
{
/*TODO: Complete reverse handler */
    (void)ns;
    (void)lp;
    (void)bf;
    (void)m;
}
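
/* A possible shape for the missing reverse handler above (an assumption based
 * on the forward path in update_message_size() below, which either inserts a
 * new msg_size_info entry or bumps an existing one):
 *
 *   // new entry added forward:  qhash_del() + qlist_del() + free the entry
 *   // existing entry updated:   tmp->num_msgs--;
 *   //                           tmp->agg_latency -= saved latency delta;
 *   //                           tmp->avg_latency = tmp->agg_latency / tmp->num_msgs;
 *
 * Doing this exactly would require saving the latency delta in m->rc. */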
/* update the message size */
static void update_message_size(
        struct nw_state * ns,
        tw_lp * lp,
        tw_bf * bf,
        struct nw_message * m,
        mpi_msgs_queue * qitem,
        int is_eager,
        int is_send)
{
            (void)bf;
            (void)is_eager;

            struct qhash_head * hash_link = NULL;
            tw_stime msg_init_time = qitem->req_init_time;
        
            if(!ns->msg_sz_table)
                ns->msg_sz_table = qhash_init(msg_size_hash_compare, quickhash_64bit_hash, RANK_HASH_TABLE_SZ); 
            
            hash_link = qhash_search(ns->msg_sz_table, &(qitem->num_bytes));

            if(is_send)
                msg_init_time = m->fwd.sim_start_time;
            
            /* update hash table */
            if(!hash_link)
            {
                struct msg_size_info * msg_info = malloc(sizeof(struct msg_size_info));
                msg_info->msg_size = qitem->num_bytes;
                msg_info->num_msgs = 1;
                msg_info->agg_latency = tw_now(lp) - msg_init_time;
                msg_info->avg_latency = msg_info->agg_latency;
                qhash_add(ns->msg_sz_table, &(msg_info->msg_size), msg_info->hash_link);
                qlist_add(&msg_info->ql, &ns->msg_sz_list);
                //printf("\n Msg size %d aggregate latency %f num messages %d ", m->fwd.num_bytes, msg_info->agg_latency, msg_info->num_msgs);
            }
            else
            {
                struct msg_size_info * tmp = qhash_entry(hash_link, struct msg_size_info, hash_link);
                tmp->num_msgs++;
                tmp->agg_latency += tw_now(lp) - msg_init_time;  
                tmp->avg_latency = (tmp->agg_latency / tmp->num_msgs);
//                printf("\n Msg size %d aggregate latency %f num messages %d ", qitem->num_bytes, tmp->agg_latency, tmp->num_msgs);
            }
}
static void notify_background_traffic_rc(
	    struct nw_state * ns,
        tw_lp * lp,
        tw_bf * bf,
        struct nw_message * m)
{
    (void)ns;
    (void)bf;
    (void)m;
    tw_rand_reverse_unif(lp->rng); 
}

static void notify_background_traffic(
	    struct nw_state * ns,
        tw_lp * lp,
        tw_bf * bf,
        struct nw_message * m)
{
        (void)bf;
        (void)m;

        struct codes_jobmap_id jid; 
        jid = codes_jobmap_to_local_id(ns->nw_id, jobmap_ctx);
        
        int num_jobs = codes_jobmap_get_num_jobs(jobmap_ctx); 
        
        for(int other_id = 0; other_id < num_jobs; other_id++)
        {
            if(other_id == jid.job)
                continue;

            struct codes_jobmap_id other_jid;
            other_jid.job = other_id;

            int num_other_ranks = codes_jobmap_get_num_ranks(other_id, jobmap_ctx);

            lprintf("\n Other ranks %d ", num_other_ranks);
            tw_stime ts = (1.1 * g_tw_lookahead) + tw_rand_exponential(lp->rng, mean_interval/10000);
            tw_lpid global_dest_id;
     
            for(int k = 0; k < num_other_ranks; k++)    
            {
                other_jid.rank = k;
                int intm_dest_id = codes_jobmap_to_global_id(other_jid, jobmap_ctx); 
                global_dest_id = codes_mapping_get_lpid_from_relative(intm_dest_id, NULL, NW_LP_NM, NULL, 0);

                tw_event * e;
                struct nw_message * m_new;  
                e = tw_event_new(global_dest_id, ts, lp);
                m_new = tw_event_data(e);
                m_new->msg_type = CLI_BCKGND_FIN;
                tw_event_send(e);   
            }
        }
        return;
}
static void notify_neighbor_rc(
	    struct nw_state * ns,
        tw_lp * lp,
        tw_bf * bf,
        struct nw_message * m)
{
       if(bf->c0)
       {
            notify_background_traffic_rc(ns, lp, bf, m);
            return;
       }
   
       if(bf->c1)
       {
          tw_rand_reverse_unif(lp->rng); 
       }
} 
static void notify_neighbor(
	    struct nw_state * ns,
        tw_lp * lp,
        tw_bf * bf,
        struct nw_message * m)
{
    if(ns->local_rank == num_dumpi_traces - 1 
            && ns->is_finished == 1
            && ns->neighbor_completed == 1)
    {
        printf("\n All workloads completed, notifying background traffic ");
        bf->c0 = 1;
        notify_background_traffic(ns, lp, bf, m);
        return;
    }
    
    struct codes_jobmap_id nbr_jid;
    nbr_jid.job = ns->app_id;
    tw_lpid global_dest_id;

    if(ns->is_finished == 1 && (ns->neighbor_completed == 1 || ns->local_rank == 0))
    {
        bf->c1 = 1;

        printf("\n Local rank %d notifying neighbor %d ", ns->local_rank, ns->local_rank+1);
        tw_stime ts = (1.1 * g_tw_lookahead) + tw_rand_exponential(lp->rng, mean_interval/10000);
        nbr_jid.rank = ns->local_rank + 1;
        
        /* Send a notification to the neighbor about completion */
        int intm_dest_id = codes_jobmap_to_global_id(nbr_jid, jobmap_ctx); 
        global_dest_id = codes_mapping_get_lpid_from_relative(intm_dest_id, NULL, NW_LP_NM, NULL, 0);
       
        tw_event * e;
        struct nw_message * m_new;  
        e = tw_event_new(global_dest_id, ts, lp);
        m_new = tw_event_data(e); 
        m_new->msg_type = CLI_NBR_FINISH;
        tw_event_send(e);   
    }
}
void finish_bckgnd_traffic_rc(
    struct nw_state * ns,
    tw_bf * b,
    struct nw_message * msg,
    tw_lp * lp)
{
        (void)b;
        (void)msg;
        (void)lp;

        ns->is_finished = 0;
        return;
}
void finish_bckgnd_traffic(
    struct nw_state * ns,
    tw_bf * b,
    struct nw_message * msg,
    tw_lp * lp)
{
        (void)b;
        (void)msg;
        ns->is_finished = 1;
        lprintf("\n LP %llu completed sending %lu bytes of data at time %lf ", lp->gid, ns->gen_data, tw_now(lp));
        return;
}

void finish_nbr_wkld_rc(
    struct nw_state * ns,
    tw_bf * b,
    struct nw_message * msg,
    tw_lp * lp)
{
    ns->neighbor_completed = 0;
    
    notify_neighbor_rc(ns, lp, b, msg);
}

void finish_nbr_wkld(
    struct nw_state * ns,
    tw_bf * b,
    struct nw_message * msg,
    tw_lp * lp)
{
    printf("\n Workload completed, notifying neighbor ");
    ns->neighbor_completed = 1;

    notify_neighbor(ns, lp, b, msg);
}
static void gen_synthetic_tr_rc(nw_state * s, tw_bf * bf, nw_message * m, tw_lp * lp)
{
    if(bf->c0)
        return;

    model_net_event_rc2(lp, &m->event_rc);
    s->gen_data -= PAYLOAD_SZ;

    num_syn_bytes_sent -= PAYLOAD_SZ;
    tw_rand_reverse_unif(lp->rng);
    tw_rand_reverse_unif(lp->rng);

}

/* generate synthetic traffic */
static void gen_synthetic_tr(nw_state * s, tw_bf * bf, nw_message * m, tw_lp * lp)
{
    if(s->is_finished == 1)
    {
        bf->c0 = 1;
        return;
    }

    /* Get job information */
    tw_lpid global_dest_id;

    struct codes_jobmap_id jid;
    jid = codes_jobmap_to_local_id(s->nw_id, jobmap_ctx); 

    int num_clients = codes_jobmap_get_num_ranks(jid.job, jobmap_ctx);
    int dest_svr = tw_rand_integer(lp->rng, 0, num_clients - 1);

    if(dest_svr == s->local_rank)
    {
       dest_svr = (s->local_rank + 1) % num_clients;
    }
   
    jid.rank = dest_svr;

    int intm_dest_id = codes_jobmap_to_global_id(jid, jobmap_ctx); 
    global_dest_id = codes_mapping_get_lpid_from_relative(intm_dest_id, NULL, NW_LP_NM, NULL, 0);

    nw_message remote_m;
    remote_m.fwd.sim_start_time = tw_now(lp);
    remote_m.fwd.dest_rank = dest_svr;
    remote_m.msg_type = CLI_BCKGND_ARRIVE;
    remote_m.fwd.num_bytes = PAYLOAD_SZ;
    remote_m.fwd.app_id = s->app_id;
    remote_m.fwd.src_rank = s->local_rank;

    m->event_rc = model_net_event(net_id, "synthetic-tr", global_dest_id, PAYLOAD_SZ, 0.0, 
            sizeof(nw_message), (const void*)&remote_m, 
            0, NULL, lp);
    
    s->gen_data += PAYLOAD_SZ;
    num_syn_bytes_sent += PAYLOAD_SZ; 

    /* New event after MEAN_INTERVAL */  
    tw_stime ts = mean_interval  + tw_rand_exponential(lp->rng, NOISE); 
    tw_event * e;
    nw_message * m_new;
    e = tw_event_new(lp->gid, ts, lp);
    m_new = tw_event_data(e);
    m_new->msg_type = CLI_BCKGND_GEN;
    tw_event_send(e);
}
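
/* Back-of-the-envelope load of the generator above (illustrative): each client
 * emits PAYLOAD_SZ bytes roughly every mean_interval ns, an offered load of
 * PAYLOAD_SZ / mean_interval bytes/ns; with the defaults in this file that is
 * 1024 B / 100000 ns, i.e. about 0.01 GB/s per client. */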

void arrive_syn_tr_rc(nw_state * s, tw_bf * bf, nw_message * m, tw_lp * lp)
{
    (void)bf;
    (void)m;
    (void)lp;
//    printf("\n Data arrived %d total data %ld ", m->fwd.num_bytes, s->syn_data);
    int data = m->fwd.num_bytes;
    s->syn_data -= data;
    num_syn_bytes_recvd -= data;
}
void arrive_syn_tr(nw_state * s, tw_bf * bf, nw_message * m, tw_lp * lp)
{
    (void)bf;
    (void)lp;

//    printf("\n Data arrived %d total data %ld ", m->fwd.num_bytes, s->syn_data);
    int data = m->fwd.num_bytes;
    s->syn_data += data;
    num_syn_bytes_recvd += data;
}
/* Debugging functions, may generate unused function warning */
static void print_waiting_reqs(int32_t * reqs, int count)
{
    lprintf("\n Waiting reqs (count %d): ", count);
    int i;
    for(i = 0; i < count; i++ )
        lprintf(" %d ", reqs[i]);
}
static void print_msgs_queue(struct qlist_head * head, int is_send)
{
    if(is_send)
        printf("\n Send msgs queue: ");
    else
        printf("\n Recv msgs queue: ");

    struct qlist_head * ent = NULL;
    mpi_msgs_queue * current = NULL;
    qlist_for_each(ent, head)
       {
            current = qlist_entry(ent, mpi_msgs_queue, ql);
            printf("\n Source %d Dest %d bytes %" PRIu64 " tag %d ", current->source_rank, current->dest_rank, current->num_bytes, current->tag);
       }
}
static void print_completed_queue(struct qlist_head * head)
{
    printf("\n Completed queue: ");
      struct qlist_head * ent = NULL;
      struct completed_requests* current = NULL;
      qlist_for_each(ent, head)
       {
            current = qlist_entry(ent, completed_requests, ql);
            printf(" %d ", current->req_id);
       }
}
static int clear_completed_reqs(nw_state * s,
        tw_lp * lp,
        int32_t * reqs, int count)
{
    (void)s;
    (void)lp;

    int i, matched = 0;

    for( i = 0; i < count; i++)
    {
      struct qlist_head * ent = NULL;
      struct completed_requests * current = NULL;
      struct completed_requests * prev = NULL;

      qlist_for_each(ent, &s->completed_reqs)
       {
            current = qlist_entry(ent, completed_requests, ql);
            
            if(prev)
              rc_stack_push(lp, prev, free, s->matched_reqs);
            
            if(current->req_id == reqs[i])
            {
                ++matched;
                qlist_del(&current->ql);
                prev = current;
            }
            else
                prev = NULL;
       }

      if(prev)
          rc_stack_push(lp, prev, free, s->matched_reqs);
    }
    return matched;
}
static void add_completed_reqs(nw_state * s,
        tw_lp * lp,
        int count)
{
    (void)lp;
    int i;
    for( i = 0; i < count; i++)
    {
       struct completed_requests * req = rc_stack_pop(s->matched_reqs);
       qlist_add(&req->ql, &s->completed_reqs);
    }
}

/* helper function - maps an MPI rank to an LP id */
static tw_lpid rank_to_lpid(int rank)
{
    return codes_mapping_get_lpid_from_relative(rank, NULL, "nw-lp", NULL, 0);
}

static int notify_posted_wait(nw_state* s,
        tw_bf * bf, nw_message * m, tw_lp * lp,
        dumpi_req_id completed_req)
{
    (void)bf;

    struct pending_waits* wait_elem = s->wait_op;
    int wait_completed = 0;

    m->fwd.wait_completed = 0;

    if(!wait_elem)
        return 0;

    int op_type = wait_elem->op_type;

    if(op_type == CODES_WK_WAIT &&
            (wait_elem->req_ids[0] == completed_req))
    {
            wait_completed = 1;
    }
    else if(op_type == CODES_WK_WAITALL
            || op_type == CODES_WK_WAITANY
            || op_type == CODES_WK_WAITSOME)
    {
        int i;
        for(i = 0; i < wait_elem->count; i++)
        {
            if(wait_elem->req_ids[i] == completed_req)
            {
                wait_elem->num_completed++;
                if(wait_elem->num_completed > wait_elem->count)
                    printf("\n Num completed %d count %d LP %llu ",
                            wait_elem->num_completed,
                            wait_elem->count,
                            lp->gid);
//                if(wait_elem->num_completed > wait_elem->count)
//                    tw_lp_suspend(lp, 1, 0);

                if(wait_elem->num_completed == wait_elem->count)
                {
                    if(enable_debug)
                        fprintf(workload_log, "\n(%lf) APP ID %d MPI WAITALL COMPLETED AT %llu ", tw_now(lp), s->app_id, s->nw_id);
                    wait_completed = 1;
                }

                m->fwd.wait_completed = 1;
            }
        }
    }
    return wait_completed;
}
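
/* Hedged usage sketch for notify_posted_wait() (mirrors its caller,
 * update_completed_queue(), defined later in this file):
 *
 *   if (notify_posted_wait(s, bf, m, lp, req_id)) {
 *       // pending wait satisfied: clear s->wait_op and schedule the next
 *       // trace operation with codes_issue_next_event()
 *   } else {
 *       // park req_id in s->completed_reqs for a later wait/waitall
 *   }
 */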

/* reverse handler of MPI wait operation */
static void codes_exec_mpi_wait_rc(nw_state* s, tw_lp* lp)
{
    if(s->wait_op)
     {
         struct pending_waits * wait_op = s->wait_op;
         free(wait_op);
         s->wait_op = NULL;
     }
   else
    {
        codes_issue_next_event_rc(lp);
        completed_requests * qi = rc_stack_pop(s->processed_ops);
        qlist_add(&qi->ql, &s->completed_reqs);
    }
    return;
}

/* execute MPI wait operation */
static void codes_exec_mpi_wait(nw_state* s, tw_lp* lp, struct codes_workload_op * mpi_op)
{
    /* check in the completed receives queue if the request ID has already been completed.*/
    assert(!s->wait_op);
    dumpi_req_id req_id = mpi_op->u.wait.req_id;
    struct completed_requests* current = NULL;

    struct qlist_head * ent = NULL;
    qlist_for_each(ent, &s->completed_reqs)
    {
        current = qlist_entry(ent, completed_requests, ql);
        if(current->req_id == req_id)
        {
            qlist_del(&current->ql);
            rc_stack_push(lp, current, free, s->processed_ops);
            codes_issue_next_event(lp);
            return;
        }
    }
    /* If not, add the wait operation to the pending 'waits' list. */
    struct pending_waits* wait_op = malloc(sizeof(struct pending_waits));
    wait_op->op_type = mpi_op->op_type;
    wait_op->req_ids[0] = req_id;
    wait_op->count = 1;
    wait_op->num_completed = 0;
    wait_op->start_time = tw_now(lp);
    s->wait_op = wait_op;

    return;
}

static void codes_exec_mpi_wait_all_rc(
        nw_state* s,
        tw_bf * bf,
        nw_message * m,
        tw_lp* lp)
{
  if(bf->c1)
  {
    int sampling_indx = s->sampling_indx;
    s->mpi_wkld_samples[sampling_indx].num_waits_sample--;

    if(bf->c2)
    {
        s->cur_interval_end -= sampling_interval;
        s->sampling_indx--;
    }
  }
  if(s->wait_op)
  {
      struct pending_waits * wait_op = s->wait_op;
      free(wait_op);
      s->wait_op = NULL;
  }
  else
  {
      add_completed_reqs(s, lp, m->fwd.num_matched);
      codes_issue_next_event_rc(lp);
  }
  return;
}

static void codes_exec_mpi_wait_all(
        nw_state* s,
        tw_bf * bf,
        nw_message * m,
        tw_lp* lp,
        struct codes_workload_op * mpi_op)
{
  if(enable_debug)
    fprintf(workload_log, "\n MPI WAITALL POSTED AT %llu ", s->nw_id);

  if(enable_sampling)
  {
    bf->c1 = 1;
    if(tw_now(lp) >= s->cur_interval_end)
    {
        bf->c2 = 1;
        int indx = s->sampling_indx;
        s->mpi_wkld_samples[indx].nw_id = s->nw_id;
        s->mpi_wkld_samples[indx].app_id = s->app_id;
        s->mpi_wkld_samples[indx].sample_end_time = s->cur_interval_end;
        s->cur_interval_end += sampling_interval;
        s->sampling_indx++;
    }
    if(s->sampling_indx >= MAX_STATS)
    {
        struct mpi_workload_sample * tmp = calloc((MAX_STATS + s->max_arr_size), sizeof(struct mpi_workload_sample));
        memcpy(tmp, s->mpi_wkld_samples, s->sampling_indx * sizeof(struct mpi_workload_sample)); /* copy whole samples, not just sampling_indx bytes */
        free(s->mpi_wkld_samples);
        s->mpi_wkld_samples = tmp;
        s->max_arr_size += MAX_STATS;
    }
    int indx = s->sampling_indx;
    s->mpi_wkld_samples[indx].num_waits_sample++;
  }
  int count = mpi_op->u.waits.count;
  /* the number of requests waited on must stay below MAX_WAIT_REQS */
  assert(count < MAX_WAIT_REQS);

  int i = 0, num_matched = 0;
  m->fwd.num_matched = 0;

  if(lp->gid == TRACK_LP)
  {
      printf("\n MPI Wait all posted ");
      print_waiting_reqs(mpi_op->u.waits.req_ids, count);
      print_completed_queue(&s->completed_reqs);
  }
      /* check number of completed irecvs in the completion queue */
  for(i = 0; i < count; i++)
  {
      dumpi_req_id req_id = mpi_op->u.waits.req_ids[i];
      struct qlist_head * ent = NULL;
      struct completed_requests* current = NULL;
      qlist_for_each(ent, &s->completed_reqs)
       {
            current = qlist_entry(ent, struct completed_requests, ql);
            if(current->req_id == req_id)
                num_matched++;
       }
  }

  m->fwd.found_match = num_matched;
  if(num_matched == count)
  {
      /* all requests have already completed; no need to post the waitall.
       * Remove the completed requests from the list and issue the next event. */
      m->fwd.num_matched = clear_completed_reqs(s, lp, mpi_op->u.waits.req_ids, count);
      struct pending_waits* wait_op = s->wait_op;
      free(wait_op);
      s->wait_op = NULL;
      codes_issue_next_event(lp);
  }
  else
  {
      /* If not, add the wait operation to the pending 'waits' list. */
	  struct pending_waits* wait_op = malloc(sizeof(struct pending_waits));
	  wait_op->count = count;
      wait_op->op_type = mpi_op->op_type;
      assert(count < MAX_WAIT_REQS);

      for(i = 0; i < count; i++)
          wait_op->req_ids[i] =  mpi_op->u.waits.req_ids[i];

	  wait_op->num_completed = num_matched;
	  wait_op->start_time = tw_now(lp);
      s->wait_op = wait_op;
  }
  return;
}

/* search for a matching mpi operation and remove it from the list.
 * Record the index in the list from where the element got deleted.
 * Index is used for inserting the element once again in the queue for reverse computation. */
static int rm_matching_rcv(nw_state * ns,
        tw_bf * bf,
        nw_message * m,
        tw_lp * lp,
        mpi_msgs_queue * qitem)
{
    int matched = 0;
    int index = 0;
    struct qlist_head *ent = NULL;
    mpi_msgs_queue * qi = NULL;

    qlist_for_each(ent, &ns->pending_recvs_queue){
        qi = qlist_entry(ent, mpi_msgs_queue, ql);
        if(//(qi->num_bytes == qitem->num_bytes)
                //&& 
               ((qi->tag == qitem->tag) || qi->tag == -1)
                && ((qi->source_rank == qitem->source_rank) || qi->source_rank == -1))
        {
            matched = 1;
            qitem->num_bytes = qi->num_bytes;
            break;
        }
        ++index;
    }

    if(matched)
    {
        if(enable_msg_tracking && qitem->num_bytes < EAGER_THRESHOLD)
        {
            update_message_size(ns, lp, bf, m, qitem, 1, 1);
        }
        if(qitem->num_bytes >= EAGER_THRESHOLD)
        {
            /* Matching receive found; need to notify the sender to transmit
             * the data (only works in sequential mode). */
            bf->c10 = 1;
            send_ack_back(ns, bf, m, lp, qitem);
        }
        m->rc.saved_recv_time = ns->recv_time;
        ns->recv_time += (tw_now(lp) - m->fwd.sim_start_time);

        if(qi->op_type == CODES_WK_IRECV)
            update_completed_queue(ns, bf, m, lp, qi->req_id);
        else if(qi->op_type == CODES_WK_RECV)
            codes_issue_next_event(lp);

        qlist_del(&qi->ql);

        rc_stack_push(lp, qi, free, ns->processed_ops);
        return index;
    }
    return -1;
}
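
/* Note on the returned index (illustration of the reverse-computation scheme):
 * the forward handler stores it in m->fwd.found_match and the reverse handler
 * re-inserts the popped queue item at the same position, i.e. index 0 goes
 * back at the head, index >= queue length at the tail, and anything in
 * between is spliced in after walking index entries. */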

static int rm_matching_send(nw_state * ns,
        tw_bf * bf,
        nw_message * m,
        tw_lp * lp, mpi_msgs_queue * qitem)
{
    int matched = 0;
    struct qlist_head *ent = NULL;
    mpi_msgs_queue * qi = NULL;

    int index = 0;
    qlist_for_each(ent, &ns->arrival_queue){
        qi = qlist_entry(ent, mpi_msgs_queue, ql);
        if(//(qi->num_bytes == qitem->num_bytes) // it is not a requirement in MPI that the send and receive sizes match
                // && 
		(qi->tag == qitem->tag || qitem->tag == -1)
                && ((qi->source_rank == qitem->source_rank) || qitem->source_rank == -1))
        {
            qi->num_bytes = qitem->num_bytes;
            matched = 1;
            break;
        }
        ++index;
    }

    if(matched)
    {
        if(enable_msg_tracking && (qi->num_bytes < EAGER_THRESHOLD))
            update_message_size(ns, lp, bf, m, qi, 1, 0);
        
        if(qitem->num_bytes >= EAGER_THRESHOLD)
        {
            /* Matching send found in the arrival queue; notify the sender to
             * transmit the data. */
            bf->c10 = 1;
            send_ack_back(ns, bf, m, lp, qi);
        }
	    rc_stack_push(lp, qi, free, ns->processed_ops);

        m->rc.saved_recv_time = ns->recv_time;
        ns->recv_time += (tw_now(lp) - qitem->req_init_time);

        if(qitem->op_type == CODES_WK_IRECV)
            update_completed_queue(ns, bf, m, lp, qitem->req_id);

        qlist_del(&qi->ql);

        return index;
    }
    return -1;
}
static void codes_issue_next_event_rc(tw_lp * lp)
{
	    tw_rand_reverse_unif(lp->rng);
}

/* Trigger getting next event at LP */
static void codes_issue_next_event(tw_lp* lp)
{
   tw_event *e;
   nw_message* msg;

   tw_stime ts;

   ts = g_tw_lookahead + 0.1 + tw_rand_exponential(lp->rng, noise);
   assert(ts > 0);
   e = tw_event_new( lp->gid, ts, lp );
   msg = tw_event_data(e);

   msg->msg_type = MPI_OP_GET_NEXT;
   tw_event_send(e);
}
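
/* Event-scheduling pattern used throughout this file (illustrative): self
 * events are offset by the lookahead plus a small exponential jitter so the
 * timestamp increment is always strictly positive under optimistic ROSS:
 *
 *   tw_stime ts = g_tw_lookahead + 0.1 + tw_rand_exponential(lp->rng, noise);
 */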

/* Simulate delays between MPI operations */
static void codes_exec_comp_delay(
        nw_state* s, nw_message * m, tw_lp* lp, struct codes_workload_op * mpi_op)
{
	tw_event* e;
	tw_stime ts;
	nw_message* msg;

    m->rc.saved_delay = s->compute_time;
    s->compute_time += s_to_ns(mpi_op->u.delay.seconds);
    ts = s_to_ns(mpi_op->u.delay.seconds);

	ts += g_tw_lookahead + 0.1 + tw_rand_exponential(lp->rng, noise);
    assert(ts > 0);

	e = tw_event_new( lp->gid, ts , lp );
	msg = tw_event_data(e);
	msg->msg_type = MPI_OP_GET_NEXT;
	tw_event_send(e);

}

/* reverse computation operation for MPI irecv */
static void codes_exec_mpi_recv_rc(
        nw_state* ns,
        tw_bf * bf,
        nw_message* m,
        tw_lp* lp)
{
	ns->recv_time = m->rc.saved_recv_time;

    if(bf->c10)
        send_ack_back_rc(ns, bf, m, lp);

	if(m->fwd.found_match >= 0)
	  {
		ns->recv_time = m->rc.saved_recv_time;
        int queue_count = qlist_count(&ns->arrival_queue);

        mpi_msgs_queue * qi = rc_stack_pop(ns->processed_ops);

        if(!m->fwd.found_match)
        {
            qlist_add(&qi->ql, &ns->arrival_queue);
        }
        else if(m->fwd.found_match >= queue_count)
        {
            qlist_add_tail(&qi->ql, &ns->arrival_queue);
        }
        else if(m->fwd.found_match > 0 && m->fwd.found_match < queue_count)
        {
            int index = 1;
            struct qlist_head * ent = NULL;
            qlist_for_each(ent, &ns->arrival_queue)
            {
               if(index == m->fwd.found_match)
               {
                 qlist_add(&qi->ql, ent);
                 break;
               }
               index++;
            }
        }
        if(qi->op_type == CODES_WK_IRECV)
        {
            update_completed_queue_rc(ns, bf, m, lp);
        }
        codes_issue_next_event_rc(lp);
      }
	else if(m->fwd.found_match < 0)
	    {
            struct qlist_head * ent = qlist_pop_back(&ns->pending_recvs_queue);
            mpi_msgs_queue * qi = qlist_entry(ent, mpi_msgs_queue, ql);
            free(qi);

            if(m->op_type == CODES_WK_IRECV)
                codes_issue_next_event_rc(lp);
	    }
}

/* Execute MPI Irecv operation (non-blocking receive) */
static void codes_exec_mpi_recv(
        nw_state* s,
        tw_bf * bf,
        nw_message * m,
        tw_lp* lp,
        struct codes_workload_op * mpi_op)
{
/* Once an irecv is posted, the list of completed sends is checked to find a matching isend.
   If no matching isend is found, the receive operation is queued in the pending queue of
   receive operations. */

	m->rc.saved_recv_time = s->recv_time;
    m->rc.saved_num_bytes = mpi_op->u.recv.num_bytes;

    mpi_msgs_queue * recv_op = (mpi_msgs_queue*) malloc(sizeof(mpi_msgs_queue));
    recv_op->req_init_time = tw_now(lp);
    recv_op->op_type = mpi_op->op_type;
    recv_op->source_rank = mpi_op->u.recv.source_rank;
    recv_op->dest_rank = mpi_op->u.recv.dest_rank;
    recv_op->num_bytes = mpi_op->u.recv.num_bytes;
    recv_op->tag = mpi_op->u.recv.tag;
    recv_op->req_id = mpi_op->u.recv.req_id;

    if(s->nw_id == (tw_lpid)TRACK_LP)
        printf("\n Receive op posted num bytes %" PRIu64 " source %d ", recv_op->num_bytes,
                recv_op->source_rank);

	int found_matching_sends = rm_matching_send(s, bf, m, lp, recv_op);

	/* save the req id inserted in the completed queue for reverse computation. */
	if(found_matching_sends < 0)
	  {
	   	  m->fwd.found_match = -1;
          qlist_add_tail(&recv_op->ql, &s->pending_recvs_queue);

	       /* for mpi irecvs, this is a non-blocking receive so just post it and move on with the trace read. */
		if(mpi_op->op_type == CODES_WK_IRECV)
		   {
			codes_issue_next_event(lp);
			return;
		   }
      }
	else
	  {
        m->fwd.found_match = found_matching_sends;
        codes_issue_next_event(lp);
      }
}

int get_global_id_of_job_rank(tw_lpid job_rank, int app_id)
{
    struct codes_jobmap_id lid;
    lid.job = app_id;
    lid.rank = job_rank;
    int global_rank = codes_jobmap_to_global_id(lid, jobmap_ctx);
    return global_rank;
}
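
/* Hedged usage sketch: combining the jobmap with codes-mapping to turn rank r
 * of application app into an LP id (this mirrors what codes_exec_mpi_send()
 * does below):
 *
 *   int rel = get_global_id_of_job_rank(r, app);
 *   tw_lpid lpid = codes_mapping_get_lpid_from_relative(rel, NULL, NW_LP_NM, NULL, 0);
 */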
static void codes_exec_mpi_send_rc(nw_state * s, tw_bf * bf, nw_message * m, tw_lp * lp)
{
        if(enable_sampling)
        {
           int indx = s->sampling_indx;

           s->mpi_wkld_samples[indx].num_sends_sample--;
           s->mpi_wkld_samples[indx].num_bytes_sample -= m->rc.saved_num_bytes;

           if(bf->c1)
           {
               s->sampling_indx--;
               s->cur_interval_end -= sampling_interval;
           }
        }
        model_net_event_rc2(lp, &m->event_rc);

        if(bf->c4)
            codes_issue_next_event_rc(lp);
        s->num_sends--;

        if(bf->c3)
        {
            s->num_bytes_sent -= m->rc.saved_num_bytes;
            num_bytes_sent -= m->rc.saved_num_bytes;
        }
}
/* executes MPI send and isend operations */
static void codes_exec_mpi_send(nw_state* s,
        tw_bf * bf,
        nw_message * m,
        tw_lp* lp,
        struct codes_workload_op * mpi_op,
        int is_rend)
{
    int global_dest_rank = mpi_op->u.send.dest_rank;

    if(alloc_spec)
    {
        global_dest_rank = get_global_id_of_job_rank(mpi_op->u.send.dest_rank, s->app_id);
    }

//    printf("\n Sender rank %llu global dest rank %d dest-rank %d bytes %d Tag %d", s->nw_id, global_dest_rank, mpi_op->u.send.dest_rank, mpi_op->u.send.num_bytes, mpi_op->u.send.tag);
    m->rc.saved_num_bytes = mpi_op->u.send.num_bytes;
	/* model-net event */
	tw_lpid dest_rank = codes_mapping_get_lpid_from_relative(global_dest_rank, NULL, "nw-lp", NULL, 0);

    if(!is_rend)
    {
        bf->c3 = 1;
        num_bytes_sent += mpi_op->u.send.num_bytes;
        s->num_bytes_sent += mpi_op->u.send.num_bytes;
    }
    if(enable_sampling)
    {
        if(tw_now(lp) >= s->cur_interval_end)
        {
            bf->c1 = 1;
            int indx = s->sampling_indx;
            s->mpi_wkld_samples[indx].nw_id = s->nw_id;
            s->mpi_wkld_samples[indx].app_id = s->app_id;
            s->mpi_wkld_samples[indx].sample_end_time = s->cur_interval_end;
            s->sampling_indx++;
            s->cur_interval_end += sampling_interval;
        }
        if(s->sampling_indx >= MAX_STATS)
        {
            struct mpi_workload_sample * tmp = calloc((MAX_STATS + s->max_arr_size), sizeof(struct mpi_workload_sample));
            memcpy(tmp, s->mpi_wkld_samples, s->sampling_indx * sizeof(struct mpi_workload_sample)); /* copy whole samples, not just sampling_indx bytes */
            free(s->mpi_wkld_samples);
            s->mpi_wkld_samples = tmp;
            s->max_arr_size += MAX_STATS;
        }
        int indx = s->sampling_indx;
        s->mpi_wkld_samples[indx].num_sends_sample++;
        s->mpi_wkld_samples[indx].num_bytes_sample += mpi_op->u.send.num_bytes;
    }
	nw_message local_m;
	nw_message remote_m;

    local_m.fwd.dest_rank = mpi_op->u.send.dest_rank;
    local_m.fwd.src_rank = mpi_op->u.send.source_rank;
    local_m.op_type = mpi_op->op_type;
    local_m.msg_type = MPI_SEND_POSTED;
    local_m.fwd.tag = mpi_op->u.send.tag;
    local_m.fwd.num_bytes = mpi_op->u.send.num_bytes;
    local_m.fwd.req_id = mpi_op->u.send.req_id;
    local_m.fwd.app_id = s->app_id;
    if(mpi_op->u.send.num_bytes < EAGER_THRESHOLD)
    {
        /* directly issue a model-net send */
        tw_stime copy_overhead = copy_per_byte_eager * mpi_op->u.send.num_bytes;
        local_m.fwd.sim_start_time = tw_now(lp);

        remote_m = local_m;
        remote_m.msg_type = MPI_SEND_ARRIVED;
    	m->event_rc = model_net_event_mctx(net_id, &group_ratio, &group_ratio, 
            "test", dest_rank, mpi_op->u.send.num_bytes, (self_overhead + copy_overhead + soft_delay_mpi + nic_delay),
	    sizeof(nw_message), (const void*)&remote_m, sizeof(nw_message), (const void*)&local_m, lp);
    }
    else if (is_rend == 0)
    {
        /* Initiate the handshake. Issue a control message to the destination first. No local message,
         * only remote message sent. */
        remote_m.fwd.sim_start_time = tw_now(lp);
        remote_m.fwd.dest_rank = mpi_op->u.send.dest_rank;   
        remote_m.fwd.src_rank = mpi_op->u.send.source_rank;
        remote_m.msg_type = MPI_SEND_ARRIVED;
        remote_m.op_type = mpi_op->op_type;
        remote_m.fwd.tag = mpi_op->u.send.tag; 
        remote_m.fwd.num_bytes = mpi_op->u.send.num_bytes;
        remote_m.fwd.req_id = mpi_op->u.send.req_id;  
        remote_m.fwd.app_id = s->app_id;

    	m->event_rc = model_net_event_mctx(net_id, &group_ratio, &group_ratio, 
            "test", dest_rank, CONTROL_MSG_SZ, (self_overhead + soft_delay_mpi + nic_delay),
	    sizeof(nw_message), (const void*)&remote_m, 0, NULL, lp);
    }
    else if(is_rend == 1)
    {
        /* initiate the actual data transfer, local completion message is sent
         * for any blocking sends. */
       local_m.fwd.sim_start_time = mpi_op->sim_start_time;
       rem