model-net-mpi-replay.c 74.8 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
/*
 * Copyright (C) 2014 University of Chicago.
 * See COPYRIGHT notice in top-level directory.
 *
 */
#include <ross.h>
#include <inttypes.h>

#include "codes/codes-workload.h"
#include "codes/codes.h"
#include "codes/configuration.h"
#include "codes/codes_mapping.h"
#include "codes/model-net.h"
#include "codes/rc-stack.h"
#include "codes/quicklist.h"
16
#include "codes/quickhash.h"
17
#include "codes/codes-jobmap.h"
18

19
/* turning on track lp will generate a lot of output messages */
20
#define MN_LP_NM "modelnet_dragonfly_custom"
21
#define CONTROL_MSG_SZ 64
22
#define TRACE -1
23
#define MAX_WAIT_REQS 512
24
#define CS_LP_DBG 0
25
#define EAGER_THRESHOLD 81920000
26
27
28
#define RANK_HASH_TABLE_SZ 2000
#define NOISE 3.0
#define NW_LP_NM "nw-lp"
29
30
31
#define lprintf(_fmt, ...) \
        do {if (CS_LP_DBG) printf(_fmt, __VA_ARGS__);} while (0)
#define MAX_STATS 65536
32
#define PAYLOAD_SZ 1024
33

34
35
36
static int msg_size_hash_compare(
            void *key, struct qhash_head *link);

37
int enable_msg_tracking = 0;
38
tw_lpid TRACK_LP = -1;
39
40

int unmatched = 0;
41
42
43
44
45
char workload_type[128];
char workload_file[8192];
char offset_file[8192];
static int wrkld_id;
static int num_net_traces = 0;
46
47
static int num_dumpi_traces = 0;

48
static int alloc_spec = 0;
49
50
static tw_stime self_overhead = 10.0;
static tw_stime mean_interval = 100000;
51
52
53
54
55
56
57

/* Doing LP IO*/
static char lp_io_dir[256] = {'\0'};
static lp_io_handle io_handle;
static unsigned int lp_io_use_suffix = 0;
static int do_lp_io = 0;

58
59
60
61
/* variables for loading multiple applications */
char workloads_conf_file[8192];
char alloc_file[8192];
int num_traces_of_job[5];
62
63
64
tw_stime soft_delay_mpi = 2500;
tw_stime nic_delay = 1000;
tw_stime copy_per_byte_eager = 0.55;
65
66
67
68
69
char file_name_of_job[5][8192];

struct codes_jobmap_ctx *jobmap_ctx;
struct codes_jobmap_params_list jobmap_p;

70
71
/* Variables for Cortex Support */
/* Matthieu's additions start */
72
#ifdef ENABLE_CORTEX_PYTHON
73
74
75
static char cortex_file[512] = "\0";
static char cortex_class[512] = "\0";
static char cortex_gen[512] = "\0";
76
#endif
77
78
/* Matthieu's additions end */

79
80
typedef struct nw_state nw_state;
typedef struct nw_message nw_message;
81
typedef int32_t dumpi_req_id;
82
83
84

static int net_id = 0;
static float noise = 5.0;
85
86
87
static int num_nw_lps = 0, num_mpi_lps = 0;

static int num_syn_clients;
88

89
FILE * workload_log = NULL;
90
FILE * msg_size_log = NULL;
91
92
93
94
FILE * workload_agg_log = NULL;
FILE * workload_meta_log = NULL;

static uint64_t sample_bytes_written = 0;
95

96
97
98
long long num_bytes_sent=0;
long long num_bytes_recvd=0;

99
100
101
long long num_syn_bytes_sent = 0;
long long num_syn_bytes_recvd = 0;

102
103
104
105
106
107
double max_time = 0,  max_comm_time = 0, max_wait_time = 0, max_send_time = 0, max_recv_time = 0;
double avg_time = 0, avg_comm_time = 0, avg_wait_time = 0, avg_send_time = 0, avg_recv_time = 0;


/* runtime option for disabling computation time simulation */
static int disable_delay = 0;
108
109
110
static int enable_sampling = 0;
static double sampling_interval = 5000000;
static double sampling_end_time = 3000000000;
111
static int enable_debug = 0;
112

113
114
115
/* set group context */
struct codes_mctx group_ratio;

116
/* Event types dispatched to the nw-lp event handler.
* MPI_OP_GET_NEXT is for getting next MPI operation when the previous operation completes.
* MPI_SEND_ARRIVED is issued when a MPI message arrives at its destination (the message is transported by model-net and an event is invoked when it arrives.
* MPI_SEND_POSTED is issued when a MPI message has left the source LP (message is transported via model-net). */
enum MPI_NW_EVENTS
{
	MPI_OP_GET_NEXT=1,
	MPI_SEND_ARRIVED,
    MPI_SEND_ARRIVED_CB, // for tracking message times on sender
	MPI_SEND_POSTED,
    MPI_REND_ARRIVED,       /* rendezvous-protocol transfer arrived (messages >= EAGER_THRESHOLD) */
    MPI_REND_ACK_ARRIVED,   /* rendezvous acknowledgement arrived back at the sender */
    CLI_BCKGND_FIN,         /* background (synthetic) traffic told to stop -- see notify_background_traffic() */
    CLI_BCKGND_ARRIVE,      /* synthetic traffic payload arrival -- see arrive_syn_tr() */
    CLI_BCKGND_GEN,         /* self-event: generate the next synthetic message -- see gen_synthetic_tr() */
    CLI_NBR_FINISH,         /* neighbor rank finished its trace -- see finish_nbr_wkld() */
};

133
134
135
136
/* one entry of per-LP sampled statistics, recorded every sampling_interval */
struct mpi_workload_sample
{
    /* Sampling data */
    int nw_id;                       /* simulated MPI rank (nw-lp id) */
    int app_id;                      /* job/application this rank belongs to */
    unsigned long num_sends_sample;  /* sends observed in this interval */
    unsigned long num_bytes_sample;  /* bytes sent in this interval */
    unsigned long num_waits_sample;  /* wait operations observed in this interval */
    double sample_end_time;          /* virtual time at which this interval closes */
};
/* stores pointers of pending MPI operations to be matched with their respective sends/receives. */
struct mpi_msgs_queue
{
    int op_type;            /* CODES_WK_* operation code */
    int tag;                /* MPI tag; -1 acts as wildcard on the recv side */
    int source_rank;        /* sender rank; -1 acts as wildcard on the recv side */
    int dest_rank;
    uint64_t num_bytes;
    tw_stime req_init_time; /* virtual time the operation was posted (latency baseline) */
	dumpi_req_id req_id;
    struct qlist_head ql;   /* linkage into arrival_queue / pending_recvs_queue */
};

/* stores request IDs of completed MPI operations (Isends or Irecvs) */
struct completed_requests
{
	dumpi_req_id req_id;
    struct qlist_head ql;   /* linkage into nw_state.completed_reqs */
};

/* for wait operations, store the pending operation and number of completed waits so far. */
struct pending_waits
{
    int op_type;                      /* CODES_WK_WAIT / WAITALL / WAITANY / WAITSOME */
    int32_t req_ids[MAX_WAIT_REQS];   /* request IDs this wait is blocked on */
	int num_completed;                /* how many of req_ids have completed so far */
	int count;                        /* total number of requests being waited on */
    tw_stime start_time;              /* virtual time the wait was posted */
    struct qlist_head ql;
};

/* per-message-size latency aggregate, kept in nw_state.msg_sz_table/msg_sz_list */
struct msg_size_info
{
    int64_t msg_size;
    int num_msgs;
    tw_stime agg_latency;
    tw_stime avg_latency;
    /* NOTE(review): this is a *pointer* to a qhash_head, but update_message_size()
     * passes it to qhash_add() without ever allocating or initializing it --
     * looks like it was meant to be an embedded `struct qhash_head hash_link;`.
     * Fixing it requires changing both this struct and update_message_size()
     * together; flagged there as well. */
    struct qhash_head * hash_link;
    struct qlist_head ql; 
};
typedef struct mpi_msgs_queue mpi_msgs_queue;
typedef struct completed_requests completed_requests;
typedef struct pending_waits pending_waits;

/* state of the network LP. It contains the pointers to send/receive lists */
struct nw_state
{
	long num_events_per_lp;
	tw_lpid nw_id;          /* global id of this simulated MPI rank */
	short wrkld_end;        /* set when the workload trace is exhausted */
    int app_id;             /* job this rank belongs to (jobmap) */
    int local_rank;         /* rank within the job */

    int is_finished;        /* 1 once this rank's trace has completed */
    int neighbor_completed; /* 1 once rank local_rank-1 reported completion */

    /* rc stacks hold freed/dequeued items so reverse handlers can restore them */
    struct rc_stack * processed_ops;
    struct rc_stack * matched_reqs;

    /* count of sends, receives, collectives and delays */
	unsigned long num_sends;
	unsigned long num_recvs;
	unsigned long num_cols;
	unsigned long num_delays;
	unsigned long num_wait;
	unsigned long num_waitall;
	unsigned long num_waitsome;

	/* time spent by the LP in executing the app trace*/
	double start_time;
	double elapsed_time;
	/* time spent in compute operations */
	double compute_time;
	/* time spent in message send/isend */
	double send_time;
	/* time spent in message receive */
	double recv_time;
	/* time spent in wait operation */
	double wait_time;
	/* FIFO for isend messages arrived on destination */
	struct qlist_head arrival_queue;
	/* FIFO for irecv messages posted but not yet matched with send operations */
	struct qlist_head pending_recvs_queue;
	/* List of completed send/receive requests */
	struct qlist_head completed_reqs;

    /* end of the current statistics sampling interval (virtual time) */
    tw_stime cur_interval_end;

    /* Pending wait operation */
    struct pending_waits * wait_op;

    /* Message size latency information */
    struct qhash_table * msg_sz_table;
    struct qlist_head msg_sz_list;

    /* quick hash for maintaining message latencies */

    unsigned long num_bytes_sent;
    unsigned long num_bytes_recvd;

    /* synthetic (background) traffic byte counters: received / generated */
    unsigned long syn_data;
    unsigned long gen_data;
    
    /* For sampling data */
    int sampling_indx;                            /* current slot in mpi_wkld_samples */
    int max_arr_size;                             /* allocated capacity of mpi_wkld_samples */
    struct mpi_workload_sample * mpi_wkld_samples;
    char output_buf[512];
};

/* data for handling reverse computation.
* saved_matched_req holds the request ID of matched receives/sends for wait operations.
* ptr_match_op holds the matched MPI operation which are removed from the queues when a send is matched with the receive in forward event handler.
* network event being sent. op is the MPI operation issued by the network workloads API. rv_data holds the data for reverse computation (TODO: Fill this data structure only when the simulation runs in optimistic mode). */
struct nw_message
{
   // forward message handler
   int msg_type;                      /* enum MPI_NW_EVENTS discriminator */
   int op_type;                       /* CODES_WK_* code of the originating operation */
   model_net_event_return event_rc;   /* saved for model_net_event_rc2() on rollback */

   struct
   {
       tw_lpid src_rank;
       int dest_rank;
       int64_t num_bytes;
       int num_matched;               /* # completed reqs cleared by a waitall (for rc) */
       int data_type;
       double sim_start_time;
       // for callbacks - time message was received
       double msg_send_time;
       /* NOTE(review): int16_t here but dumpi_req_id is int32_t -- request IDs
        * above INT16_MAX would be silently truncated. Confirm the workload's
        * request-ID range before relying on this field. */
       int16_t req_id;
       int tag;
       int app_id;
       int found_match;               /* # matches found at waitall post time (for rc) */
       short wait_completed;          /* set when this event completed a pending wait */
   } fwd;
   struct
   {
       /* snapshots of nw_state timing fields, restored on rollback */
       double saved_send_time;
       double saved_recv_time;
       double saved_wait_time;
       double saved_delay;
       /* NOTE(review): int16_t may truncate byte counts; verify against usage */
       int16_t saved_num_bytes;
       struct codes_workload_op * saved_op;
   } rc;
};

292
static void send_ack_back(nw_state* s, tw_bf * bf, nw_message * m, tw_lp * lp, mpi_msgs_queue * mpi_op);
293
294

static void send_ack_back_rc(nw_state* s, tw_bf * bf, nw_message * m, tw_lp * lp);
295
296
/* executes MPI isend and send operations */
static void codes_exec_mpi_send(
297
        nw_state* s, tw_bf * bf, nw_message * m, tw_lp* lp, struct codes_workload_op * mpi_op, int is_rend);
298
299
/* execute MPI irecv operation */
static void codes_exec_mpi_recv(
300
        nw_state* s, tw_bf * bf, nw_message * m, tw_lp * lp, struct codes_workload_op * mpi_op);
301
302
/* reverse of mpi recv function. */
static void codes_exec_mpi_recv_rc(
303
        nw_state* s, tw_bf * bf, nw_message* m, tw_lp* lp);
304
305
/* execute the computational delay */
static void codes_exec_comp_delay(
306
        nw_state* s, nw_message * m, tw_lp* lp, struct codes_workload_op * mpi_op);
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
/* gets the next MPI operation from the network-workloads API. */
static void get_next_mpi_operation(
        nw_state* s, tw_bf * bf, nw_message * m, tw_lp * lp);
/* reverse handler of get next mpi operation. */
static void get_next_mpi_operation_rc(
        nw_state* s, tw_bf * bf, nw_message * m, tw_lp * lp);
/* Makes a call to get_next_mpi_operation. */
static void codes_issue_next_event(tw_lp* lp);
/* reverse handler of next operation */
static void codes_issue_next_event_rc(tw_lp* lp);


///////////////////// HELPER FUNCTIONS FOR MPI MESSAGE QUEUE HANDLING ///////////////
/* upon arrival of local completion message, inserts operation in completed send queue */
/* upon arrival of an isend operation, updates the arrival queue of the network */
322
323
324
325
326
327
328
329
static void update_completed_queue(
        nw_state * s, tw_bf * bf, nw_message * m, tw_lp * lp, dumpi_req_id req_id);
/* reverse of the above function */
static void update_completed_queue_rc(
        nw_state*s,
        tw_bf * bf,
        nw_message * m,
        tw_lp * lp);
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
static void update_arrival_queue(
        nw_state*s, tw_bf* bf, nw_message* m, tw_lp * lp);
/* reverse of the above function */
static void update_arrival_queue_rc(
        nw_state*s, tw_bf* bf, nw_message* m, tw_lp * lp);
/* callback to a message sender for computing message time */
static void update_message_time(
        nw_state*s, tw_bf* bf, nw_message* m, tw_lp * lp);
/* reverse for computing message time */
static void update_message_time_rc(
        nw_state*s, tw_bf* bf, nw_message* m, tw_lp * lp);

/* conversion from seconds to eanaoseconds */
static tw_stime s_to_ns(tw_stime ns);

345
346
347
348
349
350
/* Reverse handler for update_message_size(). Intentionally a stub: the
 * per-message-size latency statistics are not rolled back on rollback, so
 * they may be slightly inaccurate in optimistic runs. */
static void update_message_size_rc(
        struct nw_state * ns,
        tw_lp * lp,
        tw_bf * bf,
        struct nw_message * m)
{
/*TODO: Complete reverse handler */
    (void)ns;
    (void)lp;
    (void)bf;
    (void)m;
}
357
358
359
360
361
362
363
364
365
366
/* update the message size */
/* Record a latency sample for a message of qitem->num_bytes bytes: latency is
 * tw_now(lp) minus the operation's init time (or the message's send time when
 * is_send is set). Samples are aggregated per message size in ns->msg_sz_table
 * (keyed by byte count) and ns->msg_sz_list. Only called when
 * enable_msg_tracking is on. */
static void update_message_size(
        struct nw_state * ns,
        tw_lp * lp,
        tw_bf * bf,
        struct nw_message * m,
        mpi_msgs_queue * qitem,
        int is_eager,
        int is_send)
{
            (void)bf;
            (void)is_eager;

            struct qhash_head * hash_link = NULL;
            tw_stime msg_init_time = qitem->req_init_time;
        
            /* lazily create the per-LP hash table on first use */
            if(!ns->msg_sz_table)
                ns->msg_sz_table = qhash_init(msg_size_hash_compare, quickhash_64bit_hash, RANK_HASH_TABLE_SZ); 
            
            hash_link = qhash_search(ns->msg_sz_table, &(qitem->num_bytes));

            /* on the sender side, measure from the time the message was sent */
            if(is_send)
                msg_init_time = m->fwd.sim_start_time;
            
            /* update hash table */
            if(!hash_link)
            {
                /* first message of this size: create a new aggregate entry.
                 * NOTE(review): malloc result is not checked. */
                struct msg_size_info * msg_info = malloc(sizeof(struct msg_size_info));
                msg_info->msg_size = qitem->num_bytes;
                msg_info->num_msgs = 1;
                msg_info->agg_latency = tw_now(lp) - msg_init_time;
                msg_info->avg_latency = msg_info->agg_latency;
                /* FIXME(review): msg_info->hash_link is an *uninitialized pointer*
                 * here -- qhash_add() is handed garbage. The struct member was
                 * almost certainly meant to be an embedded `struct qhash_head`
                 * passed as &msg_info->hash_link (which would also make the
                 * qhash_entry() container-of below correct). Fix requires a
                 * coordinated change to struct msg_size_info. */
                qhash_add(ns->msg_sz_table, &(msg_info->msg_size), msg_info->hash_link);
                qlist_add(&msg_info->ql, &ns->msg_sz_list);
                //printf("\n Msg size %d aggregate latency %f num messages %d ", m->fwd.num_bytes, msg_info->agg_latency, msg_info->num_msgs);
            }
            else
            {
                /* existing entry: fold this sample into the running aggregate */
                struct msg_size_info * tmp = qhash_entry(hash_link, struct msg_size_info, hash_link);
                tmp->num_msgs++;
                tmp->agg_latency += tw_now(lp) - msg_init_time;  
                tmp->avg_latency = (tmp->agg_latency / tmp->num_msgs);
//                printf("\n Msg size %d aggregate latency %f num messages %d ", qitem->num_bytes, tmp->agg_latency, tmp->num_msgs);
            }
}
/* Reverse handler for notify_background_traffic(): undo the RNG advance.
 * NOTE(review): the forward handler draws one tw_rand_exponential sample per
 * non-self job, but only a single draw is reversed here -- correct only when
 * at most one other job exists; confirm for multi-job configurations. */
static void notify_background_traffic_rc(
	    struct nw_state * ns,
        tw_lp * lp,
        tw_bf * bf,
        struct nw_message * m)
{
    (void)ns;
    (void)bf;
    (void)m;
    tw_rand_reverse_unif(lp->rng); 
}

/* Broadcast a CLI_BCKGND_FIN event to every rank of every *other* job so the
 * synthetic background traffic generators stop once the traced workload has
 * fully completed. Invoked from notify_neighbor() by the last rank.
 * Reverse handler: notify_background_traffic_rc(). */
static void notify_background_traffic(
	    struct nw_state * ns,
        tw_lp * lp,
        tw_bf * bf,
        struct nw_message * m)
{
        (void)bf;
        (void)m;

        struct codes_jobmap_id jid; 
        jid = codes_jobmap_to_local_id(ns->nw_id, jobmap_ctx);
        
        int num_jobs = codes_jobmap_get_num_jobs(jobmap_ctx); 
        
        for(int other_id = 0; other_id < num_jobs; other_id++)
        {
            /* skip our own job -- only background-traffic jobs are notified */
            if(other_id == jid.job)
                continue;

            struct codes_jobmap_id other_jid;
            other_jid.job = other_id;

            int num_other_ranks = codes_jobmap_get_num_ranks(other_id, jobmap_ctx);

            lprintf("\n Other ranks %d ", num_other_ranks);
            /* jittered offset past lookahead; one draw per notified job
             * (see the rc-handler note about RNG reversal) */
            tw_stime ts = (1.1 * g_tw_lookahead) + tw_rand_exponential(lp->rng, mean_interval/10000);
            tw_lpid global_dest_id;
     
            for(int k = 0; k < num_other_ranks; k++)    
            {
                /* map (job, rank) -> global LP id and send the FIN event */
                other_jid.rank = k;
                int intm_dest_id = codes_jobmap_to_global_id(other_jid, jobmap_ctx); 
                global_dest_id = codes_mapping_get_lpid_from_relative(intm_dest_id, NULL, NW_LP_NM, NULL, 0);

                tw_event * e;
                struct nw_message * m_new;  
                e = tw_event_new(global_dest_id, ts, lp);
                m_new = tw_event_data(e);
                m_new->msg_type = CLI_BCKGND_FIN;
                tw_event_send(e);   
            }
        }
        return;
}
/* Reverse handler for notify_neighbor(): the bitfield flags record which
 * forward branch was taken (c0 = background-traffic broadcast, c1 = single
 * neighbor notification), so exactly the matching work is undone. */
static void notify_neighbor_rc(
	    struct nw_state * ns,
        tw_lp * lp,
        tw_bf * bf,
        struct nw_message * m)
{
       if(bf->c0)
       {
            notify_background_traffic_rc(ns, lp, bf, m);
            return;
       }
   
       if(bf->c1)
       {
          /* undo the tw_rand_exponential draw made when scheduling CLI_NBR_FINISH */
          tw_rand_reverse_unif(lp->rng); 
       }
} 
/* Completion daisy-chain: when this rank has finished and its lower neighbor
 * has too (or it is rank 0), pass the token to local_rank+1 via CLI_NBR_FINISH.
 * The last rank in the chain instead tells all background-traffic jobs to stop.
 * Reverse handler: notify_neighbor_rc(). */
static void notify_neighbor(
	    struct nw_state * ns,
        tw_lp * lp,
        tw_bf * bf,
        struct nw_message * m)
{
    /* last rank + everything upstream done -> whole workload is finished */
    if(ns->local_rank == num_dumpi_traces - 1 
            && ns->is_finished == 1
            && ns->neighbor_completed == 1)
    {
        printf("\n All workloads completed, notifying background traffic ");
        bf->c0 = 1;
        notify_background_traffic(ns, lp, bf, m);
        return;
    }
    
    struct codes_jobmap_id nbr_jid;
    nbr_jid.job = ns->app_id;
    tw_lpid global_dest_id;

    /* rank 0 starts the chain; others forward it once their neighbor reported */
    if(ns->is_finished == 1 && (ns->neighbor_completed == 1 || ns->local_rank == 0))
    {
        bf->c1 = 1;

        printf("\n Local rank %d notifying neighbor %d ", ns->local_rank, ns->local_rank+1);
        tw_stime ts = (1.1 * g_tw_lookahead) + tw_rand_exponential(lp->rng, mean_interval/10000);
        nbr_jid.rank = ns->local_rank + 1;
        
        /* Send a notification to the neighbor about completion */
        int intm_dest_id = codes_jobmap_to_global_id(nbr_jid, jobmap_ctx); 
        global_dest_id = codes_mapping_get_lpid_from_relative(intm_dest_id, NULL, NW_LP_NM, NULL, 0);
       
        tw_event * e;
        struct nw_message * m_new;  
        e = tw_event_new(global_dest_id, ts, lp);
        m_new = tw_event_data(e); 
        m_new->msg_type = CLI_NBR_FINISH;
        tw_event_send(e);   
    }
}
/* Reverse handler for finish_bckgnd_traffic(): re-arm the generator flag. */
void finish_bckgnd_traffic_rc(
    struct nw_state * ns,
    tw_bf * b,
    struct nw_message * msg,
    tw_lp * lp)
{
        (void)b;
        (void)msg;
        (void)lp;

        ns->is_finished = 0;
        return;
}
/* CLI_BCKGND_FIN handler: mark this synthetic-traffic LP finished so
 * gen_synthetic_tr() stops generating (it checks is_finished on each tick).
 * Reverse handler: finish_bckgnd_traffic_rc(). */
void finish_bckgnd_traffic(
    struct nw_state * ns,
    tw_bf * b,
    struct nw_message * msg,
    tw_lp * lp)
{
        (void)b;
        (void)msg;
        ns->is_finished = 1;
        lprintf("\n LP %llu completed sending data %lu completed at time %lf ", lp->gid, ns->gen_data, tw_now(lp));
        return;
}

/* Reverse handler for finish_nbr_wkld(): clear the neighbor-completed flag
 * and roll back any notification notify_neighbor() issued. */
void finish_nbr_wkld_rc(
    struct nw_state * ns,
    tw_bf * b,
    struct nw_message * msg,
    tw_lp * lp)
{
    ns->neighbor_completed = 0;
    
    notify_neighbor_rc(ns, lp, b, msg);
}

/* CLI_NBR_FINISH handler: the lower-ranked neighbor reported completion;
 * record it and (if we are also done) pass the token on up the chain. */
void finish_nbr_wkld(
    struct nw_state * ns,
    tw_bf * b,
    struct nw_message * msg,
    tw_lp * lp)
{
    printf("\n Workload completed, notifying neighbor ");
    ns->neighbor_completed = 1;

    notify_neighbor(ns, lp, b, msg);
}
/* Reverse handler for gen_synthetic_tr(). bf->c0 marks the early return taken
 * when the LP was already finished (nothing to undo). Otherwise undo the
 * model-net send, the byte counters, and exactly two RNG draws
 * (tw_rand_integer for the destination + tw_rand_exponential for the next
 * generation interval -- must stay in sync with the forward handler). */
static void gen_synthetic_tr_rc(nw_state * s, tw_bf * bf, nw_message * m, tw_lp * lp)
{
    if(bf->c0)
        return;

    model_net_event_rc2(lp, &m->event_rc);
    s->gen_data -= PAYLOAD_SZ;

    num_syn_bytes_sent -= PAYLOAD_SZ;
    tw_rand_reverse_unif(lp->rng);
    tw_rand_reverse_unif(lp->rng);

}

/* generate synthetic traffic */
/* CLI_BCKGND_GEN handler: send one PAYLOAD_SZ-byte message to a uniformly
 * random other rank of this job, then self-schedule the next generation tick.
 * RNG draws (tw_rand_integer, then tw_rand_exponential) must match the two
 * reversals in gen_synthetic_tr_rc(). */
static void gen_synthetic_tr(nw_state * s, tw_bf * bf, nw_message * m, tw_lp * lp)
{
    /* stop generating once finish_bckgnd_traffic() has flagged completion */
    if(s->is_finished == 1)
    {
        bf->c0 = 1;
        return;
    }

    /* Get job information */
    tw_lpid global_dest_id;

    struct codes_jobmap_id jid;
    jid = codes_jobmap_to_local_id(s->nw_id, jobmap_ctx); 

    int num_clients = codes_jobmap_get_num_ranks(jid.job, jobmap_ctx);
    int dest_svr = tw_rand_integer(lp->rng, 0, num_clients - 1);

    /* never send to self; deterministically shift to the next rank */
    if(dest_svr == s->local_rank)
    {
       dest_svr = (s->local_rank + 1) % num_clients;
    }
   
    jid.rank = dest_svr;

    int intm_dest_id = codes_jobmap_to_global_id(jid, jobmap_ctx); 
    global_dest_id = codes_mapping_get_lpid_from_relative(intm_dest_id, NULL, NW_LP_NM, NULL, 0);

    nw_message remote_m;
    remote_m.fwd.sim_start_time = tw_now(lp);
    remote_m.fwd.dest_rank = dest_svr;
    remote_m.msg_type = CLI_BCKGND_ARRIVE;
    remote_m.fwd.num_bytes = PAYLOAD_SZ;
    remote_m.fwd.app_id = s->app_id;
    remote_m.fwd.src_rank = s->local_rank;

    /* event_rc is saved in the message for model_net_event_rc2() on rollback */
    m->event_rc = model_net_event(net_id, "synthetic-tr", global_dest_id, PAYLOAD_SZ, 0.0, 
            sizeof(nw_message), (const void*)&remote_m, 
            0, NULL, lp);
    
    s->gen_data += PAYLOAD_SZ;
    num_syn_bytes_sent += PAYLOAD_SZ; 

    /* New event after MEAN_INTERVAL */  
    tw_stime ts = mean_interval  + tw_rand_exponential(lp->rng, NOISE); 
    tw_event * e;
    nw_message * m_new;
    e = tw_event_new(lp->gid, ts, lp);
    m_new = tw_event_data(e);
    m_new->msg_type = CLI_BCKGND_GEN;
    tw_event_send(e);
}

/* Reverse handler for arrive_syn_tr(): subtract the payload bytes back out
 * of the per-LP and global received-byte counters. */
void arrive_syn_tr_rc(nw_state * s, tw_bf * bf, nw_message * m, tw_lp * lp)
{
    (void)bf;
    (void)lp;
//    printf("\n Data arrived %d total data %ld ", m->fwd.num_bytes, s->syn_data);
    /* fwd.num_bytes is int64_t; the old `int` local silently truncated
     * payloads larger than INT_MAX. Mirror the type instead. */
    int64_t data = m->fwd.num_bytes;
    s->syn_data -= data;
    num_syn_bytes_recvd -= data;
}
/* CLI_BCKGND_ARRIVE handler: account the arriving synthetic payload in the
 * per-LP and global received-byte counters.
 * Reverse handler: arrive_syn_tr_rc(). */
void arrive_syn_tr(nw_state * s, tw_bf * bf, nw_message * m, tw_lp * lp)
{
    (void)bf;
    (void)lp;

//    printf("\n Data arrived %d total data %ld ", m->fwd.num_bytes, s->syn_data);
    /* fwd.num_bytes is int64_t; the old `int` local silently truncated
     * payloads larger than INT_MAX. Mirror the type instead. */
    int64_t data = m->fwd.num_bytes;
    s->syn_data += data;
    num_syn_bytes_recvd += data;
}
650
/* Debugging functions, may generate unused function warning */
/* Dump the request IDs a wait operation is blocked on (debug aid; output is
 * gated by the CS_LP_DBG flag inside lprintf). */
static void print_waiting_reqs(int32_t * reqs, int count)
{
    lprintf("\n Waiting reqs: %d count", count);
    for(int idx = 0; idx < count; idx++)
    {
        lprintf(" %d ", reqs[idx]);
    }
}
658
659
660
661
662
663
664
665
666
667
668
669
/* Debug helper: dump every entry of a send (arrival) or receive (pending)
 * message queue to stdout. */
static void print_msgs_queue(struct qlist_head * head, int is_send)
{
    if(is_send)
        printf("\n Send msgs queue: ");
    else
        printf("\n Recv msgs queue: ");

    struct qlist_head * ent = NULL;
    mpi_msgs_queue * current = NULL;
    qlist_for_each(ent, head)
       {
            current = qlist_entry(ent, mpi_msgs_queue, ql);
            /* num_bytes is uint64_t: use PRIu64 rather than %llu so the format
             * matches the argument type on all LP64/LLP64 platforms
             * (<inttypes.h> is already included at the top of this file). */
            printf(" \n Source %d Dest %d bytes %" PRIu64 " tag %d ", current->source_rank, current->dest_rank, current->num_bytes, current->tag);
       }
}
673
674
675
676
677
678
679
680
/* Debug helper: dump the request IDs currently sitting in the completed
 * requests queue. */
static void print_completed_queue(struct qlist_head * head)
{
    printf("\n Completed queue: ");
    struct qlist_head * pos = NULL;
    struct completed_requests * req = NULL;
    qlist_for_each(pos, head)
    {
        req = qlist_entry(pos, completed_requests, ql);
        printf(" %d ", req->req_id);
    }
}
684
/* Remove from s->completed_reqs every entry whose request ID appears in
 * reqs[0..count-1]. Removed nodes are pushed onto s->matched_reqs (with free
 * as the destructor) so add_completed_reqs() can restore them on rollback.
 * Returns the number of entries removed. */
static int clear_completed_reqs(nw_state * s,
        tw_lp * lp,
        int32_t * reqs, int count)
{
    (void)s;
    (void)lp;

    int i, matched = 0;

    for( i = 0; i < count; i++)
    {
      struct qlist_head * ent = NULL;
      struct completed_requests * current = NULL;
      struct completed_requests * prev = NULL;

      qlist_for_each(ent, &s->completed_reqs)
       {
            current = qlist_entry(ent, completed_requests, ql);
            
            /* deferred push: a node matched on the previous iteration is only
             * handed to the rc stack after we have stepped past it, so the
             * list walk never dereferences a node we just unlinked.
             * NOTE(review): this still advances via the unlinked node's next
             * pointer -- relies on qlist_del() leaving the entry's links
             * intact; confirm against the quicklist implementation. */
            if(prev)
              rc_stack_push(lp, prev, free, s->matched_reqs);
            
            if(current->req_id == reqs[i])
            {
                ++matched;
                qlist_del(&current->ql);
                prev = current;
            }
            else
                prev = NULL;
       }

      /* flush a match found on the final iteration */
      if(prev)
          rc_stack_push(lp, prev, free, s->matched_reqs);
    }
    return matched;
}
721
/* Rollback counterpart of clear_completed_reqs(): pop `count` saved nodes off
 * s->matched_reqs and splice them back into s->completed_reqs. */
static void add_completed_reqs(nw_state * s,
        tw_lp * lp,
        int count)
{
    (void)lp;
    int i;
    for( i = 0; i < count; i++)
    {
       /* rc_stack_pop returns nodes in reverse order of the pushes made in
        * the forward handler */
       struct completed_requests * req = rc_stack_pop(s->matched_reqs);
       qlist_add(&req->ql, &s->completed_reqs);
    }
}
733

734
735
736
737
738
739
/* helper function - maps an MPI rank to an LP id */
static tw_lpid rank_to_lpid(int rank)
{
    tw_lpid lpid = codes_mapping_get_lpid_from_relative(rank, NULL, "nw-lp", NULL, 0);
    return lpid;
}

740
/* Called when request `completed_req` finishes: check it against the pending
 * wait operation (if any). For a single WAIT, completion of the one tracked
 * request satisfies it; for WAITALL/WAITANY/WAITSOME the per-request completion
 * counter is advanced and the wait is satisfied when all `count` requests are
 * done. Sets m->fwd.wait_completed when this request advanced the counter (so
 * the reverse handler can undo it). Returns 1 iff the whole wait completed.
 * NOTE(review): WAITANY/WAITSOME share the all-must-complete logic of WAITALL
 * here -- confirm that is the intended approximation. */
static int notify_posted_wait(nw_state* s,
        tw_bf * bf, nw_message * m, tw_lp * lp,
        dumpi_req_id completed_req)
{
    (void)bf;

    struct pending_waits* wait_elem = s->wait_op;
    int wait_completed = 0;

    m->fwd.wait_completed = 0;

    /* no wait is currently posted -- nothing to match */
    if(!wait_elem)
        return 0;

    int op_type = wait_elem->op_type;

    if(op_type == CODES_WK_WAIT &&
            (wait_elem->req_ids[0] == completed_req))
    {
            wait_completed = 1;
    }
    else if(op_type == CODES_WK_WAITALL
            || op_type == CODES_WK_WAITANY
            || op_type == CODES_WK_WAITSOME)
    {
        int i;
        for(i = 0; i < wait_elem->count; i++)
        {
            if(wait_elem->req_ids[i] == completed_req)
            {
                wait_elem->num_completed++;
                /* over-completion indicates a matching bug; report it */
                if(wait_elem->num_completed > wait_elem->count)
                    printf("\n Num completed %d count %d LP %llu ",
                            wait_elem->num_completed,
                            wait_elem->count,
                            lp->gid);

//                if(wait_elem->num_completed > wait_elem->count)
//                    tw_lp_suspend(lp, 1, 0);

                if(wait_elem->num_completed == wait_elem->count)
                {
                    if(enable_debug)
                        fprintf(workload_log, "\n(%lf) APP ID %d MPI WAITALL COMPLETED AT %llu ", tw_now(lp), s->app_id, s->nw_id);
                    wait_completed = 1;
                }

                m->fwd.wait_completed = 1;
            }
        }
    }
    return wait_completed;
}
792

793
/* reverse handler of MPI wait operation */
/* Two forward outcomes to undo: (a) a pending_waits entry was installed ->
 * free it; (b) the request was already complete, so the forward handler
 * dequeued it and issued the next event -> undo the event and restore the
 * completed-request node from the rc stack. */
static void codes_exec_mpi_wait_rc(nw_state* s, tw_lp* lp)
{
    if(s->wait_op)
     {
         struct pending_waits * wait_op = s->wait_op;
         free(wait_op);
         s->wait_op = NULL;
     }
   else
    {
        codes_issue_next_event_rc(lp);
        completed_requests * qi = rc_stack_pop(s->processed_ops);
        qlist_add(&qi->ql, &s->completed_reqs);
    }
    return;
}
810

811
/* execute MPI wait operation */
/* If the awaited request has already completed, consume it (saving the node
 * on the rc stack for rollback) and advance to the next workload operation.
 * Otherwise install a single-request pending_waits entry that
 * notify_posted_wait() will satisfy later.
 * Reverse handler: codes_exec_mpi_wait_rc(). */
static void codes_exec_mpi_wait(nw_state* s, tw_lp* lp, struct codes_workload_op * mpi_op)
{
    /* check in the completed receives queue if the request ID has already been completed.*/
    assert(!s->wait_op);
    dumpi_req_id req_id = mpi_op->u.wait.req_id;
    struct completed_requests* current = NULL;

    struct qlist_head * ent = NULL;
    qlist_for_each(ent, &s->completed_reqs)
    {
        current = qlist_entry(ent, completed_requests, ql);
        if(current->req_id == req_id)
        {
            qlist_del(&current->ql);
            rc_stack_push(lp, current, free, s->processed_ops);
            codes_issue_next_event(lp);
            return;
        }
    }
    /* If not, add the wait operation in the pending 'waits' list. */
    /* NOTE(review): malloc result is not checked */
    struct pending_waits* wait_op = malloc(sizeof(struct pending_waits));
    wait_op->op_type = mpi_op->op_type;
    wait_op->req_ids[0] = req_id;
    wait_op->count = 1;
    wait_op->num_completed = 0;
    wait_op->start_time = tw_now(lp);
    s->wait_op = wait_op;

    return;
}

843
/* Reverse handler for codes_exec_mpi_wait_all(): undo the sampling counters
 * (bitfields c1/c2 record which sampling branches ran), then undo whichever
 * completion path the forward handler took -- either discard the installed
 * pending_waits entry, or restore the drained completed requests and undo the
 * issued next-operation event. */
static void codes_exec_mpi_wait_all_rc(
        nw_state* s,
        tw_bf * bf,
        nw_message * m,
        tw_lp* lp)
{
  if(bf->c1)
  {
    int sampling_indx = s->sampling_indx;
    s->mpi_wkld_samples[sampling_indx].num_waits_sample--;

    if(bf->c2)
    {
        /* forward handler rolled over to a new sampling interval */
        s->cur_interval_end -= sampling_interval;
        s->sampling_indx--;
    }
  }
  if(s->wait_op)
  {
      /* forward handler installed a pending wait -- discard it */
      struct pending_waits * wait_op = s->wait_op;
      free(wait_op);
      s->wait_op = NULL;
  }
  else
  {
      /* forward handler completed immediately -- restore the drained
       * requests from the rc stack and undo the next-event issue */
      add_completed_reqs(s, lp, m->fwd.num_matched);
      codes_issue_next_event_rc(lp);
  }
  return;
}
873

874
static void codes_exec_mpi_wait_all(
875
        nw_state* s,
876
877
        tw_bf * bf,
        nw_message * m,
878
        tw_lp* lp,
879
        struct codes_workload_op * mpi_op)
880
{
881
  if(enable_debug)
882
    fprintf(workload_log, "\n MPI WAITALL POSTED AT %llu ", s->nw_id);
883

884
885
886
887
888
889
890
891
  if(enable_sampling)
  {
    bf->c1 = 1;
    if(tw_now(lp) >= s->cur_interval_end)
    {
        bf->c2 = 1;
        int indx = s->sampling_indx;
        s->mpi_wkld_samples[indx].nw_id = s->nw_id;
892
        s->mpi_wkld_samples[indx].app_id = s->app_id;
893
894
895
896
897
898
        s->mpi_wkld_samples[indx].sample_end_time = s->cur_interval_end;
        s->cur_interval_end += sampling_interval;
        s->sampling_indx++;
    }
    if(s->sampling_indx >= MAX_STATS)
    {
899
        struct mpi_workload_sample * tmp = calloc((MAX_STATS + s->max_arr_size), sizeof(struct mpi_workload_sample));
900
901
902
903
904
905
906
907
        memcpy(tmp, s->mpi_wkld_samples, s->sampling_indx);
        free(s->mpi_wkld_samples);
        s->mpi_wkld_samples = tmp;
        s->max_arr_size += MAX_STATS;
    }
    int indx = s->sampling_indx;
    s->mpi_wkld_samples[indx].num_waits_sample++;
  }
908
  int count = mpi_op->u.waits.count;
909
910
  /* If the count is not less than max wait reqs then stop */
  assert(count < MAX_WAIT_REQS);
911

912
  int i = 0, num_matched = 0;
913
  m->fwd.num_matched = 0;
914

915
  if(lp->gid == TRACK_LP)
916
  {
917
      printf("\n MPI Wait all posted ");
918
919
      print_waiting_reqs(mpi_op->u.waits.req_ids, count);
      print_completed_queue(&s->completed_reqs);
920
  }
921
      /* check number of completed irecvs in the completion queue */
922
923
924
925
926
927
928
  for(i = 0; i < count; i++)
  {
      dumpi_req_id req_id = mpi_op->u.waits.req_ids[i];
      struct qlist_head * ent = NULL;
      struct completed_requests* current = NULL;
      qlist_for_each(ent, &s->completed_reqs)
       {
929
            current = qlist_entry(ent, struct completed_requests, ql);
930
931
932
933
            if(current->req_id == req_id)
                num_matched++;
       }
  }
934

935
  m->fwd.found_match = num_matched;
936
937
938
939
  if(num_matched == count)
  {
    /* No need to post a MPI Wait all then, issue next event */
      /* Remove all completed requests from the list */
940
941
942
      m->fwd.num_matched = clear_completed_reqs(s, lp, mpi_op->u.waits.req_ids, count);
      struct pending_waits* wait_op = s->wait_op;
      free(wait_op);
943
944
      s->wait_op = NULL;
      codes_issue_next_event(lp);
945
946
  }
  else
947
948
949
950
951
952
953
954
955
956
957
  {
      /* If not, add the wait operation in the pending 'waits' list. */
	  struct pending_waits* wait_op = malloc(sizeof(struct pending_waits));
	  wait_op->count = count;
      wait_op->op_type = mpi_op->op_type;
      assert(count < MAX_WAIT_REQS);

      for(i = 0; i < count; i++)
          wait_op->req_ids[i] =  mpi_op->u.waits.req_ids[i];

	  wait_op->num_completed = num_matched;
958
	  wait_op->start_time = tw_now(lp);
959
      s->wait_op = wait_op;
960
  }
961
962
  return;
}
963

964
965
/* search for a matching mpi operation and remove it from the list.
 * Record the index in the list from where the element got deleted.
966
 * Index is used for inserting the element once again in the queue for reverse computation. */
967
static int rm_matching_rcv(nw_state * ns,
968
        tw_bf * bf,
969
970
        nw_message * m,
        tw_lp * lp,
971
        mpi_msgs_queue * qitem)
972
973
{
    int matched = 0;
974
    int index = 0;
975
976
    struct qlist_head *ent = NULL;
    mpi_msgs_queue * qi = NULL;
977

978
979
    qlist_for_each(ent, &ns->pending_recvs_queue){
        qi = qlist_entry(ent, mpi_msgs_queue, ql);
980
981
        if(//(qi->num_bytes == qitem->num_bytes)
                //&& 
982
               ((qi->tag == qitem->tag) || qi->tag == -1)
983
                && ((qi->source_rank == qitem->source_rank) || qi->source_rank == -1))
984
985
        {
            matched = 1;
986
            //qitem->num_bytes = qi->num_bytes;
987
988
            break;
        }
989
        ++index;
990
    }
991

992
993
    if(matched)
    {
994
995
996
997
998
999
1000
        if(enable_msg_tracking && qitem->num_bytes < EAGER_THRESHOLD)
        {
            update_message_size(ns, lp, bf, m, qitem, 1, 1);
        }
        if(qitem->num_bytes >= EAGER_THRESHOLD)
        {
            /* Matching receive found, need to notify the sender to transmit
For faster browsing, not all history is shown. View entire blame