/*
 * Copyright (C) 2014 University of Chicago.
 * See COPYRIGHT notice in top-level directory.
 *
 */
#include <ross.h>
#include <inttypes.h>

#include "codes/codes-workload.h"
#include "codes/codes.h"
#include "codes/configuration.h"
#include "codes/codes_mapping.h"
#include "codes/model-net.h"

#define TRACE -1
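/* set TRACE to an nw-lp gid to enable debug prints for that LP; -1 leaves tracing disabled */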

char workload_type[128];
char workload_file[8192];
char offset_file[8192];
static int wrkld_id;
static int num_net_traces = 0;

typedef struct nw_state nw_state;
typedef struct nw_message nw_message;
typedef int16_t dumpi_req_id;

static int net_id = 0;
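/* mean (in ns) of the exponential jitter added when scheduling the LP's own events */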
static float noise = 5.0;
static int num_net_lps, num_nw_lps;
long long num_bytes_sent=0;
long long num_bytes_recvd=0;
double max_time = 0,  max_comm_time = 0, max_wait_time = 0, max_send_time = 0, max_recv_time = 0;
double avg_time = 0, avg_comm_time = 0, avg_wait_time = 0, avg_send_time = 0, avg_recv_time = 0;

/* global variables for codes mapping */
static char lp_group_name[MAX_NAME_LENGTH], lp_type_name[MAX_NAME_LENGTH], annotation[MAX_NAME_LENGTH];
static int mapping_grp_id, mapping_type_id, mapping_rep_id, mapping_offset;

/* MPI_OP_GET_NEXT fetches the next MPI operation once the previous operation completes.
* MPI_SEND_ARRIVED is issued when an MPI message arrives at its destination (the message is transported by model-net and an event is invoked on arrival).
* MPI_SEND_POSTED is issued when an MPI message has left the source LP (message is transported via model-net). */
enum MPI_NW_EVENTS
{
	MPI_OP_GET_NEXT=1,
	MPI_SEND_ARRIVED,
	MPI_SEND_POSTED,
};

/* stores pointers of pending MPI operations to be matched with their respective sends/receives. */
struct mpi_msgs_queue
{
	struct codes_workload_op* mpi_op;
	struct mpi_msgs_queue* next;
};

/* stores request IDs of completed MPI operations (Isends or Irecvs) */
struct completed_requests
{
	dumpi_req_id req_id;
	struct completed_requests* next;
};

/* for wait operations, store the pending operation and number of completed waits so far. */
struct pending_waits
{
	struct codes_workload_op* mpi_op;
	int num_completed;
	tw_stime start_time;
};

/* maintains the head, the tail and the number of elements currently in the queue. The two queues used are the pending_recvs_queue (holds unmatched MPI recv operations) and the arrival_queue (holds unmatched MPI send messages). */
struct mpi_queue_ptrs
{
	int num_elems;
	struct mpi_msgs_queue* queue_head;
	struct mpi_msgs_queue* queue_tail;
};

/* state of the network LP. It contains the pointers to send/receive lists */
struct nw_state
{
	long num_events_per_lp;
	tw_lpid nw_id;
	short wrkld_end;

	/* count of sends, receives, collectives and delays */
	unsigned long num_sends;
	unsigned long num_recvs;
	unsigned long num_cols;
	unsigned long num_delays;
	unsigned long num_wait;
	unsigned long num_waitall;
	unsigned long num_waitsome;

	/* time spent by the LP in executing the app trace*/
	double elapsed_time;

	/* time spent in compute operations */
	double compute_time;

	/* time spent in message send/isend */
	double send_time;

	/* time spent in message receive */
	double recv_time;
	
	/* time spent in wait operation */
	double wait_time;

	/* FIFO for isend messages arrived on destination */
	struct mpi_queue_ptrs* arrival_queue;

	/* FIFO for irecv messages posted but not yet matched with send operations */
	struct mpi_queue_ptrs* pending_recvs_queue;

	/* list of pending waits (and saved pending wait for reverse computation) */
	struct pending_waits* pending_waits;

	/* List of completed send/receive requests */
	struct completed_requests* completed_reqs;
};

/* data for handling reverse computation.
* saved_matched_req holds the request ID of the matched receive/send for wait operations.
* ptr_match_op holds the matched MPI operation that is removed from the queue when a send is matched with a receive in the forward event handler.
* msg_type is the type of the network event being sent. op is the MPI operation issued by the network workloads API. rc holds the data for reverse computation (TODO: fill this data structure only when the simulation runs in optimistic mode). */
struct nw_message
{
   int msg_type;
   /* for reverse computation */
   struct codes_workload_op * op;

   struct
   {
     /* forward event handler */
     struct
     {
        int op_type;
        tw_lpid src_rank;
        tw_lpid dest_rank;
        int num_bytes;
        int data_type;
        double sim_start_time;
        int16_t req_id;   
        int tag;
     } msg_info;

     /* required for reverse computation*/
     struct 
      {
	int found_match;
	short matched_op;
	dumpi_req_id saved_matched_req;
	struct codes_workload_op* ptr_match_op;
	struct pending_waits* saved_pending_wait;

	double saved_send_time;
	double saved_recv_time;
	double saved_wait_time;
      } rc;
  } u;
};

/* executes MPI wait operation */
static void codes_exec_mpi_wait(nw_state* s, tw_bf* bf, nw_message* m, tw_lp* lp);

/* reverse of mpi wait function. */
static void codes_exec_mpi_wait_rc(nw_state* s, tw_bf* bf, nw_message* m, tw_lp* lp);

/* executes MPI isend and send operations */
static void codes_exec_mpi_send(nw_state* s, nw_message* m, tw_lp* lp);

/* execute MPI irecv operation */
static void codes_exec_mpi_recv(nw_state* s, nw_message* m, tw_lp* lp);

/* reverse of mpi recv function. */
static void codes_exec_mpi_recv_rc(nw_state* s, nw_message* m, tw_lp* lp);

/* execute the computational delay */
static void codes_exec_comp_delay(nw_state* s, nw_message* m, tw_lp* lp);

/* execute collective operation, currently only skips these operations. */
static void codes_exec_mpi_col(nw_state* s, nw_message* m, tw_lp* lp);

/* gets the next MPI operation from the network-workloads API. */
static void get_next_mpi_operation(nw_state* s, tw_bf * bf, nw_message * m, tw_lp * lp);

/* reverse handler of get next mpi operation. */
static void get_next_mpi_operation_rc(nw_state* s, tw_bf * bf, nw_message * m, tw_lp * lp);

/* Makes a call to get_next_mpi_operation. */
static void codes_issue_next_event(tw_lp* lp);

///////////////////// HELPER FUNCTIONS FOR MPI MESSAGE QUEUE HANDLING ///////////////
/* upon arrival of local completion message, inserts operation in completed send queue */
static void update_send_completion_queue(nw_state*s, tw_bf* bf, nw_message* m, tw_lp * lp);

/* reverse of the above function */
static void update_send_completion_queue_rc(nw_state*s, tw_bf* bf, nw_message* m, tw_lp * lp);

/* upon arrival of an isend operation, updates the arrival queue of the network */
static void update_arrival_queue(nw_state*s, tw_bf* bf, nw_message* m, tw_lp * lp);

/* reverse of the above function */
static void update_arrival_queue_rc(nw_state*s, tw_bf* bf, nw_message* m, tw_lp * lp);

/* insert MPI operation in the waiting queue */
static void mpi_pending_queue_insert_op(struct mpi_queue_ptrs* mpi_queue, struct codes_workload_op* mpi_op);

/* remove a completed request ID from the queue so that it can be reused */
static void remove_req_id(struct completed_requests** requests, int16_t req_id);

/* remove MPI operation from the waiting queue. */
static int mpi_queue_remove_matching_op(nw_state* s, tw_lp* lp, struct mpi_queue_ptrs* mpi_queue, nw_message * m);

/* remove the tail of the MPI operation waiting queue */
static int mpi_queue_remove_tail(tw_lpid lpid, struct mpi_queue_ptrs* mpi_queue);

/* insert completed MPI requests in the queue. */
static void mpi_completed_queue_insert_op(struct completed_requests** mpi_completed_queue, dumpi_req_id req_id);

/* notifies the wait operations (if any) about the completed send and receive requests. */
static int notify_waits(nw_state* s, tw_bf* bf, tw_lp* lp, nw_message* m, dumpi_req_id req_id);

/* reverse of notify_waits function. */
static void notify_waits_rc(nw_state* s, tw_bf* bf, tw_lp* lp, nw_message* m, dumpi_req_id completed_req);

/* conversion from seconds to nanoseconds */
static tw_stime s_to_ns(tw_stime ns);

/* initializes the queue and allocates memory */
static struct mpi_queue_ptrs* queue_init()
{
	struct mpi_queue_ptrs* mpi_queue = malloc(sizeof(struct mpi_queue_ptrs));

	mpi_queue->num_elems = 0;
	mpi_queue->queue_head = NULL;
	mpi_queue->queue_tail = NULL;
	
	return mpi_queue;
}

/* helper function: counts the number of elements in the queue */
static int numQueue(struct mpi_queue_ptrs* mpi_queue)
{
	struct mpi_msgs_queue* tmp = mpi_queue->queue_head;
	int count = 0;

	while(tmp)
	{
		++count;
		tmp = tmp->next;
	}
	return count;
}

/* prints elements in a send/recv queue */
static void printQueue(tw_lpid lpid, struct mpi_queue_ptrs* mpi_queue, char* msg)
{
	printf("\n ************ Printing the queue %s *************** ", msg);
	struct mpi_msgs_queue* tmp = mpi_queue->queue_head;

	while(tmp)
	{
		if(tmp->mpi_op->op_type == CODES_WK_SEND || tmp->mpi_op->op_type == CODES_WK_ISEND)
			printf("\n lpid %ld send operation data type %d count %d tag %d source %d", 
				    lpid, tmp->mpi_op->u.send.data_type, tmp->mpi_op->u.send.count, 
				     tmp->mpi_op->u.send.tag, tmp->mpi_op->u.send.source_rank);
		else if(tmp->mpi_op->op_type == CODES_WK_IRECV || tmp->mpi_op->op_type == CODES_WK_RECV)
			printf("\n lpid %ld recv operation data type %d count %d tag %d source %d", 
				   lpid, tmp->mpi_op->u.recv.data_type, tmp->mpi_op->u.recv.count, 
				    tmp->mpi_op->u.recv.tag, tmp->mpi_op->u.recv.source_rank );
		else
			printf("\n Invalid data type in the queue %d ", tmp->mpi_op->op_type);
		tmp = tmp->next;
	}
}

/* re-insert element in the queue at the index --- maintained for reverse computation */
static void mpi_queue_update(struct mpi_queue_ptrs* mpi_queue, struct codes_workload_op* mpi_op, int pos)
{
	struct mpi_msgs_queue* elem = malloc(sizeof(struct mpi_msgs_queue));
	assert(elem);
	elem->mpi_op = mpi_op;
	
	/* inserting at the head */
	if(pos == 0)
	{
	   if(!mpi_queue->queue_tail)
		mpi_queue->queue_tail = elem;
	   elem->next = mpi_queue->queue_head;
	   mpi_queue->queue_head = elem;
	   mpi_queue->num_elems++;
	   return;
	}

	int index = 0;
	struct mpi_msgs_queue* tmp = mpi_queue->queue_head;
	while(index < pos - 1)
	{
		tmp = tmp->next;
		++index;
	}

	if(!tmp)
		printf("\n Invalid index! %d pos %d size %d ", index, pos, numQueue(mpi_queue));
	if(tmp == mpi_queue->queue_tail)
	    mpi_queue->queue_tail = elem;

	elem->next = tmp->next;
	tmp->next = elem;
	mpi_queue->num_elems++;

	return;
}

/* prints the elements of the completed requests queue (for debugging purposes). */
static void printCompletedQueue(nw_state* s, tw_lp* lp)
{
	   if(TRACE == lp->gid)
	   {
	   	printf("\n %lf contents of completed operations queue ", tw_now(lp));
	   	struct completed_requests* current = s->completed_reqs;
	   	while(current)
	    	{
			printf(" %d ",current->req_id);
			current = current->next;
	   	}
	   }
}

/* reverse handler of notify_waits function. */
static void notify_waits_rc(nw_state* s, tw_bf* bf, tw_lp* lp, nw_message* m, dumpi_req_id completed_req)
{
   int i;

   /*if(bf->c1)
    {*/
	/* if pending wait is still present and is of type MPI_WAIT then do nothing*/
/*	s->wait_time = s->saved_wait_time; 	
	mpi_completed_queue_insert_op(&s->completed_reqs, completed_req);	
	s->pending_waits = wait_elem;
	s->saved_pending_wait = NULL;
    }
*/
  if(lp->gid == TRACE)
	  printf("\n %lf reverse -- notify waits req id %d ", tw_now(lp), completed_req);
  printCompletedQueue(s, lp);
  if(m->u.rc.matched_op == 1)
	s->pending_waits->num_completed--;
   /* if a wait-elem exists, it means the request ID has been matched*/
   if(m->u.rc.matched_op == 2) 
    {
	if(lp->gid == TRACE)
	{
		printf("\n %lf matched req id %d ", tw_now(lp), completed_req);
		printCompletedQueue(s, lp);
	}
        struct pending_waits* wait_elem = m->u.rc.saved_pending_wait;
	s->wait_time = m->u.rc.saved_wait_time;
	int count = wait_elem->mpi_op->u.waits.count; 

	for( i = 0; i < count; i++ )
		mpi_completed_queue_insert_op(&s->completed_reqs, wait_elem->mpi_op->u.waits.req_ids[i]);

	wait_elem->num_completed--;	
	s->pending_waits = wait_elem;
	tw_rand_reverse_unif(lp->rng);

   }
}

/* notify the completed send/receive request to the wait operation. */
static int notify_waits(nw_state* s, tw_bf* bf, tw_lp* lp, nw_message* m, dumpi_req_id completed_req)
{
	int i;
	/* traverse the pending waits list and look what type of wait operations are 
	there. If its just a single wait and the request ID has just been completed, 
	then the network node LP can go on with fetching the next operation from the log.
	If its waitall then wait for all pending requests to complete and then proceed. */
	struct pending_waits* wait_elem = s->pending_waits;
	m->u.rc.matched_op = 0;
	
	if(lp->gid == TRACE)
		printf("\n %lf notify waits req id %d ", tw_now(lp), completed_req);

	if(!wait_elem)
		return 0;

	int op_type = wait_elem->mpi_op->op_type;

	if(op_type == CODES_WK_WAIT)
	{
		if(wait_elem->mpi_op->u.wait.req_id == completed_req)	
		  {
			m->u.rc.saved_wait_time = s->wait_time;
			s->wait_time += (tw_now(lp) - wait_elem->start_time);
                        remove_req_id(&s->completed_reqs, completed_req);

			m->u.rc.saved_pending_wait = wait_elem;
			s->pending_waits = NULL;
			codes_issue_next_event(lp);	
			return 0;
		 }
	}
	else
	if(op_type == CODES_WK_WAITALL)
	{
	   int required_count = wait_elem->mpi_op->u.waits.count;
	  for(i = 0; i < required_count; i++)
	   {
	    if(wait_elem->mpi_op->u.waits.req_ids[i] == completed_req)
		{
			if(lp->gid == TRACE)
				printCompletedQueue(s, lp);
			m->u.rc.matched_op = 1;
			wait_elem->num_completed++;
		}
	   }
	   
	    if(wait_elem->num_completed == required_count)
	     {
		if(lp->gid == TRACE)
		{
			printf("\n %lf req %d completed %d", tw_now(lp), completed_req, wait_elem->num_completed);
			printCompletedQueue(s, lp);
		}
		m->u.rc.matched_op = 2;
		m->u.rc.saved_wait_time = s->wait_time;
		s->wait_time += (tw_now(lp) - wait_elem->start_time);
		m->u.rc.saved_pending_wait = wait_elem;
		s->pending_waits = NULL; 
		for(i = 0; i < required_count; i++)
			remove_req_id(&s->completed_reqs, wait_elem->mpi_op->u.waits.req_ids[i]);	
		codes_issue_next_event(lp); //wait completed
	    }
       }
	return 0;
}

/* reverse handler of MPI wait operation */
static void codes_exec_mpi_wait_rc(nw_state* s, tw_bf* bf, nw_message* m, tw_lp* lp)
{
    if(s->pending_waits)
     {
    	s->pending_waits = NULL;
	return;
     }
   else
    {
 	mpi_completed_queue_insert_op(&s->completed_reqs, m->op->u.wait.req_id);	
	tw_rand_reverse_unif(lp->rng);		
    }
}

/* execute MPI wait operation */
static void codes_exec_mpi_wait(nw_state* s, tw_bf* bf, nw_message* m, tw_lp* lp)
{
    /* check in the completed receives queue if the request ID has already been completed.*/
    assert(!s->pending_waits);
    dumpi_req_id req_id = m->op->u.wait.req_id;

    struct completed_requests* current = s->completed_reqs;
    while(current) {
        if(current->req_id == req_id) {
            remove_req_id(&s->completed_reqs, req_id);
            m->u.rc.saved_wait_time = s->wait_time;
            codes_issue_next_event(lp);
            return;
        }
        current = current->next;
    }

    /* If not, add the wait operation in the pending 'waits' list. */
    struct pending_waits* wait_op = malloc(sizeof(struct pending_waits));
    wait_op->mpi_op = m->op;
    wait_op->num_completed = 0;
    wait_op->start_time = tw_now(lp);
    s->pending_waits = wait_op;
}

static void codes_exec_mpi_wait_all_rc(nw_state* s, tw_bf* bf, nw_message* m, tw_lp* lp)
{
   if(lp->gid == TRACE)
  {
   printf("\n %lf codes exec mpi waitall reverse %d ", tw_now(lp), m->u.rc.found_match);
   printCompletedQueue(s, lp); 
  } 
  if(m->u.rc.found_match)
    {
   	int i;
	int count = m->op->u.waits.count;
	dumpi_req_id req_id[count];

	for( i = 0; i < count; i++)
	{
		req_id[i] = m->op->u.waits.req_ids[i];
		mpi_completed_queue_insert_op(&s->completed_reqs, req_id[i]);
	}
	tw_rand_reverse_unif(lp->rng);
    }
    else
    {
	struct pending_waits* wait_op = s->pending_waits;
	free(wait_op);
	s->pending_waits = NULL;
	assert(!s->pending_waits);
	if(lp->gid == TRACE)
		printf("\n %lf Nullifying codes waitall ", tw_now(lp));
    }
}
static void codes_exec_mpi_wait_all(nw_state* s, tw_bf* bf, nw_message* m, tw_lp* lp)
{
  //assert(!s->pending_waits);
  int count = m->op->u.waits.count;
  int i, num_completed = 0;
  dumpi_req_id req_id[count];
  struct completed_requests* current = s->completed_reqs;

  /* check number of completed irecvs in the completion queue */ 
  if(lp->gid == TRACE)
    {
  	printf(" \n (%lf) MPI waitall posted %d count", tw_now(lp), m->op->u.waits.count);
	for(i = 0; i < count; i++)
		printf(" %d ", (int)m->op->u.waits.req_ids[i]);
   	printCompletedQueue(s, lp);	 
   }
  while(current) 
   {
	  for(i = 0; i < count; i++)
	   {
	     req_id[i] = m->op->u.waits.req_ids[i];
	     if(req_id[i] == current->req_id)
 		 num_completed++;
   	  }
	 current = current->next;
   }

  if(TRACE== lp->gid)
	  printf("\n %lf Num completed %d count %d ", tw_now(lp), num_completed, count);

  m->u.rc.found_match = 0;
  if(count == num_completed)
  {
	m->u.rc.found_match = 1;
	for( i = 0; i < count; i++)	
		remove_req_id(&s->completed_reqs, req_id[i]);

	codes_issue_next_event(lp);
  }
  else
  {
 	/* If not, add the wait operation in the pending 'waits' list. */
	  struct pending_waits* wait_op = malloc(sizeof(struct pending_waits));
	  wait_op->mpi_op = m->op;  
	  wait_op->num_completed = num_completed;
	  wait_op->start_time = tw_now(lp);
	  s->pending_waits = wait_op;
  }
}

/* request ID is being reused so delete it from the list once the matching is done */
static void remove_req_id(struct completed_requests** mpi_completed_queue, dumpi_req_id req_id)
{
	struct completed_requests* current = *mpi_completed_queue;

	if(!current)
		tw_error(TW_LOC, "\n REQ ID DOES NOT EXIST");
	
       if(current->req_id == req_id)
	{
		*mpi_completed_queue = current->next;
		free(current);
		return;
	}
	
	struct completed_requests* elem;
	while(current->next)
	{
	   elem = current->next;
	   if(elem->req_id == req_id)	
	     {
		current->next = elem->next;
		free(elem);
		return;
	     }
	   current = current->next;	
	}
	return;
}

/* inserts mpi operation in the completed requests queue */
static void mpi_completed_queue_insert_op(struct completed_requests** mpi_completed_queue, dumpi_req_id req_id)
{
	struct completed_requests* reqs = malloc(sizeof(struct completed_requests));
	assert(reqs);

	reqs->req_id = req_id;

	if(!(*mpi_completed_queue))	
	{
			reqs->next = NULL;
			*mpi_completed_queue = reqs;
			return;
	}
	reqs->next = *mpi_completed_queue;
	*mpi_completed_queue = reqs;
	return;
}

/* insert an MPI send or receive operation at the tail of the queue. Unmatched sends go to the arrival queue and unmatched receives go to the pending receives queue. */
static void mpi_pending_queue_insert_op(struct mpi_queue_ptrs* mpi_queue, struct codes_workload_op* mpi_op)
{
	/* insert mpi operation */
	struct mpi_msgs_queue* elem = malloc(sizeof(struct mpi_msgs_queue));
	assert(elem);

	elem->mpi_op = mpi_op;
     	elem->next = NULL;

	if(!mpi_queue->queue_head)
	  mpi_queue->queue_head = elem;

	if(mpi_queue->queue_tail)
	    mpi_queue->queue_tail->next = elem;
	
        mpi_queue->queue_tail = elem;
	mpi_queue->num_elems++;

	return;
}

/* match the send/recv operations */
static int match_receive(nw_state* s, tw_lp* lp, tw_lpid lpid, struct codes_workload_op* op1, struct codes_workload_op* op2)
{
        assert(op1->op_type == CODES_WK_IRECV || op1->op_type == CODES_WK_RECV);
        assert(op2->op_type == CODES_WK_SEND || op2->op_type == CODES_WK_ISEND);
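        /* a receive matches a send when the receive buffer is at least as large as the message and the tag and source either match exactly or are wildcards (-1) */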

        if((op1->u.recv.num_bytes >= op2->u.send.num_bytes) &&
                   ((op1->u.recv.tag == op2->u.send.tag) || op1->u.recv.tag == -1) &&
                   ((op1->u.recv.source_rank == op2->u.send.source_rank) || op1->u.recv.source_rank == -1))
                   {
                        if(lp->gid == TRACE)
                           printf("\n op1 rank %d bytes %d ", op1->u.recv.source_rank, op1->u.recv.num_bytes);
                        s->recv_time += tw_now(lp) - op1->sim_start_time;
                        mpi_completed_queue_insert_op(&s->completed_reqs, op1->u.recv.req_id);
                        return 1;
                   }
        return -1;
}

/* used for reverse computation. removes the tail of the queue */
static int mpi_queue_remove_tail(tw_lpid lpid, struct mpi_queue_ptrs* mpi_queue)
{
	assert(mpi_queue->queue_tail);
	if(mpi_queue->queue_tail == NULL)
	{
		printf("\n Error! tail not updated ");	
		return 0;
	}
	struct mpi_msgs_queue* tmp = mpi_queue->queue_head;

	if(mpi_queue->queue_head == mpi_queue->queue_tail)
	{
		mpi_queue->queue_head = NULL;
		mpi_queue->queue_tail = NULL;
		free(tmp);
		mpi_queue->num_elems--;
		 return 1;
	}

	struct mpi_msgs_queue* elem = mpi_queue->queue_tail;

	while(tmp->next != mpi_queue->queue_tail)
		tmp = tmp->next;

	mpi_queue->queue_tail = tmp;
	mpi_queue->queue_tail->next = NULL;
	mpi_queue->num_elems--;

	free(elem);
	return 1;
}

/* search for a matching mpi operation and remove it from the list. 
 * Record the index in the list from where the element got deleted. 
 * Index is used for inserting the element once again in the queue for reverse computation. */
static int mpi_queue_remove_matching_op(nw_state* s, tw_lp* lp, struct mpi_queue_ptrs* mpi_queue, nw_message * m)
{
       struct codes_workload_op * mpi_op = m->op;

	if(mpi_queue->queue_head == NULL)
		return -1;

	/* remove mpi operation */
	struct mpi_msgs_queue* tmp = mpi_queue->queue_head;
	int indx = 0;

	/* if head of the list has the required mpi op to be deleted */
	int rcv_val = 0;
	if(mpi_op->op_type == CODES_WK_SEND || mpi_op->op_type == CODES_WK_ISEND)
	  {
		rcv_val = match_receive(s, lp, lp->gid, tmp->mpi_op, mpi_op);
		m->u.rc.saved_matched_req = tmp->mpi_op->u.recv.req_id;  
	 }
	else if(mpi_op->op_type == CODES_WK_RECV || mpi_op->op_type == CODES_WK_IRECV)
	  {
		rcv_val = match_receive(s, lp, lp->gid, mpi_op, tmp->mpi_op);
	  	m->u.rc.saved_matched_req = mpi_op->u.recv.req_id;
	  }
	if(rcv_val >= 0)
	{
		m->u.rc.ptr_match_op = tmp->mpi_op;
		if(mpi_queue->queue_head == mpi_queue->queue_tail)
		   {
			mpi_queue->queue_tail = NULL;
			mpi_queue->queue_head = NULL;
			 free(tmp);
		   }
		 else
		   {
			mpi_queue->queue_head = tmp->next;
			free(tmp);	
		   }
		mpi_queue->num_elems--;
		return indx;
	}

	/* record the index where matching operation has been found */
	struct mpi_msgs_queue* elem;

	while(tmp->next)	
	{
	   indx++;
	   elem = tmp->next;
	   
	    if(mpi_op->op_type == CODES_WK_SEND || mpi_op->op_type == CODES_WK_ISEND)
	     {
		rcv_val = match_receive(s, lp, lp->gid, elem->mpi_op, mpi_op);
	     	m->u.rc.saved_matched_req = elem->mpi_op->u.recv.req_id; 
	     }
	    else if(mpi_op->op_type == CODES_WK_RECV || mpi_op->op_type == CODES_WK_IRECV)
	     {
		rcv_val = match_receive(s, lp, lp->gid, mpi_op, elem->mpi_op);
		m->u.rc.saved_matched_req = mpi_op->u.recv.req_id;
	     }
   	     if(rcv_val >= 0)
		{
		    m->u.rc.ptr_match_op = elem->mpi_op;
		    if(elem == mpi_queue->queue_tail)
			mpi_queue->queue_tail = tmp;
		    
		    tmp->next = elem->next;

		    free(elem);
		    mpi_queue->num_elems--;
		
		    return indx;
		}
	   tmp = tmp->next;
        }
	return -1;
}
/* Trigger getting next event at LP */
static void codes_issue_next_event(tw_lp* lp)
{
   tw_event *e;
   nw_message* msg;

   tw_stime ts;
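   /* schedule the self event a lookahead plus a small exponential jitter in the future so it stays strictly ahead of the current time */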

   ts = g_tw_lookahead + 0.1 + tw_rand_exponential(lp->rng, noise);
   e = tw_event_new( lp->gid, ts, lp );
   msg = tw_event_data(e);

   msg->msg_type = MPI_OP_GET_NEXT;
   tw_event_send(e);
}

/* Simulate delays between MPI operations */
static void codes_exec_comp_delay(nw_state* s, nw_message* m, tw_lp* lp)
{
	struct codes_workload_op* mpi_op = m->op;
	tw_event* e;
	tw_stime ts;
	nw_message* msg;

	s->compute_time += s_to_ns(mpi_op->u.delay.seconds);
	ts = s_to_ns(mpi_op->u.delay.seconds) + g_tw_lookahead + 0.1;
	ts += tw_rand_exponential(lp->rng, noise);
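	/* once the simulated compute delay elapses, fetch the next MPI operation from the trace */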
	
	e = tw_event_new( lp->gid, ts , lp );
	msg = tw_event_data(e);
	msg->msg_type = MPI_OP_GET_NEXT;

	tw_event_send(e); 
}

/* reverse computation operation for MPI irecv */
static void codes_exec_mpi_recv_rc(nw_state* s, nw_message* m, tw_lp* lp)
{
	num_bytes_recvd -= m->op->u.recv.num_bytes;
	s->recv_time = m->u.rc.saved_recv_time;
	if(m->u.rc.found_match >= 0)
	  {
		s->recv_time = m->u.rc.saved_recv_time;
		mpi_queue_update(s->arrival_queue, m->u.rc.ptr_match_op, m->u.rc.found_match);
		remove_req_id(&s->completed_reqs, m->op->u.recv.req_id);
		tw_rand_reverse_unif(lp->rng);
	  }
	else if(m->u.rc.found_match < 0)
	    {
		mpi_queue_remove_tail(lp->gid, s->pending_recvs_queue);
		if(m->op->op_type == CODES_WK_IRECV)
			tw_rand_reverse_unif(lp->rng);
	    }
}

/* Execute MPI Irecv operation (non-blocking receive) */ 
static void codes_exec_mpi_recv(nw_state* s, nw_message* m, tw_lp* lp)
{
/* Once an irecv is posted, list of completed sends is checked to find a matching isend.
   If no matching isend is found, the receive operation is queued in the pending queue of
   receive operations. */

	m->u.rc.saved_recv_time = s->recv_time;
	struct codes_workload_op* mpi_op = m->op;
	mpi_op->sim_start_time = tw_now(lp);
	num_bytes_recvd += mpi_op->u.recv.num_bytes;

	if(lp->gid == TRACE)
		printf("\n %lf codes exec mpi recv req id %d", tw_now(lp), (int)mpi_op->u.recv.req_id);
	
	dumpi_req_id req_id;
	int found_matching_sends = mpi_queue_remove_matching_op(s, lp, s->arrival_queue, m);
	
	/* save the req id inserted in the completed queue for reverse computation. */
	//m->matched_recv = req_id;

	if(found_matching_sends < 0)
	  {
		m->u.rc.found_match = -1;
		mpi_pending_queue_insert_op(s->pending_recvs_queue, mpi_op);
	
	       /* for mpi irecvs, this is a non-blocking receive so just post it and move on with the trace read. */
		if(mpi_op->op_type == CODES_WK_IRECV)
		   {
			codes_issue_next_event(lp);	
			return;
		   }
		else
			printf("\n CODES MPI RECV OPERATION!!! ");
	  }
	else
	  {
		/*if(lp->gid == TRACE)
			printf("\n Matched after removing: arrival queue num_elems %d ", s->arrival_queue->num_elems);*/
		/* update completed requests list */
		//int count_after = numQueue(s->arrival_queue);
		//assert(count_before == (count_after+1));
	   	m->u.rc.found_match = found_matching_sends;
		codes_issue_next_event(lp); 
	 }
}

/* executes MPI send and isend operations */
static void codes_exec_mpi_send(nw_state* s, nw_message * m, tw_lp* lp)
{
        struct codes_workload_op * mpi_op = m->op; 
	/* model-net event */
	tw_lpid dest_rank;

	codes_mapping_get_lp_info(lp->gid, lp_group_name, &mapping_grp_id, 
	    lp_type_name, &mapping_type_id, annotation, &mapping_rep_id, &mapping_offset);

	if(net_id == DRAGONFLY) /* special handling for the dragonfly case */
	{
		int num_routers, lps_per_rep, factor;
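		/* the dragonfly config is assumed to lay out each repetition as num_nw_lps nw-lps, num_nw_lps model-net LPs and num_routers routers; map the destination rank to its global LP id accordingly */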
		num_routers = codes_mapping_get_lp_count("MODELNET_GRP", 1,
                  "dragonfly_router", NULL, 1);
	 	lps_per_rep = (2 * num_nw_lps) + num_routers;	
		factor = mpi_op->u.send.dest_rank / num_nw_lps;
		dest_rank = (lps_per_rep * factor) + (mpi_op->u.send.dest_rank % num_nw_lps);	
	}
	else
	{
		/* other cases like torus/simplenet/loggp etc. */
		codes_mapping_get_lp_id(lp_group_name, lp_type_name, NULL, 1,  
	    	  mpi_op->u.send.dest_rank, mapping_offset, &dest_rank);
	}

	num_bytes_sent += mpi_op->u.send.num_bytes;

	nw_message* local_m = malloc(sizeof(nw_message));
	nw_message* remote_m = malloc(sizeof(nw_message));
	assert(local_m && remote_m);
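	/* the remote copy is delivered to the destination nw-lp (MPI_SEND_ARRIVED); the local copy comes back to this LP once model-net has issued the send (MPI_SEND_POSTED) */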

        local_m->u.msg_info.sim_start_time = tw_now(lp);
        local_m->u.msg_info.dest_rank = mpi_op->u.send.dest_rank;
	local_m->u.msg_info.src_rank = mpi_op->u.send.source_rank;
        local_m->u.msg_info.op_type = mpi_op->op_type; 
        local_m->msg_type = MPI_SEND_POSTED;
        local_m->u.msg_info.tag = mpi_op->u.send.tag;
        local_m->u.msg_info.num_bytes = mpi_op->u.send.num_bytes;
        local_m->u.msg_info.req_id = mpi_op->u.send.req_id;

        memcpy(remote_m, local_m, sizeof(nw_message));
	remote_m->msg_type = MPI_SEND_ARRIVED;

	model_net_event(net_id, "test", dest_rank, mpi_op->u.send.num_bytes, 0.0, 
	    sizeof(nw_message), (const void*)remote_m, sizeof(nw_message), (const void*)local_m, lp);

	/*if(TRACE == lp->gid)	
		printf("\n !!! %lf send req id %d dest %d nw_message %d ", tw_now(lp), (int)mpi_op->u.send.req_id, (int)dest_rank, sizeof(nw_message));
	*/
	/* isend executed, now get next MPI operation from the queue */ 
	if(mpi_op->op_type == CODES_WK_ISEND)
	   codes_issue_next_event(lp);
}

/* MPI collective operations */
static void codes_exec_mpi_col(nw_state* s, nw_message* m, tw_lp* lp)
{
	codes_issue_next_event(lp);
}

/* convert seconds to ns */
static tw_stime s_to_ns(tw_stime ns)
{
    return(ns * (1000.0 * 1000.0 * 1000.0));
}


static void update_send_completion_queue_rc(nw_state* s, tw_bf * bf, nw_message * m, tw_lp * lp)
{
	//mpi_queue_remove_matching_op(&s->completed_isend_queue_head, &s->completed_isend_queue_tail, &m->op, SEND);
	if(m->u.msg_info.op_type == CODES_WK_SEND)
		tw_rand_reverse_unif(lp->rng);	

	if(m->u.msg_info.op_type == CODES_WK_ISEND)
	  {
		notify_waits_rc(s, bf, lp, m, m->u.msg_info.req_id);
		remove_req_id(&s->completed_reqs, m->u.msg_info.req_id);
	 }
}

/* completed isends are added in the list */
static void update_send_completion_queue(nw_state* s, tw_bf * bf, nw_message * m, tw_lp * lp)
{
	if(TRACE == lp->gid)
		printf("\n %lf isend operation completed req id %d ", tw_now(lp), m->u.msg_info.req_id);
	if(m->u.msg_info.op_type == CODES_WK_ISEND)
	   {	
		mpi_completed_queue_insert_op(&s->completed_reqs, m->u.msg_info.req_id);
	   	notify_waits(s, bf, lp, m, m->u.msg_info.req_id);
	   }  
	
	/* blocking send operation */
	if(m->u.msg_info.op_type == CODES_WK_SEND)
		codes_issue_next_event(lp);	

	 return;
}

/* reverse handler for updating arrival queue function */
static void update_arrival_queue_rc(nw_state* s, tw_bf * bf, nw_message * m, tw_lp * lp)
{
	s->send_time = m->u.rc.saved_send_time;
	s->recv_time = m->u.rc.saved_recv_time;

	if(m->u.rc.found_match >= 0)
	{
		// TODO: Modify for recvs
		if(lp->gid == TRACE)
			printf("\n %lf reverse-- update arrival queue req ID %d", tw_now(lp), (int) m->u.rc.saved_matched_req);
		dumpi_req_id req_id = m->u.rc.saved_matched_req;
		notify_waits_rc(s, bf, lp, m, m->u.rc.saved_matched_req);
		//int count = numQueue(s->pending_recvs_queue);
		mpi_queue_update(s->pending_recvs_queue, m->u.rc.ptr_match_op, m->u.rc.found_match);
		remove_req_id(&s->completed_reqs, m->u.rc.saved_matched_req);
	
		/*if(lp->gid == TRACE)
			printf("\n Reverse: after adding pending recvs queue %d ", s->pending_recvs_queue->num_elems);*/
	}
	else if(m->u.rc.found_match < 0)
	{
		mpi_queue_remove_tail(lp->gid, s->arrival_queue);	
		/*if(lp->gid == TRACE)
			printf("\n Reverse: after removing arrivals queue %d ", s->arrival_queue->num_elems);*/
	}
}

/* once an isend operation arrives, the pending receives queue is checked to see whether an irecv has already been posted. If no matching irecv is found, the arrived message is added to the arrival queue. */
static void update_arrival_queue(nw_state* s, tw_bf * bf, nw_message * m, tw_lp * lp)
{
	//int count_before = numQueue(s->pending_recvs_queue);
	int is_blocking = 0; /* checks if the recv operation was blocking or not */

	m->u.rc.saved_send_time = s->send_time;
	m->u.rc.saved_recv_time = s->recv_time;

	s->send_time += tw_now(lp) - m->u.msg_info.sim_start_time;
	dumpi_req_id req_id = -1;

        /* Now reconstruct the mpi op */
        struct codes_workload_op * arrived_op = (struct codes_workload_op *) malloc(sizeof(struct codes_workload_op));
        arrived_op->sim_start_time = m->u.msg_info.sim_start_time;
        arrived_op->op_type = m->u.msg_info.op_type;
        arrived_op->u.send.source_rank = m->u.msg_info.src_rank;
        arrived_op->u.send.dest_rank = m->u.msg_info.dest_rank;
        arrived_op->u.send.num_bytes = m->u.msg_info.num_bytes;
        arrived_op->u.send.tag = m->u.msg_info.tag;
        arrived_op->u.send.req_id = m->u.msg_info.req_id;
        m->op = arrived_op;

	int found_matching_recv = mpi_queue_remove_matching_op(s, lp, s->pending_recvs_queue, m);

	if(TRACE == lp->gid)
		printf("\n %lf update arrival queue req id %d %d", tw_now(lp), arrived_op->u.send.req_id, m->op->u.send.source_rank);
	if(found_matching_recv < 0)
	 {
		m->u.rc.found_match = -1;
		mpi_pending_queue_insert_op(s->arrival_queue, m->op);
	}
	else
	  {
		m->u.rc.found_match = found_matching_recv;
	   	notify_waits(s, bf, lp, m, m->u.rc.saved_matched_req);
	  }
}

/* initializes the network node LP, loads the trace file in the structs, calls the first MPI operation to be executed */
void nw_test_init(nw_state* s, tw_lp* lp)
{
   /* initialize the LP's and load the data */
   char * params;
   scala_trace_params params_sc;
   dumpi_trace_params params_d;
  
   codes_mapping_get_lp_info(lp->gid, lp_group_name, &mapping_grp_id, lp_type_name, 
	&mapping_type_id, annotation, &mapping_rep_id, &mapping_offset);
  
   memset(s, 0, sizeof(*s));
   s->nw_id = (mapping_rep_id * num_nw_lps) + mapping_offset;
   s->completed_reqs = NULL;

   s->pending_waits = NULL;
   if(!num_net_traces) 
	num_net_traces = num_net_lps;

   if (strcmp(workload_type, "scalatrace") == 0){
       if (offset_file[0] == '\0'){
           tw_error(TW_LOC, "required argument for scalatrace offset_file");
           return;
       }
       strcpy(params_sc.offset_file_name, offset_file);
       strcpy(params_sc.nw_wrkld_file_name, workload_file);
       params = (char*)&params_sc;
   }
   else if (strcmp(workload_type, "dumpi") == 0){
       strcpy(params_d.file_name, workload_file);
       params_d.num_net_traces = num_net_traces;

       params = (char*)&params_d;
   }
  /* In this case, the LP will not generate any workload related events*/
   if(s->nw_id >= num_net_traces)
     {
	//printf("\n network LP not generating events %d ", (int)s->nw_id);
	return;
     }
   wrkld_id = codes_workload_load("dumpi-trace-workload", params, 0, (int)s->nw_id);

   s->arrival_queue = queue_init(); 
   s->pending_recvs_queue = queue_init();

   /* clock starts ticking */
   s->elapsed_time = tw_now(lp);
   codes_issue_next_event(lp);

   return;
}

void nw_test_event_handler(nw_state* s, tw_bf * bf, nw_message * m, tw_lp * lp)
{
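	/* clear the bitfield that records forward-event decisions for reverse computation */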
	*(int *)bf = (int)0;
	switch(m->msg_type)
	{
		case MPI_SEND_POSTED:
			update_send_completion_queue(s, bf, m, lp);
		break;

		case MPI_SEND_ARRIVED:
			update_arrival_queue(s, bf, m, lp);
		break;

		case MPI_OP_GET_NEXT:
			get_next_mpi_operation(s, bf, m, lp);	
		break; 
	}
}

static void get_next_mpi_operation_rc(nw_state* s, tw_bf * bf, nw_message * m, tw_lp * lp)
{
	codes_workload_get_next_rc(wrkld_id, 0, (int)s->nw_id, m->op);
	if(m->op->op_type == CODES_WK_END)
		return;

	switch(m->op->op_type)
	{
		case CODES_WK_SEND:
		case CODES_WK_ISEND:
		{
			if(lp->gid == TRACE)
				printf("\n %lf reverse send req %d ", tw_now(lp), (int)m->op->u.send.req_id);
			model_net_event_rc(net_id, lp, m->op->u.send.num_bytes);
			if(m->op->op_type == CODES_WK_ISEND)
				tw_rand_reverse_unif(lp->rng);	
			s->num_sends--;
			num_bytes_sent -= m->op->u.send.num_bytes;
		}
		break;

		case CODES_WK_IRECV:
		case CODES_WK_RECV:
		{
			codes_exec_mpi_recv_rc(s, m, lp);
			s->num_recvs--;
		}
		break;
		case CODES_WK_DELAY:
		{
			tw_rand_reverse_unif(lp->rng);
			s->num_delays--;
			s->compute_time -= s_to_ns(m->op->u.delay.seconds);
		}
		break;
		case CODES_WK_BCAST:
		case CODES_WK_ALLGATHER:
		case CODES_WK_ALLGATHERV:
		case CODES_WK_ALLTOALL:
		case CODES_WK_ALLTOALLV:
		case CODES_WK_REDUCE:
		case CODES_WK_ALLREDUCE:
		case CODES_WK_COL:
		{
			s->num_cols--;
			tw_rand_reverse_unif(lp->rng);
		}
		break;
	
		case CODES_WK_WAIT:
		{
			s->num_wait--;
			codes_exec_mpi_wait_rc(s, bf, m, lp);
		}
		break;
		case CODES_WK_WAITALL:
		{
			s->num_waitall--;
			codes_exec_mpi_wait_all_rc(s, bf, m, lp);
		}
		break;
		case CODES_WK_WAITSOME:
		case CODES_WK_WAITANY:
		{
			s->num_waitsome--;
			tw_rand_reverse_unif(lp->rng);
		}
		break;
		default:
			printf("\n Invalid op type %d ", m->op->op_type);
	}
}

static void get_next_mpi_operation(nw_state* s, tw_bf * bf, nw_message * m, tw_lp * lp)
{
		struct codes_workload_op mpi_op;
    		codes_workload_get_next(wrkld_id, 0, (int)s->nw_id, &mpi_op);
		m->op = malloc(sizeof(struct codes_workload_op));
                memcpy(m->op, &mpi_op, sizeof(struct codes_workload_op));
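		/* keep a heap copy of the operation with the event so the reverse handler can undo it */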

    		if(mpi_op.op_type == CODES_WK_END)
    	 	{
			return;
     		}
		switch(mpi_op.op_type)
		{
			case CODES_WK_SEND:
			case CODES_WK_ISEND:
			 {
				s->num_sends++;
				codes_exec_mpi_send(s, m, lp);
			 }
			break;
	
			case CODES_WK_RECV:
			case CODES_WK_IRECV:
			  {
				s->num_recvs++;
				codes_exec_mpi_recv(s, m, lp);
			  }
			break;

			case CODES_WK_DELAY:
			  {
				s->num_delays++;
				codes_exec_comp_delay(s, m, lp);
			  }
			break;

			case CODES_WK_BCAST:
			case CODES_WK_ALLGATHER:
			case CODES_WK_ALLGATHERV:
			case CODES_WK_ALLTOALL:
			case CODES_WK_ALLTOALLV:
			case CODES_WK_REDUCE:
			case CODES_WK_ALLREDUCE:
			case CODES_WK_COL:
                        case CODES_WK_WAITSOME:
                        case CODES_WK_WAITANY:
			  {
				s->num_cols++;
				codes_exec_mpi_col(s, m, lp);
			  }
			break;
			case CODES_WK_WAIT:
			{
				s->num_wait++;
				codes_exec_mpi_wait(s, bf, m, lp);	
			}
			break;
			case CODES_WK_WAITALL:
			{
				s->num_waitall++;
				codes_exec_mpi_wait_all(s, bf, m, lp);
			}
			break;
			default:
				printf("\n Invalid op type %d ", m->op->op_type);
		}
}

void nw_test_finalize(nw_state* s, tw_lp* lp)
{
	if(s->nw_id < num_net_traces)
	{
		int count_irecv = numQueue(s->pending_recvs_queue);
        	int count_isend = numQueue(s->arrival_queue);
		printf("\n LP %ld unmatched irecvs %d unmatched sends %d Total sends %ld receives %ld collectives %ld delays %ld wait alls %ld waits %ld send time %lf wait %lf", 
			lp->gid, s->pending_recvs_queue->num_elems, s->arrival_queue->num_elems, s->num_sends, s->num_recvs, s->num_cols, s->num_delays, s->num_waitall, s->num_wait, s->send_time, s->wait_time);
		if(lp->gid == TRACE)
		{
		   printQueue(lp->gid, s->pending_recvs_queue, "irecv ");
		  printQueue(lp->gid, s->arrival_queue, "isend");
	        }

		double total_time = tw_now(lp) - s->elapsed_time;
		//assert(total_time >= s->compute_time);

		if(total_time - s->compute_time > max_comm_time)
			max_comm_time = total_time - s->compute_time;
		
		if(total_time > max_time )
			max_time = total_time;

		if(s->wait_time > max_wait_time)
			max_wait_time = s->wait_time;

		if(s->send_time > max_send_time)
			max_send_time = s->send_time;

		if(s->recv_time > max_recv_time)
			max_recv_time = s->recv_time;

		avg_time += total_time;
		avg_comm_time += (total_time - s->compute_time);	
		avg_wait_time += s->wait_time;
		avg_send_time += s->send_time;
		 avg_recv_time += s->recv_time;

		//printf("\n LP %ld Time spent in communication %llu ", lp->gid, total_time - s->compute_time);
		free(s->arrival_queue);
		free(s->pending_recvs_queue);
	}
}

void nw_test_event_handler_rc(nw_state* s, tw_bf * bf, nw_message * m, tw_lp * lp)
{
	switch(m->msg_type)
	{
		case MPI_SEND_POSTED:
			update_send_completion_queue_rc(s, bf, m, lp);
		break;

		case MPI_SEND_ARRIVED:
			update_arrival_queue_rc(s, bf, m, lp);
		break;

		case MPI_OP_GET_NEXT:
			get_next_mpi_operation_rc(s, bf, m, lp);
		break;
	}
}

const tw_optdef app_opt [] =
{
	TWOPT_GROUP("Network workload test"),
    	TWOPT_CHAR("workload_type", workload_type, "workload type (either \"scalatrace\" or \"dumpi\")"),
	TWOPT_CHAR("workload_file", workload_file, "workload file name"),
	TWOPT_UINT("num_net_traces", num_net_traces, "number of network traces"),
	TWOPT_CHAR("offset_file", offset_file, "offset file name"),
	TWOPT_END()
};

tw_lptype nw_lp = {
    (init_f) nw_test_init,
    (pre_run_f) NULL,
    (event_f) nw_test_event_handler,
    (revent_f) nw_test_event_handler_rc,
    (final_f) nw_test_finalize,
    (map_f) codes_mapping,
    sizeof(nw_state)
};

const tw_lptype* nw_get_lp_type()
{
            return(&nw_lp);
}

static void nw_add_lp_type()
{
  lp_type_register("nw-lp", nw_get_lp_type());
}

int main( int argc, char** argv )
{
  int rank, nprocs;
  int num_nets;
  int* net_ids;

  g_tw_ts_end = s_to_ns(60*60*24*365); /* one year, in nsecs */

  workload_type[0]='\0';
  tw_opt_add(app_opt);
  tw_init(&argc, &argv);

  if(strlen(workload_file) == 0)
    {
	if(tw_ismaster())
		printf("\n Usage: mpirun -np n ./codes-nw-test --sync=1/2/3 --workload_type=type --workload_file=workload-file-name");
	tw_end();
	return -1;
    }

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

   configuration_load(argv[2], MPI_COMM_WORLD, &config);

   nw_add_lp_type();
   model_net_register();

   net_ids = model_net_configure(&num_nets);
   assert(num_nets == 1);
   net_id = *net_ids;
   free(net_ids);


   codes_mapping_setup();

   num_net_lps = codes_mapping_get_lp_count("MODELNET_GRP", 0, "nw-lp", NULL, 0);
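   /* nw-lp counts used when mapping MPI ranks to LP ids (num_nw_lps is used by the dragonfly mapping in codes_exec_mpi_send) */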
   
   num_nw_lps = codes_mapping_get_lp_count("MODELNET_GRP", 1, 
			"nw-lp", NULL, 1);	
   tw_run();

    long long total_bytes_sent, total_bytes_recvd;
    double max_run_time, avg_run_time;
    double max_comm_run_time, avg_comm_run_time;
    double total_avg_send_time, total_max_send_time;
    double total_avg_wait_time, total_max_wait_time;
    double total_avg_recv_time, total_max_recv_time;

    MPI_Reduce(&num_bytes_sent, &total_bytes_sent, 1, MPI_LONG_LONG, MPI_SUM, 0, MPI_COMM_WORLD);
    MPI_Reduce(&num_bytes_recvd, &total_bytes_recvd, 1, MPI_LONG_LONG, MPI_SUM, 0, MPI_COMM_WORLD);
   MPI_Reduce(&max_comm_time, &max_comm_run_time, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
   MPI_Reduce(&max_time, &max_run_time, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
   MPI_Reduce(&avg_time, &avg_run_time, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);

   MPI_Reduce(&avg_recv_time, &total_avg_recv_time, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
   MPI_Reduce(&avg_comm_time, &avg_comm_run_time, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
   MPI_Reduce(&max_wait_time, &total_max_wait_time, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);  
   MPI_Reduce(&max_send_time, &total_max_send_time, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);  
   MPI_Reduce(&max_recv_time, &total_max_recv_time, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);  
   MPI_Reduce(&avg_wait_time, &total_avg_wait_time, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
   MPI_Reduce(&avg_send_time, &total_avg_send_time, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);

   if(!g_tw_mynode)
	printf("\n Total bytes sent %lld recvd %lld \n max runtime %lf ns avg runtime %lf \n max comm time %lf avg comm time %lf \n max send time %lf avg send time %lf \n max recv time %lf avg recv time %lf \n max wait time %lf avg wait time %lf \n", total_bytes_sent, total_bytes_recvd, 
			max_run_time, avg_run_time/num_net_traces,
			max_comm_run_time, avg_comm_run_time/num_net_traces,
			total_max_send_time, total_avg_send_time/num_net_traces,
			total_max_recv_time, total_avg_recv_time/num_net_traces,
			total_max_wait_time, total_avg_wait_time/num_net_traces);
   tw_end();
  
  return 0;
}

/*
 * Local variables:
 *  c-indent-level: 4
 *  c-basic-offset: 4
 * End:
 *
 * vim: ft=c ts=8 sts=4 sw=4 expandtab
 */