/*
 * Copyright (C) 2013 University of Chicago.
 * See COPYRIGHT notice in top-level directory.
 *
 */

#ifndef CODES_NW_WORKLOAD_H
#define CODES_NW_WORKLOAD_H

#include <stdio.h>
#include <stdint.h>

#include "ross.h"

#define MAX_LENGTH 512
#define MAX_REQUESTS 128

/* forward declarations: mpi_event_list holds the data from a single MPI
 * event; the *_trace_params structs configure each workload generator */
typedef struct mpi_event_list mpi_event_list;
typedef struct scala_trace_params scala_trace_params;
typedef struct dumpi_trace_params dumpi_trace_params;

struct scala_trace_params
{
   char offset_file_name[MAX_LENGTH];
   char nw_wrkld_file_name[MAX_LENGTH];
};

struct dumpi_trace_params
{
   int num_net_traces;
   char file_name[MAX_LENGTH];
};
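
/*
 * Example (sketch): configuring a DUMPI trace workload.  The filled-in
 * params struct is handed to codes_nw_workload_load() through its opaque
 * params argument.  The "dumpi" type name below is an assumption for
 * illustration; use whatever name the generator is registered under.
 *
 *   dumpi_trace_params d_params;
 *   d_params.num_net_traces = 16;
 *   strncpy(d_params.file_name, "trace-prefix", MAX_LENGTH - 1);
 *   int wkld_id = codes_nw_workload_load("dumpi",
 *           (const char*)&d_params, rank);
 */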

enum NW_WORKLOADS
{
   SCALA_TRACE = 1,
#ifdef USE_DUMPI
   DUMPI,
#endif
   OTHERS, /* add the names of other workload generators here */
};

enum mpi_workload_type
{
    /* sleep/delay to simulate computation or other activity */
    CODES_NW_DELAY = 1,
    /* MPI wait all operation */
    CODES_NW_WAITALL,
    /* MPI Wait operation */
    CODES_NW_WAIT,
    /* MPI Waitsome operation */
    CODES_NW_WAITSOME,
    /* MPI Waitany operation */
    CODES_NW_WAITANY,
    /* terminator; there are no more operations for this rank */
    CODES_NW_END,
    /* MPI blocking send operation */
    CODES_NW_SEND,
    /* MPI blocking recv operation */
    CODES_NW_RECV,
    /* MPI non-blocking send operation */
    CODES_NW_ISEND,
    /* MPI non-blocking receive operation */
    CODES_NW_IRECV,
    /* MPI broadcast operation */
    CODES_NW_BCAST,
    /* MPI Allgather operation */
    CODES_NW_ALLGATHER,
    /* MPI Allgatherv operation */
    CODES_NW_ALLGATHERV,
    /* MPI Alltoall operation */
    CODES_NW_ALLTOALL,
    /* MPI Alltoallv operation */
    CODES_NW_ALLTOALLV,
    /* MPI Reduce operation */
    CODES_NW_REDUCE,
    /* MPI Allreduce operation */
    CODES_NW_ALLREDUCE,
    /* MPI test all operation */
    CODES_NW_TESTALL,
    /* MPI test operation */
    CODES_NW_TEST,
    /* Generic collective operation */
    CODES_NW_COL,
};

/* data structure holding the data from a single MPI event (read from a
 * scala-trace or DUMPI trace); it can describe a delay, a send/recv, a
 * wait, or a collective call */
struct mpi_event_list
{
    /* what type of operation this is */
    enum mpi_workload_type op_type;

    /* operation start and end times from the trace, plus the time at which
     * the operation starts in the simulation */
    double start_time;
    double end_time;
    double sim_start_time;

    /* parameters for each operation type */
    union
    {
        struct
        {
            double nsecs;
            double seconds;
        } delay;
        struct
        {
            int source_rank;  /* source rank of MPI send message */
            int dest_rank;    /* dest rank of MPI send message */
            int num_bytes;    /* number of bytes to be transferred over the network */
            short data_type;  /* MPI data type to be matched with the recv */
            int count;        /* number of elements to be sent */
            int tag;          /* tag of the message */
            int16_t req_id;   /* request id, for non-blocking sends */
        } send;
        struct
        {
            int source_rank;  /* source rank of MPI recv message */
            int dest_rank;    /* dest rank of MPI recv message */
            int num_bytes;    /* number of bytes to be transferred over the network */
            short data_type;  /* MPI data type to be matched with the send */
            int count;        /* number of elements to be received */
            int tag;          /* tag of the message */
            int16_t req_id;   /* request id, for non-blocking receives */
        } recv;
        struct
        {
            int num_bytes;    /* number of bytes moved by the collective */
        } collective;
        struct
        {
            int count;        /* number of request ids being waited on */
            int16_t* req_ids; /* request ids for waitall/waitsome/waitany */
        } waits;
        struct
        {
            int16_t req_id;   /* single request id being waited on */
        } wait;
    } u;
};
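
/*
 * Example (sketch): dispatching on a retrieved operation.  Only the fields
 * of the union member matching op_type are valid.
 *
 *   struct mpi_event_list op;
 *   codes_nw_workload_get_next(wkld_id, rank, &op);
 *   switch(op.op_type)
 *   {
 *       case CODES_NW_DELAY:
 *           // simulate computation lasting op.u.delay.nsecs nanoseconds
 *           break;
 *       case CODES_NW_SEND:
 *           // issue a send of op.u.send.num_bytes bytes to op.u.send.dest_rank
 *           break;
 *       case CODES_NW_END:
 *           // no more operations for this rank
 *           break;
 *       default:
 *           break;
 *   }
 */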


/* read in the metadata about the MPI events for the given rank and populate
   the MPI events array; returns the wkld_id identifier used by the
   functions below */
int codes_nw_workload_load(const char* type_name, const char* params, int rank);

/* retrieves the next network operation to execute. the wkld_id is the
   identifier returned by codes_nw_workload_load(). The op argument is a
   pointer to a structure to be filled in with network operation information */
void codes_nw_workload_get_next(int wkld_id, int rank, struct mpi_event_list *op);

/* reverse of the above function; rolls back a retrieval during reverse
   computation */
void codes_nw_workload_get_next_rc(int wkld_id, int rank, const struct mpi_event_list* op);

/* writes a human-readable description of the given operation to stream f */
void codes_nw_workload_print_op(FILE* f, struct mpi_event_list* op, int rank);
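
/*
 * Example (sketch): a minimal driver loop that replays and prints all
 * operations for one rank.  Assumes wkld_id was obtained from
 * codes_nw_workload_load() as shown earlier; CODES_NW_END marks the end of
 * the stream for this rank.
 *
 *   struct mpi_event_list op;
 *   do {
 *       codes_nw_workload_get_next(wkld_id, rank, &op);
 *       codes_nw_workload_print_op(stdout, &op, rank);
 *   } while(op.op_type != CODES_NW_END);
 */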
#endif /* CODES_NW_WORKLOAD_H */

/*
 * Local variables:
 *  c-indent-level: 4
 *  c-basic-offset: 4
 * End:
 *
 * vim: ft=c ts=8 sts=4 sw=4 expandtab
 */