/* Filename: mpi_nrm.cpp
 *
 * Description: This Message Passing Interface (MPI) library allows
 * application of runtime policies for energy efficiency through the MPI
 * standard profiling interface (PMPI).
 *
 * The current implementation passes phase contextual information (compute and
 * barrier times) to the Argo Node Resource Manager (NRM). Using this
 * contextual information, the NRM invokes power policies to improve the
 * energy efficiency of the node.
 *
 * Note: Users can annotate phases to control the transmission of application
 * context information. A phase can be skipped by setting NRM_SKIP to a
 * non-zero value.
 *
 * Written by Sridutt Bhalachandra, sriduttb@anl.gov
 */
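
/* Example usage (illustrative sketch; the library name, launcher invocation
 * and damper value below are assumptions, not taken from this file):
 *
 *   export NRM_TRANSMIT=1    # enable sending phase context to the NRM
 *   export NRM_DAMPER=1000   # aggregate phases shorter than this threshold
 *                            # (same time units as return_current_time())
 *   LD_PRELOAD=./libmpi_nrm.so mpirun -n 4 ./app
 */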

#include "mpi_nrm.h"

/************************
 * Signal handler for SIGTERM, SIGINT and other signals if needed
 ***********************/
void handle_signal(int sig_num)
{
  printf("\nExiting MPI NRM Interface...\n");

  exit(sig_num);
}

/************************
 * Set up the MPI NRM Interface
 ***********************/
extern "C" void MPI_nrm_init()
{
  signal(SIGTERM, handle_signal);
  signal(SIGINT, handle_signal);

  cpu = sched_getcpu();

  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  if(getenv("NRM_TRANSMIT"))
  {
    _transmit = atoi(getenv("NRM_TRANSMIT"));
  }

  if(getenv("NRM_DAMPER"))
  {
    _damper = atof(getenv("NRM_DAMPER"));
  }

  // Initialize the context used to communicate with the Argo Node Resource
  // Manager (NRM)
  // TODO: Change hard-coded application uuid
  nrm_init(&ctxt, "mpi_nrm");

  return;
}

extern "C" void MPI_nrm_fini()
{
  // Cleanup NRM context
  nrm_fini(&ctxt);
}

/************************
 * Prints the transmission statistics for an application
 ***********************/
extern "C" void MPI_nrm_print_stats(void)
{
  printf("Stats: CPU %u Damper %lf DamperAggregations %u PhaseSkips %u\n", cpu,
      _damper, _damperAggregationCount, _phaseSkipCount);
  return;
}

/************************
 * Send appropriate phase context to NRM 
 ***********************/
extern "C" void transmit_to_nrm(int cpu, uint64_t *startCompute, uint64_t
    endCompute, uint64_t startBarrier, uint64_t endBarrier)
{
  uint64_t computeTime, barrierTime, totalPhaseTime;

  // Time spent in computation, barrier and total
  computeTime = endCompute - *startCompute;
  barrierTime = endBarrier - startBarrier;
  totalPhaseTime = computeTime + barrierTime;

  // Aggregate phases shorter than the damper value (NRM_DAMPER), which is
  // compared in the same time units as return_current_time()
  if(totalPhaseTime < _damper)
  {
    // Record that the value transmitted for a future phase is an aggregation
    // of smaller phases
    _aggregation++;
    _damperAggregationCount++;
    return;
  }

  // Check if the user wants to skip sending context to NRM in the current
  // phase (NRM_SKIP set to a non-zero value, as documented above)
  if(getenv("NRM_SKIP") && atoi(getenv("NRM_SKIP")) != 0)
  {
    // Reset phase start time
    *startCompute = return_current_time();
    _phaseSkipCount++;

    // Reset environment variable
    setenv("NRM_SKIP", "0", 1);
    return;
  }
  
  // Send context to NRM
  nrm_send_phase_context(&ctxt, cpu, _aggregation, computeTime, totalPhaseTime);

  // Reset
  _aggregation = 0;
  *startCompute = return_current_time();
  
  return;
}
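
/* Illustrative application-side annotation (a sketch, not part of this
 * interface): a single phase can be excluded from transmission by setting
 * NRM_SKIP before the phase-ending collective; the variable is reset to "0"
 * afterwards, so only that phase is skipped.
 *
 *   setenv("NRM_SKIP", "1", 1);   // skip the phase ending at the next barrier
 *   MPI_Barrier(MPI_COMM_WORLD);  // phase context is not sent to the NRM
 */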

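/* PMPI wrappers: each MPI_* routine below intercepts the application's call,
 * records phase timing where relevant, and forwards the arguments to the
 * corresponding PMPI_* entry point. */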
int MPI_Init(int *argc, char ***argv)
{
  startCompute = return_current_time();
  int ret_value = PMPI_Init(argc, argv);
  MPI_nrm_init();

  return ret_value;
}

int MPI_Finalize(void)
{
  MPI_nrm_fini();

  return PMPI_Finalize();
}

int MPI_Send(const void *buf, int count, MPI_Datatype datatype, int dest,
    int tag, MPI_Comm comm)
{
  int ret_value = PMPI_Send(buf, count, datatype, dest, tag, comm);

  return ret_value;
}

int MPI_Recv(void *buf, int count, MPI_Datatype datatype, int source, int tag,
    MPI_Comm comm, MPI_Status * status)
{
  int ret_value = PMPI_Recv(buf, count, datatype, source, tag, comm, status);

  return ret_value;
}

int MPI_Isend(const void *buf, int count, MPI_Datatype datatype, int dest,
     int tag, MPI_Comm comm, MPI_Request * request)
{
  int ret_value = PMPI_Isend(buf, count, datatype, dest, tag, comm, request);

  return ret_value;
}

int MPI_Irecv(void *buf, int count, MPI_Datatype datatype, int source, int tag,
     MPI_Comm comm, MPI_Request * request)
{
  int ret_value = PMPI_Irecv(buf, count, datatype, source, tag, comm, request);

  return ret_value;
}

int MPI_Barrier(MPI_Comm comm)
{
  endCompute = return_current_time();
  uint64_t startBarrier = endCompute;

  int ret_value = PMPI_Barrier(comm);

  uint64_t endBarrier = return_current_time();

  if(_transmit)
  {
    transmit_to_nrm(cpu, &startCompute, endCompute, startBarrier, endBarrier);
  }

  return ret_value;
}

int MPI_Bcast(void *buffer, int count, MPI_Datatype datatype, int root,
     MPI_Comm comm)
{
  int ret_value = PMPI_Bcast(buffer, count, datatype, root, comm);

  return ret_value;
}

int MPI_Gather(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
      void *recvbuf, int recvcount, MPI_Datatype recvtype, int root,
      MPI_Comm comm)
{
  int ret_value = PMPI_Gather(sendbuf, sendcount, sendtype, recvbuf, recvcount,
      recvtype, root, comm);

  return ret_value;
}

int MPI_Gatherv(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
       void *recvbuf, const int *recvcounts, const int *displs,
       MPI_Datatype recvtype, int root, MPI_Comm comm)
{
  int ret_value = PMPI_Gatherv(sendbuf, sendcount, sendtype, recvbuf,
      recvcounts, displs, recvtype, root, comm);

  return ret_value;
}

int MPI_Allgather(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
         void *recvbuf, int recvcount, MPI_Datatype recvtype,
         MPI_Comm comm)
{
  int ret_value = PMPI_Allgather(sendbuf, sendcount, sendtype, recvbuf,
      recvcount, recvtype, comm);

  return ret_value;
}

int MPI_Reduce(const void *sendbuf, void *recvbuf, int count,
      MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)
{
  int ret_value = PMPI_Reduce(sendbuf, recvbuf, count, datatype, op, root,
      comm);

  return ret_value;
}

int MPI_Allreduce(const void *sendbuf, void *recvbuf, int count,
         MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
  endCompute = return_current_time();
  uint64_t startBarrier = endCompute;

  int ret_value = PMPI_Allreduce(sendbuf, recvbuf, count, datatype, op, comm);

  uint64_t endBarrier = return_current_time();

  if(_transmit)
  {
    transmit_to_nrm(cpu, &startCompute, endCompute, startBarrier, endBarrier);
  }

  return ret_value;
}

int MPI_Wait(MPI_Request * request, MPI_Status * status)
{
  int ret_value = PMPI_Wait(request, status);

  return ret_value;
}

int MPI_Waitall(int count, MPI_Request array_of_requests[],
       MPI_Status array_of_statuses[])
{
  int ret_value = PMPI_Waitall(count, array_of_requests, array_of_statuses);

  return ret_value;
}