Commit 5d4f74ee authored by Francois Tessier's avatar Francois Tessier

Add miniHACC example

parent 6193271f
*.[oa]
*~
MPICXX = mpicxx
MPI_CFLAGS = -g -O3 -I./topology/
-MPI_CFLAGS += -DBGQ
+MPI_CFLAGS += -DBGQ #-DDEBUG -DTIMING
MPI_CFLAGS += -I/bgsys/drivers/ppcfloor -I/bgsys/drivers/ppcfloor/spi/include/kernel/cnk
INSTALL_PATH = $(HOME)/install_bgq
...
----------------
Run on BG/Q
----------------
qsub -n 256 --mode c16 -t 10 -A Performance -M ftessier@anl.gov ./miniHACC-AoS
qsub -n 256 --mode c16 -t 10 -A Performance -M ftessier@anl.gov ./miniHACC-SoA
runjob --block $COBALT_PARTNAME --envs PAMID_VERBOSE=1 BG_SHAREDMEMSIZE=64 PAMID_COLLECTIVES=1 BGLOCKLESSMPIO_F_TYPE=0x47504653 -p 16 --np 8192 : ./miniHACC-SoA
runjob --block $COBALT_PARTNAME --envs PAMID_VERBOSE=1 BG_SHAREDMEMSIZE=64 PAMID_COLLECTIVES=1 BGLOCKLESSMPIO_F_TYPE=0x47504653 -p 16 --np 8192 : ./miniHACC-AoS
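The aggregation layer itself can also be tuned through environment variables read in Tapioca::ParseEnvVariables (e.g. AGGR_STRATEGY and AGGR_NBAGGR; the accepted values are defined there). A hypothetical invocation forwarding them through runjob, with placeholder values, would be:

runjob --block $COBALT_PARTNAME --envs AGGR_NBAGGR=16 AGGR_STRATEGY=<strategy> PAMID_VERBOSE=1 BG_SHAREDMEMSIZE=64 PAMID_COLLECTIVES=1 BGLOCKLESSMPIO_F_TYPE=0x47504653 -p 16 --np 8192 : ./miniHACC-AoS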
MPICC = mpicxx
MPI_CFLAGS = -g -O3 -Wall -I/bgsys/drivers/ppcfloor -I/bgsys/drivers/ppcfloor/spi/include/kernel/cnk -I${HOME}/install_bgq/include/ -DBGQ
MPI_LDFLAGS = -lstdc++ -lpthread -lm -L${HOME}/install_bgq/lib/ -ltapioca

all: miniHACC-AoS miniHACC-SoA

miniHACC-AoS: miniHACC-AoS.o
	$(MPICC) $(MPI_CFLAGS) miniHACC-AoS.o -o $@ $(MPI_LDFLAGS)

miniHACC-AoS.o: miniHACC-AoS.cpp
	$(MPICC) $(MPI_CFLAGS) -c miniHACC-AoS.cpp

miniHACC-SoA: miniHACC-SoA.o
	$(MPICC) $(MPI_CFLAGS) miniHACC-SoA.o -o $@ $(MPI_LDFLAGS)

miniHACC-SoA.o: miniHACC-SoA.cpp
	$(MPICC) $(MPI_CFLAGS) -c miniHACC-SoA.cpp

clean:
	rm -f miniHACC-AoS miniHACC-SoA *.o HACC.dat
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <stdint.h>
#include <mpi.h>
#include <mpix.h>
#include "tapioca.hpp"
#define RED "\x1b[31m"
#define GREEN "\x1b[32m"
#define BLUE "\x1b[34m"
#define RESET "\x1b[0m"
int main (int argc, char * argv[])
{
int world_numtasks, world_myrank, mycolor, mykey, sub_numtasks, sub_myrank, i;
int64_t num_particles = 25000;
int64_t sub_particles, tot_particles, particle_size, file_size, tot_size;
int64_t scan_size = 0, offset, hdr = 0;
double start_time, end_time, tot_time, max_time;
double io_bw;
MPI_Comm sub_comm;
MPI_File file_handle;
MPI_Status status;
char output[100];
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &world_numtasks);
MPI_Comm_rank(MPI_COMM_WORLD, &world_myrank);
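/* Group ranks by their BG/Q I/O link id: one sub-communicator, and one output file, per group */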
mycolor = MPIX_IO_link_id ();
mykey = world_myrank;
MPI_Comm_split (MPI_COMM_WORLD, mycolor, mykey, &sub_comm);
MPI_Comm_size(sub_comm, &sub_numtasks);
MPI_Comm_rank(sub_comm, &sub_myrank);
snprintf (output, 100, "/projects/visualization/ftessier/debug/HACC-AOS-%08d.dat", mycolor);
/*****************/
/* WRITE */
/*****************/
float *xx, *yy, *zz, *vx, *vy, *vz, *phi;
int64_t* pid;
uint16_t* mask;
xx = new float[num_particles];
yy = new float[num_particles];
zz = new float[num_particles];
vx = new float[num_particles];
vy = new float[num_particles];
vz = new float[num_particles];
phi = new float[num_particles];
pid = new int64_t[num_particles];
mask = new uint16_t[num_particles];
for (uint64_t i = 0; i< num_particles; i++)
{
xx[i] = (float)i;
yy[i] = (float)i;
zz[i] = (float)i;
vx[i] = (float)i;
vy[i] = (float)i;
vz[i] = (float)i;
phi[i] = (float)i;
pid[i] = (int64_t)i;
mask[i] = (uint16_t)world_myrank;
}
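/* sub_particles: particles written to this sub-communicator's file; tot_particles: particles across all ranks */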
MPI_Allreduce(&num_particles, &sub_particles, 1, MPI_LONG_LONG, MPI_SUM, sub_comm);
MPI_Allreduce(&num_particles, &tot_particles, 1, MPI_LONG_LONG, MPI_SUM, MPI_COMM_WORLD);
particle_size = (7 * sizeof(float)) + sizeof(int64_t) + sizeof(uint16_t);
file_size = particle_size * sub_particles;
tot_size = particle_size * tot_particles;
if (sub_myrank == 0) {
MPI_File_open(MPI_COMM_SELF, output,
MPI_MODE_WRONLY | MPI_MODE_CREATE, MPI_INFO_NULL, &file_handle);
MPI_File_set_size(file_handle, file_size);
MPI_File_close (&file_handle);
}
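/* Exclusive prefix sum over sub_comm: scan_size = number of particles owned by lower ranks, i.e. this rank's particle offset within its file */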
MPI_Exscan (&num_particles, &scan_size, 1, MPI_LONG_LONG, MPI_SUM, sub_comm);
if (0 == sub_myrank) {
fprintf (stdout, GREEN "[INFO]" RESET " [%08d] Write output file (AoS data layout)\n", mycolor);
fprintf (stdout, GREEN "[INFO]" RESET " [%08d] --> %lld particles per rank\n", mycolor, num_particles);
fprintf (stdout, GREEN "[INFO]" RESET " [%08d] --> File size: %.2f MB (%lld particles)\n",
mycolor, (double)file_size/(1024*1024), sub_particles);
}
/*****************/
/* INIT TAPIOCA */
/*****************/
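/* Describe this rank's data to TAPIOCA as 9 chunks (xx, yy, zz, vx, vy, vz, phi, pid, mask);
   each chunk is placed right after the previous one, starting at hdr + scan_size * particle_size */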
Tapioca tp;
int64_t chunkCount[9], chunkOffset[9];
int chunkSize[9];
for ( i = 0; i < 9; i++ ) {
chunkCount[i] = num_particles;
}
chunkSize[0] = sizeof(float);
chunkSize[1] = sizeof(float);
chunkSize[2] = sizeof(float);
chunkSize[3] = sizeof(float);
chunkSize[4] = sizeof(float);
chunkSize[5] = sizeof(float);
chunkSize[6] = sizeof(float);
chunkSize[7] = sizeof(int64_t);
chunkSize[8] = sizeof(uint16_t);
chunkOffset[0] = hdr + scan_size * particle_size;
for ( i = 1; i < 9; i++ ) {
chunkOffset[i] = chunkOffset[i - 1] + chunkCount[i - 1] * chunkSize[i - 1];
}
tp.Initialize (chunkCount, chunkSize, chunkOffset, 9, hdr, ARRAY_OF_STRUCTURES, sub_comm);
/*****************/
start_time = MPI_Wtime();
MPI_File_open(sub_comm, output,
MPI_MODE_WRONLY, MPI_INFO_NULL, &file_handle);
offset = scan_size * particle_size;
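/* Commit each field through TAPIOCA at the offsets declared to Initialize; the offset advances by num_particles * sizeof(field) after each call */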
tp.Commit (file_handle, offset, xx, num_particles, MPI_FLOAT, &status);
offset += num_particles * sizeof(float);
tp.Commit (file_handle, offset, yy, num_particles, MPI_FLOAT, &status);
offset += num_particles * sizeof(float);
tp.Commit (file_handle, offset, zz, num_particles, MPI_FLOAT, &status);
offset += num_particles * sizeof(float);
tp.Commit (file_handle, offset, vx, num_particles, MPI_FLOAT, &status);
offset += num_particles * sizeof(float);
tp.Commit (file_handle, offset, vy, num_particles, MPI_FLOAT, &status);
offset += num_particles * sizeof(float);
tp.Commit (file_handle, offset, vz, num_particles, MPI_FLOAT, &status);
offset += num_particles * sizeof(float);
tp.Commit (file_handle, offset, phi, num_particles, MPI_FLOAT, &status);
offset += num_particles * sizeof(float);
tp.Commit (file_handle, offset, pid, num_particles, MPI_LONG_LONG, &status);
offset += num_particles * sizeof(int64_t);
tp.Commit (file_handle, offset, mask, num_particles, MPI_UNSIGNED_SHORT, &status);
MPI_File_close (&file_handle);
end_time = MPI_Wtime();
tot_time = end_time - start_time;
MPI_Reduce (&tot_time, &max_time, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
if (0 == world_myrank) {
io_bw = (double)tot_size / max_time / (1024 * 1024);
fprintf (stdout, BLUE "[TIMING]" RESET " Write I/O bandwidth: %.2f MBps (%.2f MB in %.2f ms)\n",
io_bw, (double)tot_size/(1024*1024), max_time * 1000);
}
MPI_Barrier (MPI_COMM_WORLD);
/*****************/
/* READ */
/*****************/
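/* Read the file back with plain collective MPI-IO (no TAPIOCA) into separate buffers for verification */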
float *xx_r, *yy_r, *zz_r, *vx_r, *vy_r, *vz_r, *phi_r;
int64_t* pid_r;
uint16_t* mask_r;
xx_r = new float[num_particles];
yy_r = new float[num_particles];
zz_r = new float[num_particles];
vx_r = new float[num_particles];
vy_r = new float[num_particles];
vz_r = new float[num_particles];
phi_r = new float[num_particles];
pid_r = new int64_t[num_particles];
mask_r = new uint16_t[num_particles];
start_time = MPI_Wtime();
MPI_File_open(sub_comm, output,
MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handle);
if (0 == sub_myrank)
fprintf (stdout, GREEN "[INFO]" RESET " [%08d] Read output file\n", mycolor);
offset = scan_size * particle_size;
MPI_File_read_at_all (file_handle, offset, xx_r, num_particles, MPI_FLOAT, &status);
offset += num_particles * sizeof(float);
MPI_File_read_at_all (file_handle, offset, yy_r, num_particles, MPI_FLOAT, &status);
offset += num_particles * sizeof(float);
MPI_File_read_at_all (file_handle, offset, zz_r, num_particles, MPI_FLOAT, &status);
offset += num_particles * sizeof(float);
MPI_File_read_at_all (file_handle, offset, vx_r, num_particles, MPI_FLOAT, &status);
offset += num_particles * sizeof(float);
MPI_File_read_at_all (file_handle, offset, vy_r, num_particles, MPI_FLOAT, &status);
offset += num_particles * sizeof(float);
MPI_File_read_at_all (file_handle, offset, vz_r, num_particles, MPI_FLOAT, &status);
offset += num_particles * sizeof(float);
MPI_File_read_at_all (file_handle, offset, phi_r, num_particles, MPI_FLOAT, &status);
offset += num_particles * sizeof(float);
MPI_File_read_at_all (file_handle, offset, pid_r, num_particles, MPI_LONG_LONG, &status);
offset += num_particles * sizeof(int64_t);
MPI_File_read_at_all (file_handle, offset, mask_r, num_particles, MPI_UNSIGNED_SHORT, &status);
MPI_File_close (&file_handle);
end_time = MPI_Wtime();
tot_time = end_time - start_time;
MPI_Reduce (&tot_time, &max_time, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
if (0 == world_myrank) {
io_bw = (double)tot_size / max_time / (1024 * 1024);
fprintf (stdout, BLUE "[TIMING]" RESET " Read I/O bandwidth: %.2f MBps (%.2f MB in %.2f ms)\n",
io_bw, (double)tot_size/(1024*1024), max_time * 1000);
}
/*****************/
/* VERIFICATION */
/*****************/
for (uint64_t i = 0; i< num_particles; i++) {
if ((xx[i] != xx_r[i]) || (yy[i] != yy_r[i]) || (zz[i] != zz_r[i])
|| (vx[i] != vx_r[i]) || (vy[i] != vy_r[i]) || (vz[i] != vz_r[i])
|| (phi[i] != phi_r[i])|| (pid[i] != pid_r[i]) || (mask[i] != mask_r[i]))
{
fprintf (stdout, RED "[ERROR]" RESET " Wrong value for particle %llu\n", (unsigned long long)i);
MPI_Abort (MPI_COMM_WORLD, -1);
}
}
if (0 == sub_myrank)
fprintf (stdout, GREEN "[INFO]" RESET " [%08d] Content verified and consistent\n", mycolor);
/*****************/
/* FREE */
/*****************/
delete [] xx;
delete [] xx_r;
delete [] yy;
delete [] yy_r;
delete [] zz;
delete [] zz_r;
delete [] vx;
delete [] vx_r;
delete [] vy;
delete [] vy_r;
delete [] vz;
delete [] vz_r;
delete [] phi;
delete [] phi_r;
delete [] pid;
delete [] pid_r;
delete [] mask;
delete [] mask_r;
MPI_Finalize ();
}
#include "tapioca.hpp" #include "tapioca.hpp"
Aggregation::Aggregation () Tapioca::Tapioca ()
{ {
this->SetDefaultValues (); this->SetDefaultValues ();
this->ParseEnvVariables (); this->ParseEnvVariables ();
} }
Aggregation::~Aggregation () Tapioca::~Tapioca ()
{ {
} }
void Aggregation::Initialize (int64_t *chunkCount, int *chunkSize, int64_t *chunkOffset, void Tapioca::Initialize (int64_t *chunkCount, int *chunkSize, int64_t *chunkOffset,
int nChunks, int64_t offset, MEMORY_LAYOUT layout, MPI_Comm comm) int nChunks, int64_t offset, MEMORY_LAYOUT layout, MPI_Comm comm)
{ {
int chunk; int chunk;
...@@ -79,7 +79,7 @@ void Aggregation::Initialize (int64_t *chunkCount, int *chunkSize, int64_t *chun ...@@ -79,7 +79,7 @@ void Aggregation::Initialize (int64_t *chunkCount, int *chunkSize, int64_t *chun
} }
void Aggregation::Finalize () void Tapioca::Finalize ()
{ {
...@@ -91,7 +91,7 @@ void Aggregation::Finalize () ...@@ -91,7 +91,7 @@ void Aggregation::Finalize ()
} }
int Aggregation::Commit (MPI_File fileHandle, MPI_Offset offset, void *buf, int Tapioca::Commit (MPI_File fileHandle, MPI_Offset offset, void *buf,
int count, MPI_Datatype datatype, MPI_Status *status, int64_t bufOffset) int count, MPI_Datatype datatype, MPI_Status *status, int64_t bufOffset)
{ {
int retval, i, c, targetRoundIdx, targetAggrIdx, targetGlobAggr; int retval, i, c, targetRoundIdx, targetAggrIdx, targetGlobAggr;
...@@ -198,7 +198,7 @@ int Aggregation::Commit (MPI_File fileHandle, MPI_Offset offset, void *buf, ...@@ -198,7 +198,7 @@ int Aggregation::Commit (MPI_File fileHandle, MPI_Offset offset, void *buf,
} }
void Aggregation::Push (MPI_File fileHandle, MPI_Status *status) void Tapioca::Push (MPI_File fileHandle, MPI_Status *status)
{ {
int64_t offset, dataSize; int64_t offset, dataSize;
int win, buffer; int win, buffer;
...@@ -243,7 +243,7 @@ void Aggregation::Push (MPI_File fileHandle, MPI_Status *status) ...@@ -243,7 +243,7 @@ void Aggregation::Push (MPI_File fileHandle, MPI_Status *status)
} }
} }
void Aggregation::iPush (MPI_File fileHandle, MPI_Request *request) void Tapioca::iPush (MPI_File fileHandle, MPI_Request *request)
{ {
int64_t offset, dataSize; int64_t offset, dataSize;
int win, buffer; int win, buffer;
...@@ -289,7 +289,7 @@ void Aggregation::iPush (MPI_File fileHandle, MPI_Request *request) ...@@ -289,7 +289,7 @@ void Aggregation::iPush (MPI_File fileHandle, MPI_Request *request)
} }
void Aggregation::GlobalFence () void Tapioca::GlobalFence ()
{ {
int buffer; int buffer;
...@@ -323,7 +323,7 @@ void Aggregation::GlobalFence () ...@@ -323,7 +323,7 @@ void Aggregation::GlobalFence ()
/***********************/ /***********************/
/* INITIALIZATION */ /* INITIALIZATION */
/***********************/ /***********************/
void Aggregation::SetDefaultValues () void Tapioca::SetDefaultValues ()
{ {
this->rankDataSize_ = 0; this->rankDataSize_ = 0;
this->strategy_ = SHORTEST_PATH; this->strategy_ = SHORTEST_PATH;
...@@ -339,7 +339,7 @@ void Aggregation::SetDefaultValues () ...@@ -339,7 +339,7 @@ void Aggregation::SetDefaultValues ()
} }
void Aggregation::ParseEnvVariables () void Tapioca::ParseEnvVariables ()
{ {
char *envStrategy = getenv("AGGR_STRATEGY"); char *envStrategy = getenv("AGGR_STRATEGY");
char *envNAggr = getenv("AGGR_NBAGGR"); char *envNAggr = getenv("AGGR_NBAGGR");
...@@ -375,7 +375,7 @@ void Aggregation::ParseEnvVariables () ...@@ -375,7 +375,7 @@ void Aggregation::ParseEnvVariables ()
} }
void Aggregation::SetCommValues () void Tapioca::SetCommValues ()
{ {
MPI_Comm_rank (this->subComm_, &this->commRank_); MPI_Comm_rank (this->subComm_, &this->commRank_);
MPI_Comm_size (this->subComm_, &this->commSize_); MPI_Comm_size (this->subComm_, &this->commSize_);
...@@ -385,7 +385,7 @@ void Aggregation::SetCommValues () ...@@ -385,7 +385,7 @@ void Aggregation::SetCommValues ()
} }
void Aggregation::SetOffsets () void Tapioca::SetOffsets ()
{ {
MPI_Exscan (&this->rankDataSize_, &this->offsetInAggrData_, 1, MPI_Exscan (&this->rankDataSize_, &this->offsetInAggrData_, 1,
MPI_LONG_LONG, MPI_SUM, this->subComm_); MPI_LONG_LONG, MPI_SUM, this->subComm_);
...@@ -394,7 +394,7 @@ void Aggregation::SetOffsets () ...@@ -394,7 +394,7 @@ void Aggregation::SetOffsets ()
} }
void Aggregation::SetNodesList () void Tapioca::SetNodesList ()
{ {
int *coords, *myCoords, i, worldSize, dimensions; int *coords, *myCoords, i, worldSize, dimensions;
...@@ -417,13 +417,13 @@ void Aggregation::SetNodesList () ...@@ -417,13 +417,13 @@ void Aggregation::SetNodesList ()
/***********************/ /***********************/
/* AGGREGATION */ /* AGGREGATION */
/***********************/ /***********************/
int Aggregation::NumberOfAggregators () int Tapioca::NumberOfAggregators ()
{ {
return 0; return 0;
} }
void Aggregation::IdentifyMyAggregators () void Tapioca::IdentifyMyAggregators ()
{ {
int i, j, c, globalRoundId, upperBound, index = 0; int i, j, c, globalRoundId, upperBound, index = 0;
int64_t remainingData, offsetInAggrData; int64_t remainingData, offsetInAggrData;
...@@ -494,7 +494,7 @@ void Aggregation::IdentifyMyAggregators () ...@@ -494,7 +494,7 @@ void Aggregation::IdentifyMyAggregators ()
} }
void Aggregation::ElectAggregators () void Tapioca::ElectAggregators ()
{ {
int aggr, aggrRank, rankAggrComm, sizeAggrComm, aggrRankAggrComm, i, j, aggrCoords, worldSize; int aggr, aggrRank, rankAggrComm, sizeAggrComm, aggrRankAggrComm, i, j, aggrCoords, worldSize;
int64_t color; int64_t color;
...@@ -614,11 +614,13 @@ void Aggregation::ElectAggregators () ...@@ -614,11 +614,13 @@ void Aggregation::ElectAggregators ()
} }
} }
#ifdef TIMING
this->PrintTime( 0, totTime, " |-> Create subcommunicator"); this->PrintTime( 0, totTime, " |-> Create subcommunicator");
#endif
} }
int64_t Aggregation::DataSizeSentToAggr (int aggrId) int64_t Tapioca::DataSizeSentToAggr (int aggrId)
{ {
int i; int i;
int64_t dataSize = 0; int64_t dataSize = 0;
...@@ -631,7 +633,7 @@ int64_t Aggregation::DataSizeSentToAggr (int aggrId) ...@@ -631,7 +633,7 @@ int64_t Aggregation::DataSizeSentToAggr (int aggrId)
} }
void Aggregation::InitAggregators () void Tapioca::InitAggregators ()
{ {
int aggr, retval; int aggr, retval;
...@@ -668,7 +670,7 @@ void Aggregation::InitAggregators () ...@@ -668,7 +670,7 @@ void Aggregation::InitAggregators ()
/***********************/ /***********************/
/* PLACEMENT */ /* PLACEMENT */
/***********************/ /***********************/
int Aggregation::RankShortestPath (MPI_Comm aggrComm, int64_t dataSize) int Tapioca::RankShortestPath (MPI_Comm aggrComm, int64_t dataSize)
{ {
int commRank, aggrRank, aggrPrank, ppn, nodeId; int commRank, aggrRank, aggrPrank, ppn, nodeId;
struct { int hops; int rank; } hopsToIONnode, shortestPath; struct { int hops; int rank; } hopsToIONnode, shortestPath;
...@@ -701,7 +703,7 @@ int Aggregation::RankShortestPath (MPI_Comm aggrComm, int64_t dataSize) ...@@ -701,7 +703,7 @@ int Aggregation::RankShortestPath (MPI_Comm aggrComm, int64_t dataSize)
} }
int Aggregation::RankLongestPath (MPI_Comm aggrComm, int64_t dataSize) int Tapioca::RankLongestPath (MPI_Comm aggrComm, int64_t dataSize)
{ {
int commRank, aggrRank; int commRank, aggrRank;
struct { int hops; int rank; } hopsToIONnode, longestPath; struct { int hops; int rank; } hopsToIONnode, longestPath;
...@@ -734,7 +736,7 @@ int Aggregation::RankLongestPath (MPI_Comm aggrComm, int64_t dataSize) ...@@ -734,7 +736,7 @@ int Aggregation::RankLongestPath (MPI_Comm aggrComm, int64_t dataSize)
} }
int Aggregation::RankTopologyAware (MPI_Comm aggrComm, int64_t dataSize) int Tapioca::RankTopologyAware (MPI_Comm aggrComm, int64_t dataSize)
{ {
struct { double cost; int rank; } aggrCost, minCost; struct { double cost; int rank; } aggrCost, minCost;
int aggrCommRank, aggrCommSize, worldRank, rank, distance, dim, hops, aggrRank; int aggrCommRank, aggrCommSize, worldRank, rank, distance, dim, hops, aggrRank;
...@@ -789,7 +791,7 @@ int Aggregation::RankTopologyAware (MPI_Comm aggrComm, int64_t dataSize) ...@@ -789,7 +791,7 @@ int Aggregation::RankTopologyAware (MPI_Comm aggrComm, int64_t dataSize)
} }
int Aggregation::RankUniformDistribution (MPI_Comm aggrComm, int64_t dataSize) int Tapioca::RankUniformDistribution (MPI_Comm aggrComm, int64_t dataSize)
{ {
int aggrRank, aggrCommRank, rootRank = 0, rootCoords; int aggrRank, aggrCommRank, rootRank = 0, rootCoords;
...@@ -821,7 +823,7 @@ int Aggregation::RankUniformDistribution (MPI_Comm aggrComm, int64_t dataSize) ...@@ -821,7 +823,7 @@ int Aggregation::RankUniformDistribution (MPI_Comm aggrComm, int64_t dataSize)
} }
int Aggregation::RankContentionAware (MPI_Comm aggrComm, int64_t dataSize) int Tapioca::RankContentionAware (MPI_Comm aggrComm, int64_t dataSize)
{ {
struct { double cost; int rank; } aggrCost, minCost; struct { double cost; int rank; } aggrCost, minCost;
int aggrCommRank, aggrCommSize, worldRank, rank, distance, interRanks; int aggrCommRank, aggrCommSize, worldRank, rank, distance, interRanks;
...@@ -947,7 +949,7 @@ int Aggregation::RankContentionAware (MPI_Comm aggrComm, int64_t dataSize) ...@@ -947,7 +949,7 @@ int Aggregation::RankContentionAware (MPI_Comm aggrComm, int64_t dataSize)
} }
int Aggregation::CoordsToInt (int *coords, int dim) int Tapioca::CoordsToInt (int *coords, int dim)
{ {
int i, res = 0; int i, res = 0;
...@@ -960,7 +962,7 @@ int Aggregation::CoordsToInt (int *coords, int dim) ...@@ -960,7 +962,7 @@ int Aggregation::CoordsToInt (int *coords, int dim)
/***********************/ /***********************/
/* MISC. */ /* MISC. */
/***********************/ /***********************/
const char* Aggregation::getStrategyName () const char* Tapioca::getStrategyName ()
{ {
switch (this->strategy_) switch (this->strategy_)
{ {
...@@ -974,7 +976,7 @@ const char* Aggregation::getStrategyName () ...@@ -974,7 +976,7 @@ const char* Aggregation::getStrategyName ()
} }
void Aggregation::HandleMPIError (int retval) void Tapioca::HandleMPIError (int retval)
{ {
#ifdef DEBUG #ifdef DEBUG
char msg[MPI_MAX_ERROR_STRING]; char msg[MPI_MAX_ERROR_STRING];
...@@ -989,7 +991,7 @@ void Aggregation::HandleMPIError (int retval) ...@@ -989,7 +991,7 @@ void Aggregation::HandleMPIError (int retval)
} }
void Aggregation::PrintTime ( double startTime, double endTime, char* func ) void Tapioca::PrintTime ( double startTime, double endTime, char* func )
{ {
double totTime, avgTime, minTime, maxTime; double totTime, avgTime, minTime, maxTime;
int commSize, commRank; int commSize, commRank;
...@@ -1010,7 +1012,7 @@ void Aggregation::PrintTime ( double startTime, double endTime, char* func ) ...@@ -1010,7 +1012,7 @@ void Aggregation::PrintTime ( double startTime, double endTime, char* func )
} }
void Aggregation::MPIIOInfo ( MPI_File fileHandle ) void Tapioca::MPIIOInfo ( MPI_File fileHandle )
{ {
MPI_Info info; MPI_Info info;
int flag; int flag;
......
@@ -6,9 +6,6 @@
#define BANDWIDTH 1800000
#define NBUFFERS 2
-#define DEBUG 1
-#define TIMING 1
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
@@ -47,11 +44,11 @@ struct Round
};
-class Aggregation
+class Tapioca
{
public:
-Aggregation ();
-~Aggregation ();
+Tapioca ();
+~Tapioca ();
void Initialize (int64_t *chunkCount, int *chunkSize, int64_t *chunkOffset,
int nChunks, int64_t offset, MEMORY_LAYOUT layout, MPI_Comm comm);
...
@@ -12,7 +12,7 @@ class iTopology {
virtual int ProcessPerNode () = 0;
virtual int IONodeId () = 0;
virtual int BridgeNodeId () = 0;
+// ComputeNodeId
/**********************/
/* |-- Network */
@@ -21,9 +21,13 @@ class iTopology {
/* |---- Coordinates */
virtual void RankToCoordinates ( int rank, int* coord ) = 0;
+// Number of IO nodes