Commit b58ff105 authored by Francois Tessier

Init data movements and set aggregation and target tiers

parent 1e71b62f
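
The new interface splits setup into three calls: Init registers the chunk layout and duplicates the communicator, setAggregationTier allocates the two aggregation buffers (memBuffer0 and memBuffer1) on a chosen memory tier, and setTargetTier allocates the target buffer (memTarget). A minimal caller sketch follows; the header name, chunk layout, tier choice, buffer size and target path are illustrative assumptions, not values taken from this commit.

// Usage sketch for the new interface. Chunk layout, tier, size and path are
// made-up example values; the include name "tapioca.hpp" is assumed.
#include <mpi.h>
#include "tapioca.hpp"

int main (int argc, char *argv[])
{
  MPI_Init (&argc, &argv);

  // One contiguous chunk of 1024 doubles starting at file offset 0 (example values).
  int64_t chunkCount[1]  = { 1024 };
  int     chunkSize[1]   = { (int) sizeof(double) };
  int64_t chunkOffset[1] = { 0 };

  Tapioca tp;
  tp.Init (chunkCount, chunkSize, chunkOffset, 1, 0 /* header */, MPI_COMM_WORLD);

  // Aggregation buffers on a memory tier; HBM with an empty file name is what
  // the old InitAggregators used.
  tp.setAggregationTier (HBM, (char *) "");

  // Target buffer; the tier constant, 1 MiB size and path are assumptions.
  tp.setTargetTier (HBM, 1 << 20, (char *) "/path/to/target/file");

  double data[1024] = { 0 };
  MPI_Status status;
  tp.Write (0, data, 1024, MPI_DOUBLE, &status);

  tp.Finalize ();
  MPI_Finalize ();
  return 0;
}

Because both set*Tier calls take a mem_t, the same Init/Write/Read path can place aggregation and target buffers on different tiers simply by passing different tier values.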
@@ -9,6 +9,79 @@ Tapioca::~Tapioca ()
{
}

void Tapioca::Init (int64_t *chunkCount, int *chunkSize, int64_t *chunkOffset,
                    int nChunks, int64_t header, MPI_Comm comm)
{
  int chunk;

#ifdef TIMING
  double startInitTime, endInitTime, startElectTime, endElectTime;
  startInitTime = MPI_Wtime();
#endif

  this->SetDefaultValues ();
  this->ParseEnvVariables ();

  this->nChunks_ = nChunks;
  this->chunksIndexMatching.resize (this->nChunks_);

  this->chunkCount_ = (int64_t *)malloc (this->nChunks_ * sizeof(int64_t));
  this->chunkSize_ = (int *)malloc (this->nChunks_ * sizeof(int));
  this->chunkOffset_ = (int64_t *)malloc (this->nChunks_ * sizeof(int64_t));

  memcpy (this->chunkCount_, chunkCount, this->nChunks_ * sizeof(int64_t));
  memcpy (this->chunkSize_, chunkSize, this->nChunks_ * sizeof(int));
  memcpy (this->chunkOffset_, chunkOffset, this->nChunks_ * sizeof(int64_t));

  for ( chunk = 0; chunk < this->nChunks_; chunk++ )
    this->rankDataSize_ += this->chunkCount_[chunk] * this->chunkSize_[chunk];

  this->offsetInFile_ = header;

  MPI_Comm_dup (comm, &this->subComm_);

  this->SetCommValues ();
  this->SetOffsets ();

#ifdef DBG
  if (this->commRank_ == MASTER) {
    fprintf (stdout, "[DEBUG] #Aggr = %d \n", this->nAggr_);
    fprintf (stdout, "[DEBUG] bufferSize = %lld \n", this->bufferSize_);
    fprintf (stdout, "[DEBUG] commDataSize = %lld \n", this->commDataSize_);
    fprintf (stdout, "[DEBUG] strategy = %s \n", this->getStrategyName ());
  }
#endif

  this->SetNodesList ();
  this->IdentifyMyAggregators ();

#ifdef TIMING
  startElectTime = MPI_Wtime();
#endif

  this->ElectAggregators ();

#ifdef TIMING
  endElectTime = MPI_Wtime();
#endif

#ifdef TIMING
  endInitTime = MPI_Wtime();
  this->PrintTime(startInitTime, endInitTime, "Initialize");
  this->PrintTime(startElectTime, endElectTime, " |-> Elect aggregators");
#endif
}

void Tapioca::setAggregationTier ( mem_t mem, char* fileName )
{
  this->memBuffer0.memAlloc ( this->bufferSize_, mem, this->amAnAggr_, fileName, this->subComm_ );
  this->memBuffer1.memAlloc ( this->bufferSize_, mem, this->amAnAggr_, fileName, this->subComm_ );
}

void Tapioca::setTargetTier ( mem_t mem, int64_t buffSize, char* fileName )
{
  this->memTarget.memAlloc ( buffSize, mem, this->amAnAggr_, fileName, this->subComm_ );
}

void Tapioca::Finalize ()
{
@@ -22,6 +95,7 @@ void Tapioca::Finalize ()
  this->memBuffer0.memFree ();
  this->memBuffer1.memFree ();
  this->memTarget.memFree ();

  MPI_Comm_free (&this->subComm_);
}
@@ -409,22 +483,6 @@ int64_t Tapioca::DataSizeSentToAggr (int aggrId)
}
void Tapioca::InitAggregators ()
{
  int aggr, retval;

  this->memBuffer0.memAlloc ( this->bufferSize_, HBM, this->amAnAggr_, "", this->subComm_ );
  this->memBuffer1.memAlloc ( this->bufferSize_, HBM, this->amAnAggr_, "", this->subComm_ );

#ifdef DBG
  if (this->commRank_ == MASTER) {
    fprintf (stdout, "[DEBUG] %d RMA windows created (%d aggr., %d buffers)\n",
             NBUFFERS, this->nAggr_, NBUFFERS);
  }
#endif
}

int Tapioca::CoordsToInt (int *coords, int dim)
{
  int i, res = 0;
@@ -51,16 +51,16 @@ class Tapioca
  Tapioca ();
  ~Tapioca ();

  void WriteInitialize (char *filename, int64_t *chunkCount, int *chunkSize, int64_t *chunkOffset,
                        int nChunks, int64_t offset, MEMORY_LAYOUT layout, MPI_Comm comm);
  int Write (MPI_File fileHandle, MPI_Offset offset,
             void *buf, int count, MPI_Datatype datatype,
  void Init (int64_t *chunkCount, int *chunkSize, int64_t *chunkOffset,
             int nChunks, int64_t header, MPI_Comm comm);
  void setAggregationTier ( mem_t mem, char* fileName );
  void setTargetTier ( mem_t mem, int64_t buffSize, char* fileName );
  int Write ( MPI_Offset offset, void *buf, int count, MPI_Datatype datatype,
              MPI_Status *status, int64_t bufOffset = 0);

  void ReadInitialize (char *filename, int64_t *chunkCount, int *chunkSize, int64_t *chunkOffset,
                       int nChunks, int64_t offset, MEMORY_LAYOUT layout, MPI_Comm comm);
  int Read (MPI_File fileHandle, MPI_Offset offset,
            void *buf, int count, MPI_Datatype datatype,
  int Read ( MPI_Offset offset, void *buf, int count, MPI_Datatype datatype,
              MPI_Status *status, int64_t bufOffset = 0);

  void Finalize ();
@@ -89,9 +89,8 @@ class Tapioca
  void IdentifyMyAggregators ();
  void ElectAggregators ();
  int64_t DataSizeSentToAggr (int aggrId);
  void InitAggregators ();
  void Push (MPI_File fileHandle, MPI_Request *request);
  void Pull (MPI_File fileHandle, MPI_Request *request);
  void Push ();
  void Pull ();
  void GlobalFence ();

  /***********************/
@@ -125,8 +124,6 @@ class Tapioca
  int commRank_;
  int commSize_;
  char *filename_;
  int64_t rankDataSize_;
  int64_t commDataSize_;
@@ -160,14 +157,13 @@ class Tapioca
  Memory memBuffer0;
  Memory memBuffer1;
  //Memory memTarget;
  Memory memTarget;

  /* AGGREGATOR */
  bool amAnAggr_;
  int globalAggrRank_;
  bool commSplit_;
  MAPPING_STRATEGY strategy_;
  MEMORY_LAYOUT layout_;
  int64_t bufferSize_;
  int64_t aggrDataSize_;
@@ -175,8 +171,7 @@ class Tapioca
  int writeCounter_;
  bool writeDevNull_;
  MPI_File devNullFileHandle_;
  bool pipelinedBuffers_;

  /* TIMING */
  double startAggrTime, endAggrTime, totAggrTime;