Commit 427bb857 authored by Francois Tessier

Add option to not reelect a set of aggregators after the first selection

parent 88130b03
@@ -11,10 +11,10 @@ cd $HOME/TAPIOCA/examples/1D-Array
 export TAPIOCA_DEVNULL=false
 export TAPIOCA_COMMSPLIT=true
 export TAPIOCA_STRATEGY=TOPOLOGY_AWARE
-export TAPIOCA_NBAGGR=2
+export TAPIOCA_NBAGGR=4
 export TAPIOCA_BUFFERSIZE=4194304
 export TAPIOCA_PIPELINING=true
-export TAPIOCA_REELECTAGGR=true
+export TAPIOCA_REELECTAGGR=false
 
 function updateSettings()
 {
@@ -48,7 +48,7 @@ function outputFile ()
 PARTICLES=50000
 for RUN in 1
 do
-    for AGGR in 2
+    for AGGR in 4
     do
         export TAPIOCA_NBAGGR=$AGGR
         updateSettings
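TAPIOCA_REELECTAGGR is the switch this commit introduces: exporting it as false tells TAPIOCA to keep the aggregators elected on the first selection. The code that parses the variable is not visible in this diff; the snippet below is a minimal, hypothetical sketch of how such a boolean TAPIOCA_* setting could be read, with illustrative names only (readBoolEnv is not a TAPIOCA function).

```cpp
#include <cstdio>
#include <cstdlib>
#include <cstring>

// Hypothetical helper (not TAPIOCA code): read a boolean TAPIOCA_* environment
// variable, falling back to a default when the variable is unset.
static bool readBoolEnv (const char *name, bool defaultValue)
{
  const char *value = std::getenv (name);
  if (value == NULL)
    return defaultValue;
  return (std::strcmp (value, "true") == 0);
}

int main ()
{
  // Re-election stays enabled unless the run script exports
  // TAPIOCA_REELECTAGGR=false, matching the default set in the constructor.
  bool reElectAggr = readBoolEnv ("TAPIOCA_REELECTAGGR", true);
  std::printf ("reElectAggr = %s\n", reElectAggr ? "true" : "false");
  return 0;
}
```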
@@ -4,6 +4,7 @@ Tapioca::Tapioca ()
 {
   this->reElectAggr_ = true;
   this->electedAggr_ = false;
+  this->amAnAggr_ = false;
 }
@@ -59,8 +60,9 @@ void Tapioca::Init (int64_t *chunkCount, int *chunkSize, int64_t *chunkOffset,
   startElectTime = MPI_Wtime();
 #endif
 
-  if ( this->reElectAggr_ )
+  if ( this->reElectAggr_ ) {
     this->ElectAggregators ();
+  }
   else {
     if ( !this->electedAggr_ ) {
       this->ElectAggregators ();
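In the gated Init above, keeping re-election enabled preserves the original behaviour (a fresh election on every call), while disabling it elects only while electedAggr_ is still false; the hunk is truncated, but later calls presumably fall through to the KeepAggregators path added further down. A condensed, non-TAPIOCA sketch of that gating, with simplified names and stub election functions:

```cpp
#include <cstdio>

// Toy model of the election gating (not the actual TAPIOCA code): elect on
// every call when re-election is enabled, otherwise elect exactly once and
// then reuse the previously selected set.
struct ElectionState { bool reElectAggr; bool electedAggr; };

static void electAggregators (ElectionState &s) { s.electedAggr = true; std::puts ("elect"); }
static void keepAggregators  (const ElectionState &) { std::puts ("keep"); }

static void selectAggregators (ElectionState &s)
{
  if (s.reElectAggr) {
    electAggregators (s);          // re-elect every time
  }
  else {
    if (!s.electedAggr)
      electAggregators (s);        // first selection only
    else
      keepAggregators (s);         // reuse the existing aggregators
  }
}

int main ()
{
  ElectionState s = { false, false };
  selectAggregators (s);   // prints "elect"
  selectAggregators (s);   // prints "keep"
  return 0;
}
```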
@@ -174,7 +176,6 @@ void Tapioca::SetDefaultValues ()
   this->nAggr_ = 8;
   this->bufferSize_ = 16777216;
   this->nBuffers_ = 2;
-  this->amAnAggr_ = false;
   this->commSplit_ = true;
   this->currentRound_ = 0;
   this->totalRounds_ = 0;
@@ -383,20 +384,44 @@ void Tapioca::IdentifyMyAggregators ()
 void Tapioca::KeepAggregators ()
 {
-  int aggr, worldSize, color;
+  int aggr, aggrRank, aggrSize;
   int64_t dataSize;
+  int *aggrSubCommRanks;
   MPI_Comm aggrComm;
+  MPI_Status status;
 
-  MPI_Comm_size (MPI_COMM_WORLD, &worldSize);
+  aggrSubCommRanks = ( int * ) malloc ( this->nAggr_ * sizeof ( int ) );
+
+  MPI_Comm_split (this->subComm_, this->amAnAggr_, this->commRank_, &aggrComm);
+  MPI_Comm_rank ( aggrComm, &aggrRank );
+  MPI_Comm_size ( aggrComm, &aggrSize );
+
+  if ( this->amAnAggr_ ) {
+    MPI_Gather ( &this->commRank_, 1, MPI_INT, aggrSubCommRanks, 1, MPI_INT, 0, aggrComm );
+
+    if ( aggrRank == 0 ) {
+      fprintf ( stdout, "[DEBUG] Keep the previously selected aggregators : " );
+      for ( aggr = 0; aggr < this->nAggr_; aggr++ )
+        fprintf ( stdout, "%d ", aggrSubCommRanks[aggr] );
+      fprintf ( stdout, "\n" );
+    }
+  }
+
+  MPI_Barrier ( this->subComm_ );
+
+  if ( this->amAnAggr_ && aggrRank == 0 && this->commRank_ != 0 )
+    MPI_Send ( aggrSubCommRanks, this->nAggr_, MPI_INT, 0, 0, this->subComm_ );
+
+  if ( this->commRank_ == 0 )
+    MPI_Recv ( aggrSubCommRanks, this->nAggr_, MPI_INT, MPI_ANY_SOURCE, 0, this->subComm_, &status);
+
+  MPI_Bcast ( aggrSubCommRanks, this->nAggr_, MPI_INT, 0, this->subComm_ );
 
   for ( aggr = 0; aggr < this->nAggr_; aggr++ ) {
     dataSize = this->DataSizeSentToAggr (aggr);
-    MPI_Comm_split (this->subComm_, color > 0, this->commRank_, &aggrComm);
     if ( dataSize > 0 )
-      MPI_Reduce ( &dataSize, &this->aggrDataSize_, 1, MPI_LONG_LONG, MPI_SUM, this->aggregatorsRanks[aggr], this->subComm_ );
-    if ( this->commRank_ == this->aggregatorsRanks[aggr] )
+      MPI_Reduce ( &dataSize, &this->aggrDataSize_, 1, MPI_LONG_LONG, MPI_SUM, aggrSubCommRanks[aggr], this->subComm_ );
+    if ( this->commRank_ == aggrSubCommRanks[aggr] )
       this->totalWrites_ = ceil ( (double)this->aggrDataSize_ / (double)this->bufferSize_);
   }
 }
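The rewritten KeepAggregators splits subComm_ by amAnAggr_, gathers the aggregators' ranks at rank 0 of the aggregator sub-communicator, forwards the list to rank 0 of subComm_ when the two differ, and broadcasts it so every rank can keep addressing the previously elected set. Below is a self-contained sketch of that gather/forward/broadcast pattern; it is not TAPIOCA code: MPI_COMM_WORLD stands in for subComm_, and the highest-numbered ranks are used as stand-in aggregators.

```cpp
#include <mpi.h>
#include <cstdio>
#include <cstdlib>

int main (int argc, char **argv)
{
  MPI_Init (&argc, &argv);

  int rank, size;
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
  MPI_Comm_size (MPI_COMM_WORLD, &size);

  // Toy aggregator selection: the last nAggr ranks play the aggregator role.
  int nAggr    = (size >= 2) ? 2 : 1;
  int amAnAggr = (rank >= size - nAggr) ? 1 : 0;

  // Split off a sub-communicator containing only the aggregators (color 1).
  MPI_Comm aggrComm;
  MPI_Comm_split (MPI_COMM_WORLD, amAnAggr, rank, &aggrComm);

  int *aggrRanks = (int *) malloc (nAggr * sizeof (int));

  if (amAnAggr) {
    // Collect the parent-communicator ranks of all aggregators at aggregator 0.
    MPI_Gather (&rank, 1, MPI_INT, aggrRanks, 1, MPI_INT, 0, aggrComm);

    int aggrRank;
    MPI_Comm_rank (aggrComm, &aggrRank);

    // Forward the list to rank 0 if aggregator 0 is not already rank 0.
    if (aggrRank == 0 && rank != 0)
      MPI_Send (aggrRanks, nAggr, MPI_INT, 0, 0, MPI_COMM_WORLD);
  }

  if (rank == 0) {
    // Rank 0 receives the list only when it is not aggregator 0 itself.
    int aggrRank = -1;
    if (amAnAggr) MPI_Comm_rank (aggrComm, &aggrRank);
    if (aggrRank != 0)
      MPI_Recv (aggrRanks, nAggr, MPI_INT, MPI_ANY_SOURCE, 0,
                MPI_COMM_WORLD, MPI_STATUS_IGNORE);
  }

  // Everyone learns the kept aggregator ranks.
  MPI_Bcast (aggrRanks, nAggr, MPI_INT, 0, MPI_COMM_WORLD);

  if (rank == 0) {
    printf ("Kept aggregators:");
    for (int a = 0; a < nAggr; a++) printf (" %d", aggrRanks[a]);
    printf ("\n");
  }

  free (aggrRanks);
  MPI_Comm_free (&aggrComm);
  MPI_Finalize ();
  return 0;
}
```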
@@ -413,6 +438,7 @@ void Tapioca::ElectAggregators ()
   MPI_Group commGroup, aggrGroup;
   int *ranks, *join, *groupRanks, groupSize, groupRank, joinGroup;
 
+  this->amAnAggr_ = false;
 
   MPI_Comm_size (MPI_COMM_WORLD, &worldSize);
   this->aggregatorsRanks.resize ( this->globalAggregatorsRanks.size() );
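This one-line reset pairs with the removal of the same assignment from SetDefaultValues: amAnAggr_ is now initialized in the constructor and cleared only when a new election actually runs, so it survives in between and KeepAggregators can split the communicator on it. A toy, non-TAPIOCA model of that flag lifetime (the call ordering assumed here is not shown in the diff):

```cpp
#include <cstdio>

// Toy model (not TAPIOCA code): the aggregator flag is cleared when an
// election runs, not on every re-initialization, so a "keep the aggregators"
// pass can still see who was elected last time.
struct Rank {
  bool amAnAggr = false;                 // constructor hunk

  void electAggregators (bool winner) {
    amAnAggr = false;                    // reset added in ElectAggregators
    if (winner) amAnAggr = true;
  }
  void keepAggregators () const {
    std::printf ("still an aggregator: %s\n", amAnAggr ? "yes" : "no");
  }
};

int main () {
  Rank r;
  r.electAggregators (true);  // first selection: this rank wins
  r.keepAggregators ();       // later phase without re-election: prints "yes"
  return 0;
}
```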