Commit 9fd32834 authored by Francois Tessier

Keep the selected aggregators between I/O transactions. Clear arrays.

parent c7dedd53
@@ -129,6 +129,10 @@ void Tapioca::Finalize ()
     free (this->chunkOffset_);
     this->excludedNode.clear();
+    this->dataSize.clear();
+    this->globalAggregatorsRanks.clear();
+    this->roundsIds.clear();
+    this->commDataSize_ = 0;
     for ( i = 0; i < this->nBuffers_; i++ ) {
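The Finalize() hunk above resets per-transaction bookkeeping so the next I/O transaction starts from a clean state, while the aggregator selection itself is kept, per the commit message. A minimal sketch of that idea follows; only the member names come from the diff, and the container types and the role of `amAnAggr_` are assumptions.

```cpp
// Hedged sketch: wipe per-transaction state but keep the aggregator choice.
// Container types are assumptions; only the member names appear in the diff.
#include <cstdint>
#include <set>
#include <vector>

class TapiocaSketch {
public:
  void Finalize () {
    // Per-transaction state is cleared so the next transaction starts clean...
    excludedNode.clear ();
    dataSize.clear ();
    globalAggregatorsRanks.clear ();
    roundsIds.clear ();
    commDataSize_ = 0;
    // ...while the aggregator selection (represented here by amAnAggr_) is
    // deliberately left untouched, so the same aggregators can be reused for
    // the next I/O transaction.
  }

private:
  std::set<int>        excludedNode;            // nodes excluded from aggregation (assumed type)
  std::vector<int64_t> dataSize;                // per-round data sizes (assumed type)
  std::vector<int>     globalAggregatorsRanks;  // aggregator ranks (assumed type)
  std::vector<int>     roundsIds;               // round identifiers (assumed type)
  int64_t              commDataSize_ = 0;       // data size of the current transaction
  bool                 amAnAggr_ = false;       // kept across transactions
};
```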
@@ -408,11 +412,14 @@ void Tapioca::KeepAggregators ()
     MPI_Barrier ( this->subComm_ );
-    if ( this->amAnAggr_ && aggrRank == 0 && this->commRank_ != 0 )
-      MPI_Send ( aggrSubCommRanks, this->nAggr_, MPI_INT, 0, 0, this->subComm_ );
-    if ( this->commRank_ == 0 )
-      MPI_Recv ( aggrSubCommRanks, this->nAggr_, MPI_INT, MPI_ANY_SOURCE, 0, this->subComm_, &status);
+    if ( ! ( this->amAnAggr_ && aggrRank == 0 && this->commRank_ == 0 ) ) {
+      if ( this->amAnAggr_ && aggrRank == 0 && this->commRank_ != 0 )
+        MPI_Send ( aggrSubCommRanks, this->nAggr_, MPI_INT, 0, 0, this->subComm_ );
+      if ( this->commRank_ == 0 )
+        MPI_Recv ( aggrSubCommRanks, this->nAggr_, MPI_INT, MPI_ANY_SOURCE, 0, this->subComm_, &status);
+    }
     MPI_Bcast ( aggrSubCommRanks, this->nAggr_, MPI_INT, 0, this->subComm_ );
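In the previous KeepAggregators() code, rank 0 of the sub-communicator always posted a blocking MPI_Recv, while the matching MPI_Send was only issued when the first aggregator was some rank other than 0; if rank 0 was itself that aggregator, the receive would never be matched. The new guard appears to skip the Send/Recv pair in exactly that case, since rank 0 already holds the list, and MPI_Bcast then distributes it as before. Below is a self-contained sketch of the same guard pattern; the names (ownerRank, values) are illustrative and not from the Tapioca source.

```cpp
// Hedged sketch of a gather-then-broadcast step guarded against a
// self-receive hang when the root already owns the data.
#include <mpi.h>
#include <cstdio>

int main (int argc, char **argv) {
  MPI_Init (&argc, &argv);

  int rank, size;
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
  MPI_Comm_size (MPI_COMM_WORLD, &size);

  int ownerRank = size - 1;        // rank that holds the up-to-date list
  int values[4] = {0, 0, 0, 0};
  if (rank == ownerRank)
    for (int i = 0; i < 4; i++) values[i] = i + 1;

  // Without this guard, rank 0 would post a blocking MPI_Recv that no one
  // matches whenever rank 0 is itself the owner (e.g. with a single rank).
  if (ownerRank != 0) {
    if (rank == ownerRank)
      MPI_Send (values, 4, MPI_INT, 0, 0, MPI_COMM_WORLD);
    if (rank == 0) {
      MPI_Status status;
      MPI_Recv (values, 4, MPI_INT, MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
    }
  }

  // Rank 0 now holds the list in every case; share it with all ranks.
  MPI_Bcast (values, 4, MPI_INT, 0, MPI_COMM_WORLD);

  printf ("rank %d sees values[3] = %d\n", rank, values[3]);
  MPI_Finalize ();
  return 0;
}
```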
@@ -20,7 +20,7 @@ int Tapioca::Write (MPI_Offset offset, void *buf, int count, MPI_Datatype dataty
     targetAggrIdx = (*this->chunksIndexMatching[c].begin());
     if ( this->chunksIndexMatching[c].size() > 1 ) {
       multipleRounds = true;
-      subChunkDataSize = this->dataSize[targetRoundIdx];
+      subChunkDataSize = this->dataSize[targetRoundIdx];
       this->chunksIndexMatching[c].erase ( this->chunksIndexMatching[c].begin() );
     }
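The Write() hunk above handles a chunk whose index matches more than one aggregation round: the size of the piece handled in the current round is read from the per-round dataSize table, and the consumed entry of chunksIndexMatching is erased so later rounds process the remainder. A rough standalone sketch of that pattern, with assumed container types, an assumed surrounding loop, and made-up sizes:

```cpp
// Hedged sketch: split one chunk across several rounds, taking each
// sub-chunk's size from a per-round table and consuming the round entry.
#include <cstdint>
#include <cstdio>
#include <vector>

int main () {
  std::vector<int64_t> dataSize      = {4096, 2048, 1024};  // bytes per round (made-up values)
  std::vector<int>     indexMatching = {0, 2};              // rounds this chunk touches

  while (!indexMatching.empty ()) {
    int  targetRoundIdx = indexMatching.front ();
    bool multipleRounds = indexMatching.size () > 1;

    // Size of the piece of the chunk handled in this round.
    int64_t subChunkDataSize = dataSize[targetRoundIdx];

    std::printf ("round %d: %lld bytes%s\n", targetRoundIdx,
                 (long long) subChunkDataSize,
                 multipleRounds ? " (more rounds pending)" : "");

    // Consume this round so the next iteration handles the remainder.
    indexMatching.erase (indexMatching.begin ());
  }
  return 0;
}
```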