Commit 211d5999 authored by Francois Tessier

Use the ComputeNodeId instead of an Id based on coordinates

parent 6023a1ec
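
In every aggregator-ranking routine below, the exclusion lookup `excludedNode[...]` now keys on `hostId_` (the compute node id) instead of `intCoords_` (an id packed from the node's network coordinates). A minimal standalone sketch of the two id schemes; the 5-D packing, the dimensions, and the stub `ComputeNodeId()` are illustrative assumptions, not TAPIOCA's actual code:

```cpp
#include <cstdio>
#include <map>

// Old scheme (assumed packing): fold torus coordinates into one integer.
// The id depends on the machine's dimensions and only makes sense on a torus.
int CoordsToId (const int coords[5], const int dims[5]) {
  int id = 0;
  for (int d = 0; d < 5; d++)
    id = id * dims[d] + coords[d];
  return id;
}

// New scheme: one opaque node id supplied by the topology layer
// (stubbed here); it stays valid on networks without torus coordinates.
int ComputeNodeId () { return 42; }

int main () {
  int coords[5] = {1, 0, 3, 2, 0};
  int dims[5]   = {4, 4, 4, 8, 2};

  std::map<int, bool> excludedNode;      // exclusion table keyed by node id
  excludedNode[ComputeNodeId ()] = true; // rule this node out as aggregator

  printf ("coordinate-based id: %d\n", CoordsToId (coords, dims));
  printf ("compute node id:     %d (excluded: %d)\n",
          ComputeNodeId (), (int) excludedNode[ComputeNodeId ()]);
  return 0;
}
```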
@@ -10,7 +10,7 @@ int Tapioca::RankShortestPath (MPI_Comm aggrComm, int64_t dataSize)
   hopsToIONnode.hops = topology.DistanceToIONode ( this->worldRank_ );
   hopsToIONnode.rank = commRank;
-  if ( this->excludedNode[this->intCoords_] )
+  if ( this->excludedNode[this->hostId_] )
     hopsToIONnode.hops = INT_MAX;
   MPI_Allreduce ( &hopsToIONnode, &shortestPath, 1, MPI_2INTEGER, MPI_MINLOC, aggrComm );
@@ -43,7 +43,7 @@ int Tapioca::RankLongestPath (MPI_Comm aggrComm, int64_t dataSize)
   hopsToIONnode.hops = topology.DistanceToIONode ( this->worldRank_ );
   hopsToIONnode.rank = commRank;
-  if ( this->excludedNode[this->intCoords_] )
+  if ( this->excludedNode[this->hostId_] )
     hopsToIONnode.hops = INT_MIN;
   MPI_Allreduce ( &hopsToIONnode, &longestPath, 1, MPI_2INTEGER, MPI_MAXLOC, aggrComm );
@@ -110,7 +110,7 @@ int Tapioca::RankTopologyAware (MPI_Comm aggrComm, int64_t dataSize)
   aggrCost.cost += topology.DistanceToIONode ( worldRank ) * LATENCY + (double)aggregatedData / BANDWIDTH;
 #endif
-  if ( this->excludedNode[this->intCoords_] )
+  if ( this->excludedNode[this->hostId_] )
     aggrCost.cost = DBL_MAX;
   MPI_Allreduce ( &aggrCost, &minCost, 1, MPI_DOUBLE_INT, MPI_MINLOC, aggrComm );
@@ -236,7 +236,7 @@ int Tapioca::RankContentionAware (MPI_Comm aggrComm, int64_t dataSize)
   aggrCost.cost += aggregatedData / ( BANDWIDTH / routeCost[srcNode] );
-  if ( this->excludedNode[this->intCoords_] )
+  if ( this->excludedNode[this->hostId_] )
     aggrCost.cost = DBL_MAX;
   MPI_Allreduce ( &aggrCost, &minCost, 1, MPI_DOUBLE_INT, MPI_MINLOC, aggrComm );
@@ -267,14 +267,14 @@ int Tapioca::RankUniformDistribution (MPI_Comm aggrComm, int64_t dataSize)
   MPI_Comm_rank (aggrComm, &aggrCommRank);
   if ( aggrCommRank == rootRank )
-    rootCoords = this->intCoords_;
+    rootCoords = this->hostId_;
   MPI_Bcast ( &rootCoords, 1, MPI_INT, rootRank, aggrComm);
   while ( this->excludedNode[rootCoords] ) {
     rootRank += topology.ProcessPerNode ();
     if ( aggrCommRank == rootRank )
-      rootCoords = this->intCoords_;
+      rootCoords = this->hostId_;
     MPI_Bcast ( &rootCoords, 1, MPI_INT, rootRank, aggrComm);
   }
@@ -309,7 +309,7 @@ int Tapioca::RankRandom (MPI_Comm aggrComm, int64_t dataSize)
   MPI_Bcast ( &aggrRank, 1, MPI_INT, 0, aggrComm);
   if ( aggrCommRank == aggrRank ) {
-    aggrCoords = this->intCoords_;
+    aggrCoords = this->hostId_;
   }
   MPI_Bcast ( &aggrCoords, 1, MPI_INT, aggrRank, aggrComm);
@@ -322,7 +322,7 @@ int Tapioca::RankRandom (MPI_Comm aggrComm, int64_t dataSize)
   MPI_Bcast ( &aggrRank, 1, MPI_INT, rootRank, aggrComm);
   if ( aggrCommRank == aggrRank ) {
-    aggrCoords = this->intCoords_;
+    aggrCoords = this->hostId_;
   }
   MPI_Bcast ( &aggrCoords, 1, MPI_INT, aggrRank, aggrComm);
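
The RankUniformDistribution hunk also shows how an excluded root is skipped: the root rank advances by one node's worth of processes and the id is rebroadcast until it lands on a usable node. A standalone sketch of that selection loop; the rank layout (ranks packed node by node) and the exclusion set are assumptions for illustration:

```cpp
#include <cstdio>
#include <set>

int main () {
  const int processPerNode = 4;          // stand-in for topology.ProcessPerNode()
  const int nRanks = 32;                 // 8 nodes x 4 ranks, assumed layout
  std::set<int> excludedNode = {0, 1};   // node ids ruled out as aggregators

  int rootRank = 0;
  int rootId   = rootRank / processPerNode;  // id the root would broadcast
  // Mirror of the diff's while-loop: hop one full node at a time until
  // the root lands on a non-excluded node.
  while (excludedNode.count (rootId) && rootRank < nRanks) {
    rootRank += processPerNode;
    rootId    = rootRank / processPerNode;
  }
  printf ("chosen root rank %d on node %d\n", rootRank, rootId);
  return 0;
}
```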