Commit 326c774e authored by Francois Tessier

Split tapioca.cpp into smaller files

parent 109885b6
@@ -12,137 +12,6 @@ Tapioca::~Tapioca ()
}
void Tapioca::WriteInitialize (char *filename, int64_t *chunkCount, int *chunkSize, int64_t *chunkOffset,
int nChunks, int64_t offset, MEMORY_LAYOUT layout, MPI_Comm comm)
{
int chunk;
#ifdef TIMING
double startInitTime, endInitTime, startElectTime, endElectTime;
startInitTime = MPI_Wtime();
#endif
this->filename_ = filename;
this->nChunks_ = nChunks;
this->chunksIndexMatching.resize (this->nChunks_);
this->chunkCount_ = (int64_t *)malloc (this->nChunks_ * sizeof(int64_t));
this->chunkSize_ = (int *)malloc (this->nChunks_ * sizeof(int));
this->chunkOffset_ = (int64_t *)malloc (this->nChunks_ * sizeof(int64_t));
memcpy (this->chunkCount_, chunkCount, this->nChunks_ * sizeof(int64_t));
memcpy (this->chunkSize_, chunkSize, this->nChunks_ * sizeof(int));
memcpy (this->chunkOffset_, chunkOffset, this->nChunks_ * sizeof(int64_t));
for ( chunk = 0; chunk < this->nChunks_; chunk++ )
this->rankDataSize_ += this->chunkCount_[chunk] * this->chunkSize_[chunk];
this->offsetInFile_ = offset;
this->layout_ = layout;
MPI_Comm_dup (comm, &this->subComm_);
this->SetCommValues ();
this->SetOffsets ();
if ( this->writeDevNull_ )
MPI_File_open(MPI_COMM_SELF, "/dev/null",
MPI_MODE_WRONLY | MPI_MODE_CREATE,
MPI_INFO_NULL, &this->devNullFileHandle_);
#ifdef DEBUG
if (this->commRank_ == MASTER) {
fprintf (stdout, "[DEBUG] #Aggr = %d \n", this->nAggr_);
fprintf (stdout, "[DEBUG] bufferSize = %lld \n", this->bufferSize_);
fprintf (stdout, "[DEBUG] commDataSize = %lld \n", this->commDataSize_);
fprintf (stdout, "[DEBUG] strategy = %s \n", this->getStrategyName ());
}
#endif
this->SetNodesList ();
this->IdentifyMyAggregators ();
#ifdef TIMING
startElectTime = MPI_Wtime();
#endif
this->ElectAggregators ();
#ifdef TIMING
endElectTime = MPI_Wtime();
#endif
this->InitAggregators ();
#ifdef TIMING
endInitTime = MPI_Wtime();
this->PrintTime(startInitTime, endInitTime, "Initialize");
this->PrintTime(startElectTime, endElectTime, " |-> Elect aggregators");
#endif
}
void Tapioca::ReadInitialize (char *filename, int64_t *chunkCount, int *chunkSize, int64_t *chunkOffset,
int nChunks, int64_t offset, MEMORY_LAYOUT layout, MPI_Comm comm)
{
int chunk;
#ifdef TIMING
double startInitTime, endInitTime, startElectTime, endElectTime;
startInitTime = MPI_Wtime();
#endif
this->filename_ = filename;
this->nChunks_ = nChunks;
this->chunksIndexMatching.resize (this->nChunks_);
this->chunkCount_ = (int64_t *)malloc (this->nChunks_ * sizeof(int64_t));
this->chunkSize_ = (int *)malloc (this->nChunks_ * sizeof(int));
this->chunkOffset_ = (int64_t *)malloc (this->nChunks_ * sizeof(int64_t));
memcpy (this->chunkCount_, chunkCount, this->nChunks_ * sizeof(int64_t));
memcpy (this->chunkSize_, chunkSize, this->nChunks_ * sizeof(int));
memcpy (this->chunkOffset_, chunkOffset, this->nChunks_ * sizeof(int64_t));
for ( chunk = 0; chunk < this->nChunks_; chunk++ )
this->rankDataSize_ += this->chunkCount_[chunk] * this->chunkSize_[chunk];
this->offsetInFile_ = offset;
this->layout_ = layout;
MPI_Comm_dup (comm, &this->subComm_);
this->SetCommValues ();
this->SetOffsets ();
#ifdef DEBUG
if (this->commRank_ == MASTER) {
fprintf (stdout, "[DEBUG] #Aggr = %d \n", this->nAggr_);
fprintf (stdout, "[DEBUG] bufferSize = %lld \n", this->bufferSize_);
fprintf (stdout, "[DEBUG] commDataSize = %lld \n", this->commDataSize_);
fprintf (stdout, "[DEBUG] strategy = %s \n", this->getStrategyName ());
}
#endif
this->SetNodesList ();
//this->IdentifyMyAggregators ();
#ifdef TIMING
startElectTime = MPI_Wtime();
#endif
//this->ElectAggregators ();
#ifdef TIMING
endElectTime = MPI_Wtime();
#endif
this->InitAggregators ();
#ifdef TIMING
endInitTime = MPI_Wtime();
this->PrintTime(startInitTime, endInitTime, "Initialize");
this->PrintTime(startElectTime, endElectTime, " |-> Elect aggregators");
#endif
}
void Tapioca::Finalize ()
{
this->chunksIndexMatching.clear();
@@ -151,6 +20,8 @@ void Tapioca::Finalize ()
free (this->chunkOffset_);
this->excludedNode.clear();
this->commDataSize_ = 0;
MPI_Win_free (&this->RMAWin1);
MPI_Win_free (&this->RMAWin2);
@@ -161,149 +32,6 @@ void Tapioca::Finalize ()
}
int Tapioca::Write (MPI_File fileHandle, MPI_Offset offset, void *buf,
int count, MPI_Datatype datatype, MPI_Status *status, int64_t bufOffset)
{
int retval, i, c, targetRoundIdx, targetAggrIdx, targetGlobAggr;
int typeSize, targetAggr, win, buffer;
bool multipleRounds = false;
int64_t chunkDataSize, subChunkDataSize, cumulDataSize = 0, cumulDataSizeInRound;
int64_t winOffset = 0, rankDataOffset, offsetInAggrData;
MPI_Request request = NULL;
MPI_Type_size (datatype, &typeSize);
c = this->nCommit_;
chunkDataSize = count * typeSize;
subChunkDataSize = chunkDataSize;
offsetInAggrData = offset - this->offsetInFile_;
winOffset = offsetInAggrData % this->bufferSize_;
targetRoundIdx = (*this->chunksIndexMatching[c].begin());
targetAggrIdx = (*this->chunksIndexMatching[c].begin());
if ( this->chunksIndexMatching[c].size() > 1 ) {
multipleRounds = true;
subChunkDataSize = this->dataSize[targetRoundIdx];
this->chunksIndexMatching[c].erase ( this->chunksIndexMatching[c].begin() );
}
/*
* Wait if it's not the appropriate round
*/
while ( this->roundsIds[targetRoundIdx] > this->currentRound_ ) {
this->GlobalFence ();
if ( this->amAnAggr_ ) {
if (request != NULL)
MPI_Wait ( &request, status );
this->Push (fileHandle, &request);
}
this->currentRound_++;
}
#ifdef TIMING
this->startAggrTime = MPI_Wtime();
#endif
buffer = this->currentRound_ % NBUFFERS;
targetGlobAggr = this->globalAggregatorsRanks[targetAggrIdx];
targetAggr = this->aggregatorsRanks[targetAggrIdx];
switch (buffer)
{
case 0:
retval = MPI_Put (static_cast<char*>(buf) + bufOffset, subChunkDataSize, MPI_BYTE,
targetAggr, winOffset, subChunkDataSize, MPI_BYTE, this->RMAWin1);
this->HandleMPIError (retval);
break;
case 1:
retval = MPI_Put (static_cast<char*>(buf) + bufOffset, subChunkDataSize, MPI_BYTE,
targetAggr, winOffset, subChunkDataSize, MPI_BYTE, this->RMAWin2);
this->HandleMPIError (retval);
break;
}
this->currentDataSize_ += subChunkDataSize;
/*
* If all the data have been written, wait
*/
if ( this->currentDataSize_ == this->rankDataSize_ ) {
while ( this->currentRound_ < this->totalRounds_ ) {
this->GlobalFence ();
if ( this->amAnAggr_ ) {
if (request != NULL)
MPI_Wait ( &request, status );
this->Push (fileHandle, &request);
}
this->currentRound_++;
}
}
if ( multipleRounds ) {
retval = this->Write (fileHandle, offset + subChunkDataSize, buf,
chunkDataSize - subChunkDataSize, MPI_BYTE, status, subChunkDataSize);
}
else {
this->nCommit_ ++;
}
if (request != NULL)
MPI_Wait ( &request, status );
return retval;
}
void Tapioca::Push (MPI_File fileHandle, MPI_Request *request)
{
int64_t offset, dataSize;
int win, buffer;
buffer = this->currentRound_ % NBUFFERS;
if ( this->amAnAggr_ ) {
#ifdef TIMING
this->startIOTime = MPI_Wtime();
#endif
offset = (this->nAggr_ * this->currentRound_ + this->globalAggrRank_) * this->bufferSize_;
offset += this->offsetInFile_;
dataSize = this->bufferSize_;
if ( this->aggrDataSize_ < this->bufferSize_ )
dataSize = this->aggrDataSize_;
switch (buffer)
{
case 0:
if ( this->writeDevNull_ )
MPI_File_iwrite_at (this->devNullFileHandle_, 0, buffer1, dataSize, MPI_BYTE, request);
else
MPI_File_iwrite_at (fileHandle, offset, buffer1, dataSize, MPI_BYTE, request);
break;
case 1:
if ( this->writeDevNull_ )
MPI_File_iwrite_at (this->devNullFileHandle_, 0, buffer2, dataSize, MPI_BYTE, request);
else
MPI_File_iwrite_at (fileHandle, offset, buffer2, dataSize, MPI_BYTE, request);
break;
}
this->aggrDataSize_ -= dataSize;
#ifdef TIMING
this->endIOTime = MPI_Wtime();
this->totIOTime = this->endIOTime - this->startIOTime;
if ( dataSize > 0 )
fprintf (stdout, "[TIMING][AGG][IO] Agg. %d, Rnd %d - %.2f ms\n",
this->commRank_, this->currentRound_, this->totIOTime * 1000);
#endif
}
}
void Tapioca::GlobalFence ()
{
int buffer;
......
#include "tapioca.hpp"
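// The commit message and the preceding hunks suggest this is a new source
// file split out of tapioca.cpp that collects the read-side routines:
// ReadInitialize(), Read() and the aggregator flush Push().  The file name
// itself is not shown in this view.  At this stage the read path still
// mirrors the write path closely: Read() stages data with MPI_Put and
// delegates multi-round remainders to Write(), and Push() issues file writes.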
void Tapioca::ReadInitialize (char *filename, int64_t *chunkCount, int *chunkSize, int64_t *chunkOffset,
int nChunks, int64_t offset, MEMORY_LAYOUT layout, MPI_Comm comm)
{
int chunk;
#ifdef TIMING
double startInitTime, endInitTime, startElectTime, endElectTime;
startInitTime = MPI_Wtime();
#endif
this->filename_ = filename;
this->nChunks_ = nChunks;
this->chunksIndexMatching.resize (this->nChunks_);
this->chunkCount_ = (int64_t *)malloc (this->nChunks_ * sizeof(int64_t));
this->chunkSize_ = (int *)malloc (this->nChunks_ * sizeof(int));
this->chunkOffset_ = (int64_t *)malloc (this->nChunks_ * sizeof(int64_t));
memcpy (this->chunkCount_, chunkCount, this->nChunks_ * sizeof(int64_t));
memcpy (this->chunkSize_, chunkSize, this->nChunks_ * sizeof(int));
memcpy (this->chunkOffset_, chunkOffset, this->nChunks_ * sizeof(int64_t));
for ( chunk = 0; chunk < this->nChunks_; chunk++ )
this->rankDataSize_ += this->chunkCount_[chunk] * this->chunkSize_[chunk];
this->offsetInFile_ = offset;
this->layout_ = layout;
MPI_Comm_dup (comm, &this->subComm_);
this->SetCommValues ();
this->SetOffsets ();
#ifdef DEBUG
if (this->commRank_ == MASTER) {
fprintf (stdout, "[DEBUG] #Aggr = %d \n", this->nAggr_);
fprintf (stdout, "[DEBUG] bufferSize = %lld \n", this->bufferSize_);
fprintf (stdout, "[DEBUG] commDataSize = %lld \n", this->commDataSize_);
fprintf (stdout, "[DEBUG] strategy = %s \n", this->getStrategyName ());
}
#endif
this->SetNodesList ();
this->IdentifyMyAggregators ();
#ifdef TIMING
startElectTime = MPI_Wtime();
#endif
this->ElectAggregators ();
#ifdef TIMING
endElectTime = MPI_Wtime();
#endif
this->InitAggregators ();
#ifdef TIMING
endInitTime = MPI_Wtime();
this->PrintTime(startInitTime, endInitTime, "Initialize");
this->PrintTime(startElectTime, endElectTime, " |-> Elect aggregators");
#endif
}
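// Read() below follows the same round-based aggregation scheme as Write():
// the chunk committed by this call is matched (via chunksIndexMatching and
// roundsIds) to an aggregation round, the rank waits for that round by
// fencing, then deposits its sub-chunk into one of the NBUFFERS RMA windows
// with MPI_Put.  As written here it still drives the write-side machinery
// (MPI_Put, Push(), and a recursive call to Write() for chunks that span
// several rounds).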
int Tapioca::Read (MPI_File fileHandle, MPI_Offset offset, void *buf,
int count, MPI_Datatype datatype, MPI_Status *status, int64_t bufOffset)
{
int retval, i, c, targetRoundIdx, targetAggrIdx, targetGlobAggr;
int typeSize, targetAggr, win, buffer;
bool multipleRounds = false;
int64_t chunkDataSize, subChunkDataSize, cumulDataSize = 0, cumulDataSizeInRound;
int64_t winOffset = 0, rankDataOffset, offsetInAggrData;
MPI_Request request = NULL;
MPI_Type_size (datatype, &typeSize);
c = this->nCommit_;
chunkDataSize = count * typeSize;
subChunkDataSize = chunkDataSize;
offsetInAggrData = offset - this->offsetInFile_;
winOffset = offsetInAggrData % this->bufferSize_;
targetRoundIdx = (*this->chunksIndexMatching[c].begin());
targetAggrIdx = (*this->chunksIndexMatching[c].begin());
if ( this->chunksIndexMatching[c].size() > 1 ) {
multipleRounds = true;
subChunkDataSize = this->dataSize[targetRoundIdx];
this->chunksIndexMatching[c].erase ( this->chunksIndexMatching[c].begin() );
}
/*
* Wait if it's not the appropriate round
*/
while ( this->roundsIds[targetRoundIdx] > this->currentRound_ ) {
this->GlobalFence ();
if ( this->amAnAggr_ ) {
if (request != NULL)
MPI_Wait ( &request, status );
this->Push (fileHandle, &request);
}
this->currentRound_++;
}
#ifdef TIMING
this->startAggrTime = MPI_Wtime();
#endif
buffer = this->currentRound_ % NBUFFERS;
targetGlobAggr = this->globalAggregatorsRanks[targetAggrIdx];
targetAggr = this->aggregatorsRanks[targetAggrIdx];
switch (buffer)
{
case 0:
retval = MPI_Put (static_cast<char*>(buf) + bufOffset, subChunkDataSize, MPI_BYTE,
targetAggr, winOffset, subChunkDataSize, MPI_BYTE, this->RMAWin1);
this->HandleMPIError (retval);
break;
case 1:
retval = MPI_Put (static_cast<char*>(buf) + bufOffset, subChunkDataSize, MPI_BYTE,
targetAggr, winOffset, subChunkDataSize, MPI_BYTE, this->RMAWin2);
this->HandleMPIError (retval);
break;
}
this->currentDataSize_ += subChunkDataSize;
/*
* If all the data have been written, wait
*/
if ( this->currentDataSize_ == this->rankDataSize_ ) {
while ( this->currentRound_ < this->totalRounds_ ) {
this->GlobalFence ();
if ( this->amAnAggr_ ) {
if (request != NULL)
MPI_Wait ( &request, status );
this->Push (fileHandle, &request);
}
this->currentRound_++;
}
}
if ( multipleRounds ) {
retval = this->Write (fileHandle, offset + subChunkDataSize, buf,
chunkDataSize - subChunkDataSize, MPI_BYTE, status, subChunkDataSize);
}
else {
this->nCommit_ ++;
}
if (request != NULL)
MPI_Wait ( &request, status );
return retval;
}
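// Push() is identical to the write-side version further below: when this rank
// is an elected aggregator, it flushes the active aggregation buffer to the
// file (or to /dev/null in benchmarking mode) with a non-blocking
// MPI_File_iwrite_at and decreases the remaining aggregated byte count.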
void Tapioca::Push (MPI_File fileHandle, MPI_Request *request)
{
int64_t offset, dataSize;
int win, buffer;
buffer = this->currentRound_ % NBUFFERS;
if ( this->amAnAggr_ ) {
#ifdef TIMING
this->startIOTime = MPI_Wtime();
#endif
offset = (this->nAggr_ * this->currentRound_ + this->globalAggrRank_) * this->bufferSize_;
offset += this->offsetInFile_;
dataSize = this->bufferSize_;
if ( this->aggrDataSize_ < this->bufferSize_ )
dataSize = this->aggrDataSize_;
switch (buffer)
{
case 0:
if ( this->writeDevNull_ )
MPI_File_iwrite_at (this->devNullFileHandle_, 0, buffer1, dataSize, MPI_BYTE, request);
else
MPI_File_iwrite_at (fileHandle, offset, buffer1, dataSize, MPI_BYTE, request);
break;
case 1:
if ( this->writeDevNull_ )
MPI_File_iwrite_at (this->devNullFileHandle_, 0, buffer2, dataSize, MPI_BYTE, request);
else
MPI_File_iwrite_at (fileHandle, offset, buffer2, dataSize, MPI_BYTE, request);
break;
}
this->aggrDataSize_ -= dataSize;
#ifdef TIMING
this->endIOTime = MPI_Wtime();
this->totIOTime = this->endIOTime - this->startIOTime;
if ( dataSize > 0 )
fprintf (stdout, "[TIMING][AGG][IO] Agg. %d, Rnd %d - %.2f ms\n",
this->commRank_, this->currentRound_, this->totIOTime * 1000);
#endif
}
}
#include "tapioca.hpp"
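// Counterpart of the read-side file above: this new file collects the
// write-side routines split out of tapioca.cpp -- WriteInitialize(), Write()
// and Push().  WriteInitialize() additionally opens /dev/null on MPI_COMM_SELF
// when writeDevNull_ is set, so aggregated buffers can be drained without
// touching the target file, presumably to isolate aggregation cost from
// storage cost.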
void Tapioca::WriteInitialize (char *filename, int64_t *chunkCount, int *chunkSize, int64_t *chunkOffset,
int nChunks, int64_t offset, MEMORY_LAYOUT layout, MPI_Comm comm)
{
int chunk;
#ifdef TIMING
double startInitTime, endInitTime, startElectTime, endElectTime;
startInitTime = MPI_Wtime();
#endif
this->filename_ = filename;
this->nChunks_ = nChunks;
this->chunksIndexMatching.resize (this->nChunks_);
this->chunkCount_ = (int64_t *)malloc (this->nChunks_ * sizeof(int64_t));
this->chunkSize_ = (int *)malloc (this->nChunks_ * sizeof(int));
this->chunkOffset_ = (int64_t *)malloc (this->nChunks_ * sizeof(int64_t));
memcpy (this->chunkCount_, chunkCount, this->nChunks_ * sizeof(int64_t));
memcpy (this->chunkSize_, chunkSize, this->nChunks_ * sizeof(int));
memcpy (this->chunkOffset_, chunkOffset, this->nChunks_ * sizeof(int64_t));
for ( chunk = 0; chunk < this->nChunks_; chunk++ )
this->rankDataSize_ += this->chunkCount_[chunk] * this->chunkSize_[chunk];
this->offsetInFile_ = offset;
this->layout_ = layout;
MPI_Comm_dup (comm, &this->subComm_);
this->SetCommValues ();
this->SetOffsets ();
if ( this->writeDevNull_ )
MPI_File_open(MPI_COMM_SELF, "/dev/null",
MPI_MODE_WRONLY | MPI_MODE_CREATE,
MPI_INFO_NULL, &this->devNullFileHandle_);
#ifdef DEBUG
if (this->commRank_ == MASTER) {
fprintf (stdout, "[DEBUG] #Aggr = %d \n", this->nAggr_);
fprintf (stdout, "[DEBUG] bufferSize = %lld \n", this->bufferSize_);
fprintf (stdout, "[DEBUG] commDataSize = %lld \n", this->commDataSize_);
fprintf (stdout, "[DEBUG] strategy = %s \n", this->getStrategyName ());
}
#endif
this->SetNodesList ();
this->IdentifyMyAggregators ();
#ifdef TIMING
startElectTime = MPI_Wtime();
#endif
this->ElectAggregators ();
#ifdef TIMING
endElectTime = MPI_Wtime();
#endif
this->InitAggregators ();
#ifdef TIMING
endInitTime = MPI_Wtime();
this->PrintTime(startInitTime, endInitTime, "Initialize");
this->PrintTime(startElectTime, endElectTime, " |-> Elect aggregators");
#endif
}
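// Write() implements one step of the two-phase, round-based aggregation:
//  1. The committed chunk (indexed by nCommit_) is mapped to a round and an
//     aggregator through chunksIndexMatching / roundsIds; if the chunk spans
//     several rounds, only the first sub-chunk is handled here and the call
//     recurses for the remainder (multipleRounds).
//  2. The rank waits for its round, fencing each intermediate round so the
//     aggregator can drain its buffer with Push().
//  3. The sub-chunk is deposited into the aggregator's RMA window with
//     MPI_Put; currentRound_ % NBUFFERS selects RMAWin1 or RMAWin2, giving a
//     double-buffered pipeline between aggregation and file I/O.
//  4. Once this rank has pushed all of its data (rankDataSize_), it keeps
//     fencing until the last round so collective progress is not blocked.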
int Tapioca::Write (MPI_File fileHandle, MPI_Offset offset, void *buf,
int count, MPI_Datatype datatype, MPI_Status *status, int64_t bufOffset)
{
int retval, i, c, targetRoundIdx, targetAggrIdx, targetGlobAggr;
int typeSize, targetAggr, win, buffer;
bool multipleRounds = false;
int64_t chunkDataSize, subChunkDataSize, cumulDataSize = 0, cumulDataSizeInRound;
int64_t winOffset = 0, rankDataOffset, offsetInAggrData;
MPI_Request request = NULL;
MPI_Type_size (datatype, &typeSize);
c = this->nCommit_;
chunkDataSize = count * typeSize;
subChunkDataSize = chunkDataSize;
offsetInAggrData = offset - this->offsetInFile_;
winOffset = offsetInAggrData % this->bufferSize_;
targetRoundIdx = (*this->chunksIndexMatching[c].begin());
targetAggrIdx = (*this->chunksIndexMatching[c].begin());
if ( this->chunksIndexMatching[c].size() > 1 ) {
multipleRounds = true;
subChunkDataSize = this->dataSize[targetRoundIdx];
this->chunksIndexMatching[c].erase ( this->chunksIndexMatching[c].begin() );
}
/*
* Wait if it's not the appropriate round
*/
while ( this->roundsIds[targetRoundIdx] > this->currentRound_ ) {
this->GlobalFence ();
if ( this->amAnAggr_ ) {
if (request != NULL)
MPI_Wait ( &request, status );
this->Push (fileHandle, &request);
}
this->currentRound_++;
}
#ifdef TIMING
this->startAggrTime = MPI_Wtime();
#endif
buffer = this->currentRound_ % NBUFFERS;
targetGlobAggr = this->globalAggregatorsRanks[targetAggrIdx];
targetAggr = this->aggregatorsRanks[targetAggrIdx];
switch (buffer)
{
case 0:
retval = MPI_Put (static_cast<char*>(buf) + bufOffset, subChunkDataSize, MPI_BYTE,
targetAggr, winOffset, subChunkDataSize, MPI_BYTE, this->RMAWin1);
this->HandleMPIError (retval);
break;
case 1:
retval = MPI_Put (static_cast<char*>(buf) + bufOffset, subChunkDataSize, MPI_BYTE,
targetAggr, winOffset, subChunkDataSize, MPI_BYTE, this->RMAWin2);
this->HandleMPIError (retval);
break;
}
this->currentDataSize_ += subChunkDataSize;
/*
* If all the data have been written, wait
*/
if ( this->currentDataSize_ == this->rankDataSize_ ) {
while ( this->currentRound_ < this->totalRounds_ ) {
this->GlobalFence ();
if ( this->amAnAggr_ ) {
if (request != NULL)
MPI_Wait ( &request, status );
this->Push (fileHandle, &request);
}
this->currentRound_++;
}
}
if ( multipleRounds ) {
retval = this->Write (fileHandle, offset + subChunkDataSize, buf,
chunkDataSize - subChunkDataSize, MPI_BYTE, status, subChunkDataSize);
}
else {
this->nCommit_ ++;
}
if (request != NULL)
MPI_Wait ( &request, status );
return retval;
}
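// Push(): aggregator-side drain of the current aggregation buffer.  The file
// offset is derived from the round and the aggregator's global index,
//   offset = (nAggr_ * currentRound_ + globalAggrRank_) * bufferSize_
//            + offsetInFile_,
// so each aggregator owns a disjoint, bufferSize_-wide stripe per round.  The
// write is non-blocking (MPI_File_iwrite_at); the caller waits on the request
// before the buffer is reused in a later round.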
void Tapioca::Push (MPI_File fileHandle, MPI_Request *request)
{
int64_t offset, dataSize;
int win, buffer;
buffer = this->currentRound_ % NBUFFERS;
if ( this->amAnAggr_ ) {
#ifdef TIMING
this->startIOTime = MPI_Wtime();
#endif
offset = (this->nAggr_ * this->currentRound_ + this->globalAggrRank_) * this->bufferSize_;
offset += this->offsetInFile_;
dataSize = this->bufferSize_;
if ( this->aggrDataSize_ < this->bufferSize_ )
dataSize = this->aggrDataSize_;
switch (buffer)
{
case 0:
if ( this->writeDevNull_ )
MPI_File_iwrite_at (this->devNullFileHandle_, 0, buffer1, dataSize, MPI_BYTE, request);
else
MPI_File_iwrite_at (fileHandle, offset, buffer1, dataSize, MPI_BYTE, request);
break;
case 1:
if ( this->writeDevNull_ )
MPI_File_iwrite_at (this->devNullFileHandle_, 0, buffer2, dataSize, MPI_BYTE, request);
else
MPI_File_iwrite_at (fileHandle, offset, buffer2, dataSize, MPI_BYTE, request);
break;
}
this->aggrDataSize_ -= dataSize;
#ifdef TIMING
this->endIOTime = MPI_Wtime();
this->totIOTime = this->endIOTime - this->startIOTime;
if ( dataSize > 0 )
fprintf (stdout, "[TIMING][AGG][IO] Agg. %d, Rnd %d - %.2f ms\n",
this->commRank_, this->currentRound_, this->totIOTime * 1000);
#endif
}
}
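For orientation, here is a minimal driver sketch for the write path, based only on the signatures visible in the hunks above (WriteInitialize, Write, Finalize). The file name, chunk geometry, datatype, and the default-constructibility of Tapioca are assumptions; the MEMORY_LAYOUT enumerator to pass is not visible in this commit, so the variable is only value-initialized.

// Hypothetical usage sketch -- not part of this commit.  All concrete values
// (file name, chunk sizes, MPI_DOUBLE) are illustrative assumptions.
#include <mpi.h>
#include <vector>
#include "tapioca.hpp"

int main (int argc, char **argv)
{
  MPI_Init (&argc, &argv);

  int rank;
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);

  // One contiguous chunk of 1024 doubles per rank, laid out back to back in
  // the shared file (illustrative geometry).
  int     nChunks        = 1;
  int64_t chunkCount[1]  = { 1024 };
  int     chunkSize[1]   = { (int) sizeof (double) };
  int64_t chunkOffset[1] = { (int64_t) rank * 1024 * (int64_t) sizeof (double) };
  std::vector<double> data (1024, (double) rank);

  char filename[] = "output.dat";   // assumed target file
  MEMORY_LAYOUT layout {};          // enumerator names are not shown in this diff

  Tapioca tp;                       // assumes a default constructor
  tp.WriteInitialize (filename, chunkCount, chunkSize, chunkOffset,
                      nChunks, 0, layout, MPI_COMM_WORLD);

  MPI_File   fh;
  MPI_Status status;
  MPI_File_open (MPI_COMM_WORLD, filename,
                 MPI_MODE_WRONLY | MPI_MODE_CREATE, MPI_INFO_NULL, &fh);

  // One Write() per declared chunk: Write() matches the call to its
  // aggregation round via nCommit_, which it increments itself.
  tp.Write (fh, chunkOffset[0], data.data (), (int) chunkCount[0], MPI_DOUBLE,
            &status, 0);

  MPI_File_close (&fh);
  tp.Finalize ();
  MPI_Finalize ();
  return 0;
}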