Skip to content
GitLab
Projects
Groups
Snippets
/
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
Menu
Open sidebar
Francois Tessier
TAPIOCA
Commits
2a595815
Commit
2a595815
authored
Aug 17, 2017
by
Francois Tessier
Browse files
Factorize data movement initialization
parent
1d30b3e0
Changes
2
Hide whitespace changes
Inline
Side-by-side
tp_read.cpp
View file @
2a595815
#include
"tapioca.hpp"
void
Tapioca
::
ReadInitialize
(
char
*
filename
,
int64_t
*
chunkCount
,
int
*
chunkSize
,
int64_t
*
chunkOffset
,
int
nChunks
,
int64_t
offset
,
MEMORY_LAYOUT
layout
,
MPI_Comm
comm
)
{
int
chunk
;
#ifdef TIMING
double
startInitTime
,
endInitTime
,
startElectTime
,
endElectTime
;
startInitTime
=
MPI_Wtime
();
#endif
this
->
SetDefaultValues
();
this
->
ParseEnvVariables
();
this
->
filename_
=
filename
;
this
->
nChunks_
=
nChunks
;
this
->
chunksIndexMatching
.
resize
(
this
->
nChunks_
);
this
->
chunkCount_
=
(
int64_t
*
)
malloc
(
this
->
nChunks_
*
sizeof
(
int64_t
));
this
->
chunkSize_
=
(
int
*
)
malloc
(
this
->
nChunks_
*
sizeof
(
int
));
this
->
chunkOffset_
=
(
int64_t
*
)
malloc
(
this
->
nChunks_
*
sizeof
(
int64_t
));
memcpy
(
this
->
chunkCount_
,
chunkCount
,
this
->
nChunks_
*
sizeof
(
int64_t
));
memcpy
(
this
->
chunkSize_
,
chunkSize
,
this
->
nChunks_
*
sizeof
(
int
));
memcpy
(
this
->
chunkOffset_
,
chunkOffset
,
this
->
nChunks_
*
sizeof
(
int64_t
));
for
(
chunk
=
0
;
chunk
<
this
->
nChunks_
;
chunk
++
)
this
->
rankDataSize_
+=
this
->
chunkCount_
[
chunk
]
*
this
->
chunkSize_
[
chunk
];
this
->
offsetInFile_
=
offset
;
this
->
layout_
=
layout
;
MPI_Comm_dup
(
comm
,
&
this
->
subComm_
);
this
->
SetCommValues
();
this
->
SetOffsets
();
#ifdef DBG
if
(
this
->
commRank_
==
MASTER
)
{
fprintf
(
stdout
,
"[DEBUG] #Aggr = %d
\n
"
,
this
->
nAggr_
);
fprintf
(
stdout
,
"[DEBUG] bufferSize = %lld
\n
"
,
this
->
bufferSize_
);
fprintf
(
stdout
,
"[DEBUG] commDataSize = %lld
\n
"
,
this
->
commDataSize_
);
fprintf
(
stdout
,
"[DEBUG] strategy = %s
\n
"
,
this
->
getStrategyName
());
}
#endif
this
->
SetNodesList
();
this
->
IdentifyMyAggregators
();
#ifdef TIMING
startElectTime
=
MPI_Wtime
();
#endif
this
->
ElectAggregators
();
#ifdef TIMING
endElectTime
=
MPI_Wtime
();
#endif
this
->
InitAggregators
();
#ifdef TIMING
endInitTime
=
MPI_Wtime
();
this
->
PrintTime
(
startInitTime
,
endInitTime
,
"Initialize"
);
this
->
PrintTime
(
startElectTime
,
endElectTime
,
" |-> Elect aggregators"
);
#endif
}
int
Tapioca
::
Read
(
MPI_File
fileHandle
,
MPI_Offset
offset
,
void
*
buf
,
int
count
,
MPI_Datatype
datatype
,
MPI_Status
*
status
,
int64_t
bufOffset
)
int
Tapioca
::
Read
(
MPI_Offset
offset
,
void
*
buf
,
int
count
,
MPI_Datatype
datatype
,
MPI_Status
*
status
,
int64_t
bufOffset
)
{
int
retval
,
i
,
c
,
targetRoundIdx
,
targetAggrIdx
,
targetGlobAggr
;
int
typeSize
,
targetAggr
,
win
,
buffer
;
bool
multipleRounds
=
false
;
int64_t
chunkDataSize
,
subChunkDataSize
,
cumulDataSize
=
0
,
cumulDataSizeInRound
;
int64_t
winOffset
=
0
,
rankDataOffset
,
offsetInAggrData
;
MPI_Request
request
=
NULL
;
MPI_Type_size
(
datatype
,
&
typeSize
);
c
=
this
->
nCommit_
;
...
...
@@ -94,9 +26,8 @@ int Tapioca::Read (MPI_File fileHandle, MPI_Offset offset, void *buf,
if
(
!
this
->
firstRead_
)
{
if
(
this
->
amAnAggr_
)
{
if
(
request
!=
NULL
)
MPI_Wait
(
&
request
,
status
);
this
->
Pull
(
fileHandle
,
&
request
);
this
->
memTarget
.
memFlush
();
this
->
Pull
();
this
->
readRound_
++
;
}
...
...
@@ -109,9 +40,8 @@ int Tapioca::Read (MPI_File fileHandle, MPI_Offset offset, void *buf,
*/
while
(
this
->
roundsIds
[
targetRoundIdx
]
>
this
->
currentRound_
)
{
if
(
this
->
amAnAggr_
&&
this
->
readRound_
<
this
->
totalRounds_
)
{
if
(
request
!=
NULL
)
MPI_Wait
(
&
request
,
status
);
this
->
Pull
(
fileHandle
,
&
request
);
this
->
memTarget
.
memFlush
();
this
->
Pull
();
this
->
readRound_
++
;
}
...
...
@@ -145,9 +75,8 @@ int Tapioca::Read (MPI_File fileHandle, MPI_Offset offset, void *buf,
if
(
this
->
currentDataSize_
==
this
->
rankDataSize_
)
{
while
(
this
->
currentRound_
<
this
->
totalRounds_
)
{
if
(
this
->
amAnAggr_
&&
this
->
readRound_
<
this
->
totalRounds_
)
{
if
(
request
!=
NULL
)
MPI_Wait
(
&
request
,
status
);
this
->
Pull
(
fileHandle
,
&
request
);
this
->
memTarget
.
memFlush
();
this
->
Pull
();
this
->
readRound_
++
;
}
...
...
@@ -157,21 +86,20 @@ int Tapioca::Read (MPI_File fileHandle, MPI_Offset offset, void *buf,
}
if
(
multipleRounds
)
{
retval
=
this
->
Read
(
fileHandle
,
offset
+
subChunkDataSize
,
buf
,
retval
=
this
->
Read
(
offset
+
subChunkDataSize
,
buf
,
chunkDataSize
-
subChunkDataSize
,
MPI_BYTE
,
status
,
subChunkDataSize
);
}
else
{
this
->
nCommit_
++
;
}
if
(
request
!=
NULL
)
MPI_Wait
(
&
request
,
status
);
this
->
memTarget
.
memFlush
();
return
retval
;
}
void
Tapioca
::
Pull
(
MPI_File
fileHandle
,
MPI_Request
*
request
)
void
Tapioca
::
Pull
()
{
int64_t
offset
,
dataSize
;
int
win
,
buffer
;
...
...
@@ -194,12 +122,13 @@ void Tapioca::Pull (MPI_File fileHandle, MPI_Request *request)
switch
(
buffer
)
{
case
0
:
MPI_File_iread_at
(
fileHandle
,
offset
,
this
->
memBuffer0
.
buffer_
,
dataSize
,
MPI_BYTE
,
request
);
MPI_Wait
(
request
,
&
status
);
// What if the target is not a file ? destRank = ?
this
->
memTarget
.
memRead
(
this
->
memBuffer0
.
buffer_
,
dataSize
,
offset
,
0
);
this
->
memTarget
.
memFlush
();
break
;
case
1
:
MPI_File_iread_at
(
fileHandle
,
offset
,
this
->
memBuffer1
.
buffer_
,
dataSize
,
MPI_BYTE
,
request
);
MPI_Wait
(
request
,
&
status
);
this
->
memTarget
.
memRead
(
this
->
memBuffer1
.
buffer_
,
dataSize
,
offset
,
0
);
this
->
memTarget
.
memFlush
(
);
break
;
}
...
...
tp_write.cpp
View file @
2a595815
#include
"tapioca.hpp"
/**
 * Prepare this Tapioca instance for a write operation.
 *
 * Mirrors ReadInitialize: records the target file and the caller's chunk
 * layout, computes the total data size owned by this rank, duplicates the
 * communicator, and runs aggregator election/initialization. Additionally,
 * when writeDevNull_ is set, opens a per-process /dev/null handle used to
 * discard aggregated data.
 *
 * @param filename     path of the file to write (stored, not opened here)
 * @param chunkCount   per-chunk element counts (nChunks entries, copied)
 * @param chunkSize    per-chunk element sizes in bytes (nChunks entries, copied)
 * @param chunkOffset  per-chunk file offsets (nChunks entries, copied)
 * @param nChunks      number of chunks described by the three arrays
 * @param offset       base offset of this rank's data in the file
 * @param layout       memory layout of the user buffer
 * @param comm         communicator to duplicate into subComm_
 */
void Tapioca::WriteInitialize (char *filename, int64_t *chunkCount, int *chunkSize,
                               int64_t *chunkOffset, int nChunks, int64_t offset,
                               MEMORY_LAYOUT layout, MPI_Comm comm)
{
  int c;
#ifdef TIMING
  double startInitTime, endInitTime, startElectTime, endElectTime;
  startInitTime = MPI_Wtime ();
#endif

  this->SetDefaultValues ();
  this->ParseEnvVariables ();

  this->filename_ = filename;
  this->nChunks_  = nChunks;
  this->chunksIndexMatching.resize (this->nChunks_);

  // Deep-copy the caller's chunk description so it can be freed/reused.
  // NOTE(review): malloc results are not checked for NULL — confirm whether
  // allocation failure should abort here.
  this->chunkCount_  = (int64_t *) malloc (this->nChunks_ * sizeof(int64_t));
  this->chunkSize_   = (int *)     malloc (this->nChunks_ * sizeof(int));
  this->chunkOffset_ = (int64_t *) malloc (this->nChunks_ * sizeof(int64_t));

  memcpy (this->chunkCount_,  chunkCount,  this->nChunks_ * sizeof(int64_t));
  memcpy (this->chunkSize_,   chunkSize,   this->nChunks_ * sizeof(int));
  memcpy (this->chunkOffset_, chunkOffset, this->nChunks_ * sizeof(int64_t));

  // Total bytes held by this rank: sum over chunks of count * element size.
  for (c = 0; c < this->nChunks_; c++)
    this->rankDataSize_ += this->chunkCount_[c] * this->chunkSize_[c];

  this->offsetInFile_ = offset;
  this->layout_       = layout;

  MPI_Comm_dup (comm, &this->subComm_);

  this->SetCommValues ();
  this->SetOffsets ();

  // Optional sink mode: aggregated data is written to /dev/null instead of
  // the real file (used for benchmarking the aggregation phase alone).
  if (this->writeDevNull_)
    MPI_File_open (MPI_COMM_SELF, "/dev/null",
                   MPI_MODE_WRONLY | MPI_MODE_CREATE,
                   MPI_INFO_NULL, &this->devNullFileHandle_);

#ifdef DBG
  if (this->commRank_ == MASTER) {
    fprintf (stdout, "[DEBUG] #Aggr = %d\n", this->nAggr_);
    fprintf (stdout, "[DEBUG] bufferSize = %lld\n", this->bufferSize_);
    fprintf (stdout, "[DEBUG] commDataSize = %lld\n", this->commDataSize_);
    fprintf (stdout, "[DEBUG] strategy = %s\n", this->getStrategyName ());
  }
#endif

  this->SetNodesList ();
  this->IdentifyMyAggregators ();

#ifdef TIMING
  startElectTime = MPI_Wtime ();
#endif

  this->ElectAggregators ();

#ifdef TIMING
  endElectTime = MPI_Wtime ();
#endif

  this->InitAggregators ();

#ifdef TIMING
  endInitTime = MPI_Wtime ();
  this->PrintTime (startInitTime, endInitTime, "Initialize");
  this->PrintTime (startElectTime, endElectTime, " |-> Elect aggregators");
#endif
}
int
Tapioca
::
Write
(
MPI_File
fileHandle
,
MPI_Offset
offset
,
void
*
buf
,
int
count
,
MPI_Datatype
datatype
,
MPI_Status
*
status
,
int64_t
bufOffset
)
int
Tapioca
::
Write
(
MPI_Offset
offset
,
void
*
buf
,
int
count
,
MPI_Datatype
datatype
,
MPI_Status
*
status
,
int64_t
bufOffset
)
{
int
retval
,
i
,
c
,
targetRoundIdx
,
targetAggrIdx
,
targetGlobAggr
;
int
typeSize
,
targetAggr
,
win
,
buffer
;
bool
multipleRounds
=
false
;
int64_t
chunkDataSize
,
subChunkDataSize
,
cumulDataSize
=
0
,
cumulDataSizeInRound
;
int64_t
winOffset
=
0
,
rankDataOffset
,
offsetInAggrData
;
MPI_Request
request
=
NULL
;
MPI_Type_size
(
datatype
,
&
typeSize
);
c
=
this
->
nCommit_
;
...
...
@@ -104,9 +31,8 @@ int Tapioca::Write (MPI_File fileHandle, MPI_Offset offset, void *buf,
this
->
GlobalFence
();
if
(
this
->
amAnAggr_
)
{
if
(
request
!=
NULL
)
MPI_Wait
(
&
request
,
status
);
this
->
Push
(
fileHandle
,
&
request
);
this
->
memTarget
.
memFlush
();
this
->
Push
();
}
if
(
!
this
->
pipelinedBuffers_
)
...
...
@@ -143,9 +69,8 @@ int Tapioca::Write (MPI_File fileHandle, MPI_Offset offset, void *buf,
this
->
GlobalFence
();
if
(
this
->
amAnAggr_
)
{
if
(
request
!=
NULL
)
MPI_Wait
(
&
request
,
status
);
this
->
Push
(
fileHandle
,
&
request
);
this
->
memTarget
.
memFlush
();
this
->
Push
();
}
if
(
!
this
->
pipelinedBuffers_
)
...
...
@@ -156,21 +81,19 @@ int Tapioca::Write (MPI_File fileHandle, MPI_Offset offset, void *buf,
}
if
(
multipleRounds
)
{
retval
=
this
->
Write
(
fileHandle
,
offset
+
subChunkDataSize
,
buf
,
retval
=
this
->
Write
(
offset
+
subChunkDataSize
,
buf
,
chunkDataSize
-
subChunkDataSize
,
MPI_BYTE
,
status
,
subChunkDataSize
);
}
else
{
this
->
nCommit_
++
;
}
if
(
request
!=
NULL
)
MPI_Wait
(
&
request
,
status
);
this
->
memTarget
.
memFlush
();
return
retval
;
}
void
Tapioca
::
Push
(
MPI_File
fileHandle
,
MPI_Request
*
request
)
void
Tapioca
::
Push
()
{
int64_t
offset
,
dataSize
;
int
win
,
buffer
;
...
...
@@ -191,24 +114,19 @@ void Tapioca::Push (MPI_File fileHandle, MPI_Request *request)
switch
(
buffer
)
{
case
0
:
if
(
this
->
writeDevNull_
)
MPI_File_iwrite_at
(
this
->
devNullFileHandle_
,
0
,
this
->
memBuffer0
.
buffer_
,
dataSize
,
MPI_BYTE
,
request
);
else
MPI_File_iwrite_at
(
fileHandle
,
offset
,
this
->
memBuffer0
.
buffer_
,
dataSize
,
MPI_BYTE
,
request
);
// What if the target is not a file ? destRank = ?
this
->
memTarget
.
memWrite
(
this
->
memBuffer0
.
buffer_
,
dataSize
,
offset
,
0
);
this
->
memTarget
.
memFlush
();
break
;
case
1
:
if
(
this
->
writeDevNull_
)
MPI_File_iwrite_at
(
this
->
devNullFileHandle_
,
0
,
this
->
memBuffer1
.
buffer_
,
dataSize
,
MPI_BYTE
,
request
);
else
MPI_File_iwrite_at
(
fileHandle
,
offset
,
this
->
memBuffer1
.
buffer_
,
dataSize
,
MPI_BYTE
,
request
);
this
->
memTarget
.
memWrite
(
this
->
memBuffer1
.
buffer_
,
dataSize
,
offset
,
0
);
this
->
memTarget
.
memFlush
();
break
;
}
this
->
aggrDataSize_
-=
dataSize
;
#ifdef TIMING
MPI_Status
status
;
MPI_Wait
(
request
,
&
status
);
// Flush ?
this
->
endIOTime
=
MPI_Wtime
();
this
->
totIOTime
=
this
->
endIOTime
-
this
->
startIOTime
;
if
(
dataSize
>
0
)
...
...
Write
Preview
Supports
Markdown
0%
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment