Commit e932ab81 authored by Paul Coffman, committed by Rob Latham

[PATCH] assorted fixes to ROMIO Collective IO One-sided

This commit addresses several design deficiencies in the ROMIO one-sided
collective IO algorithm and fixes several bugs.  Specifically:
- Re-engineered the one-sided algorithm to more easily support other
  file systems such as Lustre: reworked and simplified how data in the
  source buffer is tracked, packed non-contiguous source data destined
  for the same contiguous chunk of target data so that a single RMA
  call is made instead of several, and provided a framework in which
  multiple file domains can exist within an aggregator.
- The initial gather of offsets now also includes the total amount of
  data each rank needs to write.  The one-sided algorithm must
  distinguish ranks that have data from ranks that have none, and the
  difference between the starting and ending offsets for a 1-byte
  access is 0, the same as for 0 bytes, so the two cases cannot be
  distinguished from the offsets alone (see the first sketch after
  this list).  Some corner cases that are handled implicitly by the
  two-phase algorithm must be handled explicitly by the one-sided
  algorithm.
- Added the env var GPFSMPIO_ONESIDED_ALWAYS_RMW, which when set forces
  the one-sided write to first read the write buffer's offset range
  from disk to pre-fill any holes in the data.
- When holes are found during the one-sided write and
  GPFSMPIO_ONESIDED_NO_RMW is unset, resulting in a read-modify-write,
  re-call the one-sided algorithm with GPFSMPIO_ONESIDED_ALWAYS_RMW set
  instead of falling back to the baseline two-phase algorithm.
- Removed the one-sided active-synchronization code path, since it
  would have added significant memory overhead to support non-contiguous
  source-buffer packing and was always slower than passive
  synchronization on Blue Gene anyhow.
- Fixed bugs related to loss of precision in calculations that mix
  offsets defined as long (ADIO_Offset) with sizes defined as int (see
  the overflow sketch after this list).
- Fixed a bug in the number of aggregation rounds where in some cases
  too few rounds were performed; the fix is to take the maximum number
  of rounds needed by any aggregator instead of the average (see the
  rounds sketch after this list).
- Fixed a bug where an offset range crossing a file-domain boundary
  left the source buffer data pointer not properly advanced, causing
  incorrect data from the source buffer to be written.
- Replaced the GPFSMPIO_AGGMETHOD env var, which selected the
  aggregation method for both reads and writes, with
  GPFSMPIO_WRITE_AGGMETHOD and GPFSMPIO_READ_AGGMETHOD so each can be
  specified individually.  On Blue Gene/Q there are one-sided bugs in
  PAMI that are exposed in some cases by the MPI_Put used by writes and
  in other cases by the MPI_Get used by reads; the user needs the
  ability to use whichever works.
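
A minimal sketch of the 0-byte vs. 1-byte ambiguity described above; the
ADIO_Offset typedef is a stand-in for ROMIO's real definition and the
values are illustrative:

    #include <stdio.h>

    /* Stand-in for ROMIO's ADIO_Offset; assumed to be 64-bit here. */
    typedef long long ADIO_Offset;

    int main(void)
    {
        /* A rank writing exactly 1 byte at offset 100: the ending offset
         * is inclusive, so start == end and their difference is 0, the
         * same difference a rank with no data at all would report. */
        ADIO_Offset start_offset = 100, end_offset = 100;
        int count = 1, buftype_size = 1;

        ADIO_Offset span = end_offset - start_offset;          /* 0: ambiguous */
        ADIO_Offset my_count_size =
            (ADIO_Offset) count * (ADIO_Offset) buftype_size;  /* 1: has data  */

        printf("span=%lld my_count_size=%lld\n",
               (long long) span, (long long) my_count_size);
        return 0;
    }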
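
The overflow sketch, showing the precision hazard behind the offset
fixes; the overflowing multiply is undefined behavior in C and appears
only to illustrate why the widening casts matter:

    #include <stdio.h>

    /* Stand-in for ROMIO's ADIO_Offset; assumed to be 64-bit here. */
    typedef long long ADIO_Offset;

    int main(void)
    {
        int blocks = 3;
        int block_size = 1 << 30;   /* 1 GiB, close to INT_MAX */

        /* Buggy pattern: the multiply happens in 32-bit int and
         * overflows before the assignment widens the result. */
        ADIO_Offset bad = blocks * block_size;

        /* Fixed pattern: widen an operand first so the multiply itself
         * is done in 64 bits. */
        ADIO_Offset good = (ADIO_Offset) blocks * (ADIO_Offset) block_size;

        printf("bad=%lld good=%lld\n", (long long) bad, (long long) good);
        return 0;
    }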
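
The rounds sketch, a minimal MPI program that takes the maximum rather
than the average number of rounds; the buffer size, workload, and
variable names are assumptions for illustration, not ROMIO's actual
values:

    #include <mpi.h>
    #include <stdio.h>

    int main(int argc, char **argv)
    {
        MPI_Init(&argc, &argv);
        int rank;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

        /* Hypothetical uneven workload: each aggregator moves a
         * different number of bytes through a fixed collective buffer. */
        long long coll_bufsize = 16LL * 1024 * 1024;             /* 16 MiB */
        long long my_bytes = (rank + 1) * 24LL * 1024 * 1024;

        /* Rounds this aggregator needs: ceiling division. */
        int my_rounds = (int) ((my_bytes + coll_bufsize - 1) / coll_bufsize);

        /* Agree on the max so no aggregator stops before all of its
         * data has been moved. */
        int num_rounds = 0;
        MPI_Allreduce(&my_rounds, &num_rounds, 1, MPI_INT, MPI_MAX,
                      MPI_COMM_WORLD);

        if (rank == 0)
            printf("rounds = %d\n", num_rounds);
        MPI_Finalize();
        return 0;
    }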
Signed-off-by: Rob Latham <robl@mcs.anl.gov>
parent 86692e28
@@ -114,6 +114,7 @@ void ADIOI_GPFS_ReadStridedColl(ADIO_File fd, void *buf, int count,
ADIO_Offset *offset_list = NULL, *st_offsets = NULL, *fd_start = NULL,
*fd_end = NULL, *end_offsets = NULL;
ADIO_Offset *gpfs_offsets0 = NULL, *gpfs_offsets = NULL;
ADIO_Offset *count_sizes;
int ii;
ADIO_Offset *len_list = NULL;
int *buf_idx = NULL;
@@ -174,7 +175,36 @@ void ADIOI_GPFS_ReadStridedColl(ADIO_File fd, void *buf, int count,
st_offsets = (ADIO_Offset *) ADIOI_Malloc(nprocs*sizeof(ADIO_Offset));
end_offsets = (ADIO_Offset *) ADIOI_Malloc(nprocs*sizeof(ADIO_Offset));
ADIO_Offset my_count_size;
/* One-sided aggregation needs the amount of data per rank as well,
 * because the difference between starting and ending offsets for 1 byte
 * is 0, the same as for 0 bytes, so they cannot be distinguished.
 */
if ((gpfsmpio_read_aggmethod == 1) || (gpfsmpio_read_aggmethod == 2)) {
count_sizes = (ADIO_Offset *) ADIOI_Malloc(nprocs*sizeof(ADIO_Offset));
MPI_Count buftype_size;
MPI_Type_size_x(datatype, &buftype_size);
my_count_size = (ADIO_Offset) count * (ADIO_Offset)buftype_size;
}
if (gpfsmpio_tunegather) {
if ((gpfsmpio_read_aggmethod == 1) || (gpfsmpio_read_aggmethod == 2)) {
gpfs_offsets0 = (ADIO_Offset *) ADIOI_Malloc(3*nprocs*sizeof(ADIO_Offset));
gpfs_offsets = (ADIO_Offset *) ADIOI_Malloc(3*nprocs*sizeof(ADIO_Offset));
for (ii=0; ii<nprocs; ii++) {
gpfs_offsets0[ii*3] = 0;
gpfs_offsets0[ii*3+1] = 0;
gpfs_offsets0[ii*3+2] = 0;
}
gpfs_offsets0[myrank*3] = start_offset;
gpfs_offsets0[myrank*3+1] = end_offset;
gpfs_offsets0[myrank*3+2] = my_count_size;
MPI_Allreduce( gpfs_offsets0, gpfs_offsets, nprocs*3, ADIO_OFFSET, MPI_MAX, fd->comm );
for (ii=0; ii<nprocs; ii++) {
st_offsets [ii] = gpfs_offsets[ii*3] ;
end_offsets[ii] = gpfs_offsets[ii*3+1];
count_sizes[ii] = gpfs_offsets[ii*3+2];
}
}
else {
gpfs_offsets0 = (ADIO_Offset *) ADIOI_Malloc(2*nprocs*sizeof(ADIO_Offset));
gpfs_offsets = (ADIO_Offset *) ADIOI_Malloc(2*nprocs*sizeof(ADIO_Offset));
for (ii=0; ii<nprocs; ii++) {
@@ -190,6 +220,7 @@ void ADIOI_GPFS_ReadStridedColl(ADIO_File fd, void *buf, int count,
st_offsets [ii] = gpfs_offsets[ii*2] ;
end_offsets[ii] = gpfs_offsets[ii*2+1];
}
}
ADIOI_Free( gpfs_offsets0 );
ADIOI_Free( gpfs_offsets );
} else {
@@ -197,6 +228,10 @@ void ADIOI_GPFS_ReadStridedColl(ADIO_File fd, void *buf, int count,
ADIO_OFFSET, fd->comm);
MPI_Allgather(&end_offset, 1, ADIO_OFFSET, end_offsets, 1,
ADIO_OFFSET, fd->comm);
if ((gpfsmpio_read_aggmethod == 1) || (gpfsmpio_read_aggmethod == 2)) {
MPI_Allgather(&my_count_size, 1, ADIO_OFFSET, count_sizes, 1,
ADIO_OFFSET, fd->comm);
}
}
GPFSMPIO_T_CIO_SET_GET( r, 1, 1, GPFSMPIO_CIO_T_PATANA, GPFSMPIO_CIO_T_GATHER )
@@ -259,31 +294,67 @@ void ADIOI_GPFS_ReadStridedColl(ADIO_File fd, void *buf, int count,
* needs to be mapped to an actual rank in the communicator later.
*
*/
int currentNonZeroDataIndex = 0;
if ((gpfsmpio_read_aggmethod == 1) || (gpfsmpio_read_aggmethod == 2)) {
/* Take out the 0-data offsets by shifting the indexes with data to the
* front and keeping track of the non-zero data index for use as the
* length. By doing this we will optimally use all available aggs
* and spread the actual data across them instead of having offsets
* with empty data potentially dilute the file domains and create
* problems for the one-sided aggregation.
*/
for (i=0; i<nprocs; i++) {
if (count_sizes[i] > 0) {
st_offsets[currentNonZeroDataIndex] = st_offsets[i];
end_offsets[currentNonZeroDataIndex] = end_offsets[i];
currentNonZeroDataIndex++;
}
}
}
if (gpfsmpio_tuneblocking) {
if ((gpfsmpio_read_aggmethod == 1) || (gpfsmpio_read_aggmethod == 2)) {
ADIOI_GPFS_Calc_file_domains(fd, st_offsets, end_offsets, currentNonZeroDataIndex,
nprocs_for_coll, &min_st_offset,
&fd_start, &fd_end, &fd_size, fd->fs_ptr);
}
else {
ADIOI_GPFS_Calc_file_domains(fd, st_offsets, end_offsets, nprocs,
nprocs_for_coll, &min_st_offset,
&fd_start, &fd_end, &fd_size, fd->fs_ptr);
}
}
else {
if ((gpfsmpio_read_aggmethod == 1) || (gpfsmpio_read_aggmethod == 2)) {
ADIOI_Calc_file_domains(st_offsets, end_offsets, currentNonZeroDataIndex,
nprocs_for_coll, &min_st_offset,
&fd_start, &fd_end,
fd->hints->min_fdomain_size, &fd_size,
fd->hints->striping_unit);
}
else {
ADIOI_Calc_file_domains(st_offsets, end_offsets, nprocs,
nprocs_for_coll, &min_st_offset,
&fd_start, &fd_end,
fd->hints->min_fdomain_size, &fd_size,
fd->hints->striping_unit);
}
}
GPFSMPIO_T_CIO_SET_GET( r, 1, 1, GPFSMPIO_CIO_T_MYREQ, GPFSMPIO_CIO_T_FD_PART );
if ((gpfsmpio_read_aggmethod == 1) || (gpfsmpio_read_aggmethod == 2)) {
/* If the user has specified to use a one-sided aggregation method then do that at
* this point instead of the two-phase I/O.
*/
ADIOI_OneSidedReadAggregation(fd, offset_list, len_list, contig_access_count, buf,
datatype,error_code, st_offsets, end_offsets, currentNonZeroDataIndex, fd_start, fd_end);
GPFSMPIO_T_CIO_REPORT( 0, fd, myrank, nprocs)
ADIOI_Free(offset_list);
ADIOI_Free(len_list);
ADIOI_Free(st_offsets);
ADIOI_Free(end_offsets);
ADIOI_Free(fd_start);
ADIOI_Free(fd_end);
ADIOI_Free(count_sizes);
goto fn_exit;
}
if (gpfsmpio_p2pcontig==1) {
......
@@ -37,11 +37,13 @@ long bglocklessmpio_f_type;
int gpfsmpio_bg_nagg_pset;
int gpfsmpio_pthreadio;
int gpfsmpio_p2pcontig;
int gpfsmpio_write_aggmethod;
int gpfsmpio_read_aggmethod;
int gpfsmpio_balancecontig;
int gpfsmpio_devnullio;
int gpfsmpio_bridgeringagg;
int gpfsmpio_onesided_no_rmw;
int gpfsmpio_onesided_always_rmw;
int gpfsmpio_onesided_inform_rmw;
double gpfsmpio_prof_cw [GPFSMPIO_CIO_LAST+1];
@@ -108,8 +110,10 @@ double gpfsmpio_prof_cr [GPFSMPIO_CIO_LAST+1];
* 3.) There are no gaps between the offsets.
* 4.) No single rank has a data size which spans multiple file domains.
*
* - GPFSMPIO_WRITE_AGGMETHOD/GPFSMPIO_READ_AGGMETHOD - Replaces the two-phase
* collective IO aggregation with a one-sided algorithm, significantly
* reducing communication and memory overhead. Fully
* supports all datasets and datatypes, the only caveat is that any holes in the data
* when writing to a pre-existing file are ignored -- there is no read-modify-write
* support to maintain the correctness of regions of pre-existing data so every byte
@@ -124,7 +128,7 @@ double gpfsmpio_prof_cr [GPFSMPIO_CIO_LAST+1];
* optimal performance for this is achieved when paired with PAMID_TYPED_ONESIDED=1.
* - Default is 0
*
* - GPFSMPIO_ONESIDED_NO_RMW - For one-sided aggregation (GPFSMPIO_WRITE_AGGMETHOD = 1 or 2)
* disable the detection of holes in the data when writing to a pre-existing
* file requiring a read-modify-write, thereby avoiding the communication
* overhead for this detection.
@@ -200,9 +204,13 @@ void ad_gpfs_get_env_vars() {
x = getenv( "GPFSMPIO_P2PCONTIG" );
if (x) gpfsmpio_p2pcontig = atoi(x);
gpfsmpio_write_aggmethod = 0;
x = getenv( "GPFSMPIO_WRITE_AGGMETHOD" );
if (x) gpfsmpio_write_aggmethod = atoi(x);
gpfsmpio_read_aggmethod = 0;
x = getenv( "GPFSMPIO_READ_AGGMETHOD" );
if (x) gpfsmpio_read_aggmethod = atoi(x);
gpfsmpio_balancecontig = 0;
x = getenv( "GPFSMPIO_BALANCECONTIG" );
@@ -220,6 +228,12 @@ void ad_gpfs_get_env_vars() {
x = getenv( "GPFSMPIO_ONESIDED_NO_RMW" );
if (x) gpfsmpio_onesided_no_rmw = atoi(x);
gpfsmpio_onesided_always_rmw = 0;
x = getenv( "GPFSMPIO_ONESIDED_ALWAYS_RMW" );
if (x) gpfsmpio_onesided_always_rmw = atoi(x);
if (gpfsmpio_onesided_always_rmw)
gpfsmpio_onesided_no_rmw = 1;
gpfsmpio_onesided_inform_rmw = 0;
x = getenv( "GPFSMPIO_ONESIDED_INFORM_RMW" );
if (x) gpfsmpio_onesided_inform_rmw = atoi(x);
......
@@ -66,11 +66,13 @@ extern int gpfsmpio_tuneblocking;
extern long bglocklessmpio_f_type;
extern int gpfsmpio_pthreadio;
extern int gpfsmpio_p2pcontig;
extern int gpfsmpio_write_aggmethod;
extern int gpfsmpio_read_aggmethod;
extern int gpfsmpio_balancecontig;
extern int gpfsmpio_devnullio;
extern int gpfsmpio_bridgeringagg;
extern int gpfsmpio_onesided_no_rmw;
extern int gpfsmpio_onesided_always_rmw;
extern int gpfsmpio_onesided_inform_rmw;
/* Default is, well, kind of complicated. Blue Gene /L and /P had "psets": one
......
@@ -132,6 +132,7 @@ void ADIOI_GPFS_WriteStridedColl(ADIO_File fd, const void *buf, int count,
ADIO_Offset *offset_list = NULL, *st_offsets = NULL, *fd_start = NULL,
*fd_end = NULL, *end_offsets = NULL;
ADIO_Offset *gpfs_offsets0 = NULL, *gpfs_offsets = NULL;
ADIO_Offset *count_sizes;
int ii;
int *buf_idx = NULL;
@@ -171,11 +172,41 @@ void ADIOI_GPFS_WriteStridedColl(ADIO_File fd, const void *buf, int count,
/* each process communicates its start and end offsets to other
processes. The result is an array each of start and end offsets stored
in order of process rank. */
st_offsets = (ADIO_Offset *) ADIOI_Malloc(nprocs*sizeof(ADIO_Offset));
end_offsets = (ADIO_Offset *) ADIOI_Malloc(nprocs*sizeof(ADIO_Offset));
ADIO_Offset my_count_size;
/* One-sided aggregation needs the amount of data per rank as well,
 * because the difference between starting and ending offsets for 1 byte
 * is 0, the same as for 0 bytes, so they cannot be distinguished.
 */
if ((gpfsmpio_write_aggmethod == 1) || (gpfsmpio_write_aggmethod == 2)) {
count_sizes = (ADIO_Offset *) ADIOI_Malloc(nprocs*sizeof(ADIO_Offset));
MPI_Count buftype_size;
MPI_Type_size_x(datatype, &buftype_size);
my_count_size = (ADIO_Offset) count * (ADIO_Offset)buftype_size;
}
if (gpfsmpio_tunegather) {
if ((gpfsmpio_write_aggmethod == 1) || (gpfsmpio_write_aggmethod == 2)) {
gpfs_offsets0 = (ADIO_Offset *) ADIOI_Malloc(3*nprocs*sizeof(ADIO_Offset));
gpfs_offsets = (ADIO_Offset *) ADIOI_Malloc(3*nprocs*sizeof(ADIO_Offset));
for (ii=0; ii<nprocs; ii++) {
gpfs_offsets0[ii*3] = 0;
gpfs_offsets0[ii*3+1] = 0;
gpfs_offsets0[ii*3+2] = 0;
}
gpfs_offsets0[myrank*3] = start_offset;
gpfs_offsets0[myrank*3+1] = end_offset;
gpfs_offsets0[myrank*3+2] = my_count_size;
MPI_Allreduce( gpfs_offsets0, gpfs_offsets, nprocs*3, ADIO_OFFSET, MPI_MAX, fd->comm );
for (ii=0; ii<nprocs; ii++) {
st_offsets [ii] = gpfs_offsets[ii*3] ;
end_offsets[ii] = gpfs_offsets[ii*3+1];
count_sizes[ii] = gpfs_offsets[ii*3+2];
}
}
else {
gpfs_offsets0 = (ADIO_Offset *) ADIOI_Malloc(2*nprocs*sizeof(ADIO_Offset));
gpfs_offsets = (ADIO_Offset *) ADIOI_Malloc(2*nprocs*sizeof(ADIO_Offset));
for (ii=0; ii<nprocs; ii++) {
@@ -191,6 +222,7 @@ void ADIOI_GPFS_WriteStridedColl(ADIO_File fd, const void *buf, int count,
st_offsets [ii] = gpfs_offsets[ii*2] ;
end_offsets[ii] = gpfs_offsets[ii*2+1];
}
}
ADIOI_Free( gpfs_offsets0 );
ADIOI_Free( gpfs_offsets );
} else {
@@ -198,6 +230,10 @@ void ADIOI_GPFS_WriteStridedColl(ADIO_File fd, const void *buf, int count,
ADIO_OFFSET, fd->comm);
MPI_Allgather(&end_offset, 1, ADIO_OFFSET, end_offsets, 1,
ADIO_OFFSET, fd->comm);
if ((gpfsmpio_write_aggmethod == 1) || (gpfsmpio_write_aggmethod == 2)) {
MPI_Allgather(&my_count_size, 1, ADIO_OFFSET, count_sizes, 1,
ADIO_OFFSET, fd->comm);
}
}
GPFSMPIO_T_CIO_SET_GET(w, 1, 1, GPFSMPIO_CIO_T_PATANA, GPFSMPIO_CIO_T_GATHER )
@@ -250,25 +286,61 @@ void ADIOI_GPFS_WriteStridedColl(ADIO_File fd, const void *buf, int count,
done by (logically) dividing the file into file domains (FDs); each
process may directly access only its own file domain. */
int currentValidDataIndex = 0;
if ((gpfsmpio_write_aggmethod == 1) || (gpfsmpio_write_aggmethod == 2)) {
/* Take out the 0-data offsets by shifting the indexes with data to the front
* and keeping track of the valid data index for use as the length.
*/
for (i=0; i<nprocs; i++) {
if (count_sizes[i] > 0) {
st_offsets[currentValidDataIndex] = st_offsets[i];
end_offsets[currentValidDataIndex] = end_offsets[i];
currentValidDataIndex++;
}
}
}
if (gpfsmpio_tuneblocking) {
if ((gpfsmpio_write_aggmethod == 1) || (gpfsmpio_write_aggmethod == 2)) {
ADIOI_GPFS_Calc_file_domains(fd, st_offsets, end_offsets,
currentValidDataIndex,
nprocs_for_coll, &min_st_offset,
&fd_start, &fd_end, &fd_size, fd->fs_ptr);
}
else {
ADIOI_GPFS_Calc_file_domains(fd, st_offsets, end_offsets, nprocs,
nprocs_for_coll, &min_st_offset,
&fd_start, &fd_end, &fd_size, fd->fs_ptr);
}
}
else {
if ((gpfsmpio_write_aggmethod == 1) || (gpfsmpio_write_aggmethod == 2)) {
ADIOI_Calc_file_domains(st_offsets, end_offsets, currentValidDataIndex,
nprocs_for_coll, &min_st_offset,
&fd_start, &fd_end,
fd->hints->min_fdomain_size, &fd_size,
fd->hints->striping_unit);
}
else {
ADIOI_Calc_file_domains(st_offsets, end_offsets, nprocs,
nprocs_for_coll, &min_st_offset,
&fd_start, &fd_end,
fd->hints->min_fdomain_size, &fd_size,
fd->hints->striping_unit);
}
}
GPFSMPIO_T_CIO_SET_GET( w, 1, 1, GPFSMPIO_CIO_T_MYREQ, GPFSMPIO_CIO_T_FD_PART );
if ((gpfsmpio_write_aggmethod == 1) || (gpfsmpio_write_aggmethod == 2)) {
/* If the user has specified to use a one-sided aggregation method then do that at
* this point instead of the two-phase I/O.
*/
int holeFound = 0;
ADIOI_OneSidedWriteAggregation(fd, offset_list, len_list, contig_access_count,
buf, datatype, error_code, st_offsets, end_offsets,
currentValidDataIndex, fd_start, fd_end, &holeFound);
int anyHolesFound = 0;
if (!gpfsmpio_onesided_no_rmw)
MPI_Allreduce(&holeFound, &anyHolesFound, 1, MPI_INT, MPI_MAX, fd->comm);
@@ -280,15 +352,35 @@ void ADIOI_GPFS_WriteStridedColl(ADIO_File fd, const void *buf, int count,
ADIOI_Free(end_offsets);
ADIOI_Free(fd_start);
ADIOI_Free(fd_end);
ADIOI_Free(count_sizes);
goto fn_exit;
}
else {
/* Holes are found in the data and the user has not set
* gpfsmpio_onesided_no_rmw --- set gpfsmpio_onesided_always_rmw to 1
* and re-call ADIOI_OneSidedWriteAggregation and if the user has
* gpfsmpio_onesided_inform_rmw set then inform him of this condition
* and behavior.
*/
if (gpfsmpio_onesided_inform_rmw && (myrank ==0))
FPRINTF(stderr,"Information: Holes found during one-sided write aggregation algorithm --- additionally performing default two-phase aggregation algorithm\n");
FPRINTF(stderr,"Information: Holes found during one-sided "
"write aggregation algorithm --- re-running one-sided "
"write aggregation with GPFSMPIO_ONESIDED_ALWAYS_RMW set to 1.\n");
gpfsmpio_onesided_always_rmw = 1;
int prev_gpfsmpio_onesided_no_rmw = gpfsmpio_onesided_no_rmw;
gpfsmpio_onesided_no_rmw = 1;
ADIOI_OneSidedWriteAggregation(fd, offset_list, len_list, contig_access_count, buf, datatype, error_code, st_offsets, end_offsets, currentValidDataIndex, fd_start, fd_end, &holeFound);
gpfsmpio_onesided_no_rmw = prev_gpfsmpio_onesided_no_rmw;
GPFSMPIO_T_CIO_REPORT( 1, fd, myrank, nprocs)
ADIOI_Free(offset_list);
ADIOI_Free(len_list);
ADIOI_Free(st_offsets);
ADIOI_Free(end_offsets);
ADIOI_Free(fd_start);
ADIOI_Free(fd_end);
ADIOI_Free(count_sizes);
goto fn_exit;
}
}
if (gpfsmpio_p2pcontig==1) {
......
This source diff could not be displayed because it is too large.
@@ -707,6 +707,7 @@ void ADIOI_OneSidedWriteAggregation(ADIO_File fd,
int *error_code,
ADIO_Offset *st_offsets,
ADIO_Offset *end_offsets,
int numNonZeroDataOffsets,
ADIO_Offset *fd_start,
ADIO_Offset* fd_end,
int *hole_found);
@@ -719,6 +720,7 @@ void ADIOI_OneSidedReadAggregation(ADIO_File fd,
int *error_code,
ADIO_Offset *st_offsets,
ADIO_Offset *end_offsets,
int numNonZeroDataOffsets,
ADIO_Offset *fd_start,
ADIO_Offset* fd_end);
ADIO_Offset ADIOI_GEN_SeekIndividual(ADIO_File fd, ADIO_Offset offset,
......