Commit b47d95f7 authored by Antonio J. Pena, committed by Xin Zhao

Fix unsafe datatype release in several RMA tests



The datatypes shouldn't be released until we are sure that no remote
operations are still using them. I've changed several tests to release
the datatypes after a barrier. To avoid introducing a barrier in every
iteration, and to stress the implementation a little more, I've
restructured the tests so that the datatypes are created and freed only
once instead of in every iteration.

This was causing intermittent segfaults, mainly with asynchronous progress enabled.
Signed-off-by: Xin Zhao <xinzhao3@illinois.edu>
parent c6777161
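
For reference, the restructuring applied in the hunks below follows the pattern sketched here. This is a minimal illustrative fragment, not code taken verbatim from any of the tests: the strided source/destination types are replaced by simple contiguous stand-ins, and buf_win, peer, src_buf, SUB_XDIM and ITERATIONS are assumed to be set up as in the tests.

    int i;
    MPI_Datatype src_type, dst_type;

    /* Create and commit the datatypes once, before the communication loop
     * (contiguous stand-ins for the strided types built in the real tests). */
    MPI_Type_contiguous(SUB_XDIM, MPI_DOUBLE, &src_type);
    MPI_Type_contiguous(SUB_XDIM, MPI_DOUBLE, &dst_type);
    MPI_Type_commit(&src_type);
    MPI_Type_commit(&dst_type);

    for (i = 0; i < ITERATIONS; i++) {
        MPI_Win_lock(MPI_LOCK_EXCLUSIVE, peer, 0, buf_win);
        MPI_Put(src_buf, 1, src_type, peer, 0, 1, dst_type, buf_win);
        MPI_Win_unlock(peer, buf_win);
        /* Do not free the datatypes here: remote operations may still
         * be using them. */
    }

    /* Release the datatypes only after a barrier guarantees that no
     * remote operation can still reference them. */
    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Type_free(&src_type);
    MPI_Type_free(&dst_type);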
@@ -36,6 +36,12 @@ int main(int argc, char **argv) {
     int i, j, rank, nranks, peer, bufsize, errors;
     double *win_buf, *src_buf, *dst_buf;
     MPI_Win buf_win;
+    MPI_Aint idx_loc[SUB_YDIM];
+    int idx_rem[SUB_YDIM];
+    int blk_len[SUB_YDIM];
+    MPI_Datatype src_type, dst_type;
+    void *base_ptr;
+    MPI_Aint base_int;
     MTest_Init(&argc, &argv);
@@ -58,42 +64,35 @@ int main(int argc, char **argv) {
     peer = (rank+1) % nranks;
-    /* Perform ITERATIONS strided put operations */
-    for (i = 0; i < ITERATIONS; i++) {
-      MPI_Aint idx_loc[SUB_YDIM];
-      int idx_rem[SUB_YDIM];
-      int blk_len[SUB_YDIM];
-      MPI_Datatype src_type, dst_type;
-      void *base_ptr = dst_buf;
-      MPI_Aint base_int;
+    base_ptr = dst_buf;
-      MPI_Get_address(base_ptr, &base_int);
+    MPI_Get_address(base_ptr, &base_int);
-      for (j = 0; j < SUB_YDIM; j++) {
+    for (j = 0; j < SUB_YDIM; j++) {
        MPI_Get_address(&src_buf[j*XDIM], &idx_loc[j]);
        idx_loc[j] = idx_loc[j] - base_int;
        idx_rem[j] = j*XDIM*sizeof(double);
        blk_len[j] = SUB_XDIM*sizeof(double);
-      }
+    }
-      MPI_Type_create_hindexed(SUB_YDIM, blk_len, idx_loc, MPI_BYTE, &src_type);
-      MPI_Type_create_indexed_block(SUB_YDIM, SUB_XDIM*sizeof(double), idx_rem, MPI_BYTE, &dst_type);
+    MPI_Type_create_hindexed(SUB_YDIM, blk_len, idx_loc, MPI_BYTE, &src_type);
+    MPI_Type_create_indexed_block(SUB_YDIM, SUB_XDIM*sizeof(double), idx_rem, MPI_BYTE, &dst_type);
-      MPI_Type_commit(&src_type);
-      MPI_Type_commit(&dst_type);
+    MPI_Type_commit(&src_type);
+    MPI_Type_commit(&dst_type);
+    /* Perform ITERATIONS strided put operations */
+    for (i = 0; i < ITERATIONS; i++) {
       MPI_Win_lock(MPI_LOCK_EXCLUSIVE, peer, 0, buf_win);
       MPI_Put(base_ptr, 1, src_type, peer, 0, 1, dst_type, buf_win);
       MPI_Win_unlock(peer, buf_win);
-      MPI_Type_free(&src_type);
-      MPI_Type_free(&dst_type);
     }
+    MPI_Barrier(MPI_COMM_WORLD);
+    MPI_Type_free(&src_type);
+    MPI_Type_free(&dst_type);
     /* Verify that the results are correct */
     MPI_Win_lock(MPI_LOCK_EXCLUSIVE, rank, 0, buf_win);
@@ -34,6 +34,10 @@ int main(int argc, char **argv) {
     int i, j, rank, nranks, peer, bufsize, errors;
     double *win_buf, *src_buf, *dst_buf;
     MPI_Win buf_win;
+    MPI_Aint idx_loc[SUB_YDIM];
+    int idx_rem[SUB_YDIM];
+    int blk_len[SUB_YDIM];
+    MPI_Datatype src_type, dst_type;
     MTest_Init(&argc, &argv);
@@ -54,36 +58,30 @@ int main(int argc, char **argv) {
     peer = (rank+1) % nranks;
-    /* Perform ITERATIONS strided put operations */
-    for (i = 0; i < ITERATIONS; i++) {
-      MPI_Aint idx_loc[SUB_YDIM];
-      int idx_rem[SUB_YDIM];
-      int blk_len[SUB_YDIM];
-      MPI_Datatype src_type, dst_type;
-      for (j = 0; j < SUB_YDIM; j++) {
+    for (j = 0; j < SUB_YDIM; j++) {
        MPI_Get_address(&src_buf[j*XDIM], &idx_loc[j]);
        idx_rem[j] = j*XDIM*sizeof(double);
        blk_len[j] = SUB_XDIM*sizeof(double);
-      }
+    }
-      MPI_Type_create_hindexed(SUB_YDIM, blk_len, idx_loc, MPI_BYTE, &src_type);
-      MPI_Type_create_indexed_block(SUB_YDIM, SUB_XDIM*sizeof(double), idx_rem, MPI_BYTE, &dst_type);
+    MPI_Type_create_hindexed(SUB_YDIM, blk_len, idx_loc, MPI_BYTE, &src_type);
+    MPI_Type_create_indexed_block(SUB_YDIM, SUB_XDIM*sizeof(double), idx_rem, MPI_BYTE, &dst_type);
-      MPI_Type_commit(&src_type);
-      MPI_Type_commit(&dst_type);
+    MPI_Type_commit(&src_type);
+    MPI_Type_commit(&dst_type);
+    /* Perform ITERATIONS strided put operations */
+    for (i = 0; i < ITERATIONS; i++) {
       MPI_Win_lock(MPI_LOCK_EXCLUSIVE, peer, 0, buf_win);
       MPI_Put(MPI_BOTTOM, 1, src_type, peer, 0, 1, dst_type, buf_win);
       MPI_Win_unlock(peer, buf_win);
-      MPI_Type_free(&src_type);
-      MPI_Type_free(&dst_type);
     }
+    MPI_Barrier(MPI_COMM_WORLD);
+    MPI_Type_free(&src_type);
+    MPI_Type_free(&dst_type);
     /* Verify that the results are correct */
     MPI_Win_lock(MPI_LOCK_EXCLUSIVE, rank, 0, buf_win);
@@ -32,6 +32,10 @@ int main(int argc, char **argv) {
     int itr, i, j, rank, nranks, peer, bufsize, errors;
     double *win_buf, *src_buf;
     MPI_Win buf_win;
+    MPI_Aint idx_loc[SUB_YDIM];
+    int idx_rem[SUB_YDIM];
+    int blk_len[SUB_YDIM];
+    MPI_Datatype src_type, dst_type;
     MTest_Init(&argc, &argv);
@@ -51,30 +55,24 @@ int main(int argc, char **argv) {
     peer = (rank+1) % nranks;
-    /* Perform ITERATIONS strided accumulate operations */
-    for (itr = 0; itr < ITERATIONS; itr++) {
-      MPI_Aint idx_loc[SUB_YDIM];
-      int idx_rem[SUB_YDIM];
-      int blk_len[SUB_YDIM];
-      MPI_Datatype src_type, dst_type;
-      for (i = 0; i < SUB_YDIM; i++) {
+    for (i = 0; i < SUB_YDIM; i++) {
        MPI_Get_address(&src_buf[i*XDIM], &idx_loc[i]);
        idx_rem[i] = i*XDIM;
        blk_len[i] = SUB_XDIM;
-      }
+    }
 #ifdef ABSOLUTE
-      MPI_Type_hindexed(SUB_YDIM, blk_len, idx_loc, MPI_DOUBLE, &src_type);
+    MPI_Type_hindexed(SUB_YDIM, blk_len, idx_loc, MPI_DOUBLE, &src_type);
 #else
-      MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &src_type);
+    MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &src_type);
 #endif
-      MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &dst_type);
+    MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &dst_type);
-      MPI_Type_commit(&src_type);
-      MPI_Type_commit(&dst_type);
+    MPI_Type_commit(&src_type);
+    MPI_Type_commit(&dst_type);
+    /* Perform ITERATIONS strided accumulate operations */
+    for (itr = 0; itr < ITERATIONS; itr++) {
       MPI_Win_lock(MPI_LOCK_EXCLUSIVE, peer, 0, buf_win);
 #ifdef ABSOLUTE
@@ -84,13 +82,13 @@ int main(int argc, char **argv) {
 #endif
       MPI_Win_unlock(peer, buf_win);
-      MPI_Type_free(&src_type);
-      MPI_Type_free(&dst_type);
     }
+    MPI_Barrier(MPI_COMM_WORLD);
+    MPI_Type_free(&src_type);
+    MPI_Type_free(&dst_type);
     /* Verify that the results are correct */
     MPI_Win_lock(MPI_LOCK_EXCLUSIVE, rank, 0, buf_win);
@@ -32,6 +32,14 @@ int main(int argc, char **argv) {
     int i, j, rank, nranks, peer, bufsize, errors;
     double *win_buf, *src_buf;
     MPI_Win buf_win;
+    int ndims = 2;
+    int src_arr_sizes[2] = { XDIM, YDIM };
+    int src_arr_subsizes[2] = { SUB_XDIM, SUB_YDIM };
+    int src_arr_starts[2] = { 0, 0 };
+    int dst_arr_sizes[2] = { XDIM, YDIM };
+    int dst_arr_subsizes[2] = { SUB_XDIM, SUB_YDIM };
+    int dst_arr_starts[2] = { 0, 0 };
+    MPI_Datatype src_type, dst_type;
     MTest_Init(&argc, &argv);
@@ -51,39 +59,27 @@ int main(int argc, char **argv) {
     peer = (rank+1) % nranks;
-    /* Perform ITERATIONS strided accumulate operations */
-    for (i = 0; i < ITERATIONS; i++) {
-      int ndims = 2;
-      int src_arr_sizes[2] = { XDIM, YDIM };
-      int src_arr_subsizes[2] = { SUB_XDIM, SUB_YDIM };
-      int src_arr_starts[2] = { 0, 0 };
-      int dst_arr_sizes[2] = { XDIM, YDIM };
-      int dst_arr_subsizes[2] = { SUB_XDIM, SUB_YDIM };
-      int dst_arr_starts[2] = { 0, 0 };
-      MPI_Datatype src_type, dst_type;
-      MPI_Type_create_subarray(ndims, src_arr_sizes, src_arr_subsizes, src_arr_starts,
+    MPI_Type_create_subarray(ndims, src_arr_sizes, src_arr_subsizes, src_arr_starts,
                              MPI_ORDER_C, MPI_DOUBLE, &src_type);
-      MPI_Type_create_subarray(ndims, dst_arr_sizes, dst_arr_subsizes, dst_arr_starts,
+    MPI_Type_create_subarray(ndims, dst_arr_sizes, dst_arr_subsizes, dst_arr_starts,
                              MPI_ORDER_C, MPI_DOUBLE, &dst_type);
-      MPI_Type_commit(&src_type);
-      MPI_Type_commit(&dst_type);
+    MPI_Type_commit(&src_type);
+    MPI_Type_commit(&dst_type);
+    /* Perform ITERATIONS strided accumulate operations */
+    for (i = 0; i < ITERATIONS; i++) {
       MPI_Win_lock(MPI_LOCK_EXCLUSIVE, peer, 0, buf_win);
       MPI_Accumulate(src_buf, 1, src_type, peer, 0, 1, dst_type, MPI_SUM, buf_win);
       MPI_Win_unlock(peer, buf_win);
-      MPI_Type_free(&src_type);
-      MPI_Type_free(&dst_type);
     }
+    MPI_Barrier(MPI_COMM_WORLD);
+    MPI_Type_free(&src_type);
+    MPI_Type_free(&dst_type);
     /* Verify that the results are correct */
     MPI_Win_lock(MPI_LOCK_EXCLUSIVE, rank, 0, buf_win);
@@ -78,11 +78,11 @@ int main(int argc, char **argv) {
     MPI_Win_unlock(peer, buf_win);
+    MPI_Barrier(MPI_COMM_WORLD);
     MPI_Type_free(&loc_type);
     MPI_Type_free(&rem_type);
-    MPI_Barrier(MPI_COMM_WORLD);
     /* Verify that the results are correct */
     errors = 0;
@@ -32,6 +32,9 @@ int main(int argc, char **argv) {
     int i, j, rank, nranks, peer, bufsize, errors;
     double *win_buf, *src_buf, *dst_buf;
     MPI_Win buf_win;
+    int idx_rem[SUB_YDIM];
+    int blk_len[SUB_YDIM];
+    MPI_Datatype src_type, dst_type;
     MTest_Init(&argc, &argv);
@@ -52,24 +55,19 @@ int main(int argc, char **argv) {
     peer = (rank+1) % nranks;
-    /* Perform ITERATIONS strided accumulate operations */
-    for (i = 0; i < ITERATIONS; i++) {
-      int idx_rem[SUB_YDIM];
-      int blk_len[SUB_YDIM];
-      MPI_Datatype src_type, dst_type;
-      for (j = 0; j < SUB_YDIM; j++) {
+    for (j = 0; j < SUB_YDIM; j++) {
        idx_rem[j] = j*XDIM;
        blk_len[j] = SUB_XDIM;
-      }
+    }
-      MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &src_type);
-      MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &dst_type);
+    MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &src_type);
+    MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &dst_type);
-      MPI_Type_commit(&src_type);
-      MPI_Type_commit(&dst_type);
+    MPI_Type_commit(&src_type);
+    MPI_Type_commit(&dst_type);
+    /* Perform ITERATIONS strided accumulate operations */
+    for (i = 0; i < ITERATIONS; i++) {
       /* PUT */
       MPI_Win_lock(MPI_LOCK_EXCLUSIVE, peer, 0, buf_win);
       MPI_Get_accumulate(src_buf, 1, src_type, dst_buf, 1, src_type, peer, 0,
@@ -81,13 +79,13 @@ int main(int argc, char **argv) {
       MPI_Get_accumulate(src_buf, 1, src_type, dst_buf, 1, src_type, peer, 0,
                          1, dst_type, MPI_NO_OP, buf_win);
       MPI_Win_unlock(peer, buf_win);
-      MPI_Type_free(&src_type);
-      MPI_Type_free(&dst_type);
     }
+    MPI_Barrier(MPI_COMM_WORLD);
+    MPI_Type_free(&src_type);
+    MPI_Type_free(&dst_type);
     /* Verify that the results are correct */
     MPI_Win_lock(MPI_LOCK_EXCLUSIVE, rank, 0, buf_win);
@@ -34,6 +34,9 @@ int main(int argc, char **argv) {
     double *win_buf, *src_buf, *dst_buf;
     MPI_Win buf_win;
     MPI_Comm shr_comm;
+    int idx_rem[SUB_YDIM];
+    int blk_len[SUB_YDIM];
+    MPI_Datatype src_type, dst_type;
     MTest_Init(&argc, &argv);
@@ -62,24 +65,19 @@ int main(int argc, char **argv) {
     peer = (rank+1) % nranks;
-    /* Perform ITERATIONS strided accumulate operations */
-    for (i = 0; i < ITERATIONS; i++) {
-      int idx_rem[SUB_YDIM];
-      int blk_len[SUB_YDIM];
-      MPI_Datatype src_type, dst_type;
-      for (j = 0; j < SUB_YDIM; j++) {
+    for (j = 0; j < SUB_YDIM; j++) {
        idx_rem[j] = j*XDIM;
        blk_len[j] = SUB_XDIM;
-      }
+    }
-      MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &src_type);
-      MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &dst_type);
+    MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &src_type);
+    MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &dst_type);
-      MPI_Type_commit(&src_type);
-      MPI_Type_commit(&dst_type);
+    MPI_Type_commit(&src_type);
+    MPI_Type_commit(&dst_type);
+    /* Perform ITERATIONS strided accumulate operations */
+    for (i = 0; i < ITERATIONS; i++) {
       /* PUT */
       MPI_Win_lock(MPI_LOCK_EXCLUSIVE, peer, 0, buf_win);
       MPI_Get_accumulate(src_buf, 1, src_type, dst_buf, 1, src_type, peer, 0,
@@ -91,13 +89,13 @@ int main(int argc, char **argv) {
       MPI_Get_accumulate(src_buf, 1, src_type, dst_buf, 1, src_type, peer, 0,
                          1, dst_type, MPI_NO_OP, buf_win);
       MPI_Win_unlock(peer, buf_win);
-      MPI_Type_free(&src_type);
-      MPI_Type_free(&dst_type);
     }
+    MPI_Barrier(MPI_COMM_WORLD);
+    MPI_Type_free(&src_type);
+    MPI_Type_free(&dst_type);
     /* Verify that the results are correct */
     MPI_Win_lock(MPI_LOCK_EXCLUSIVE, rank, 0, buf_win);
@@ -32,6 +32,9 @@ int main(int argc, char **argv) {
     int i, j, rank, nranks, peer, bufsize, errors;
     double *win_buf, *src_buf, *dst_buf;
    MPI_Win buf_win;
+    int idx_rem[SUB_YDIM];
+    int blk_len[SUB_YDIM];
+    MPI_Datatype src_type, dst_type;
     MTest_Init(&argc, &argv);
@@ -52,24 +55,19 @@ int main(int argc, char **argv) {
     peer = (rank+1) % nranks;
-    /* Perform ITERATIONS strided accumulate operations */
-    for (i = 0; i < ITERATIONS; i++) {
-      int idx_rem[SUB_YDIM];
-      int blk_len[SUB_YDIM];
-      MPI_Datatype src_type, dst_type;
-      for (j = 0; j < SUB_YDIM; j++) {
+    for (j = 0; j < SUB_YDIM; j++) {
        idx_rem[j] = j*XDIM;
        blk_len[j] = SUB_XDIM;
-      }
+    }
-      MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &src_type);
-      MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &dst_type);
+    MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &src_type);
+    MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &dst_type);
-      MPI_Type_commit(&src_type);
-      MPI_Type_commit(&dst_type);
+    MPI_Type_commit(&src_type);
+    MPI_Type_commit(&dst_type);
+    /* Perform ITERATIONS strided accumulate operations */
+    for (i = 0; i < ITERATIONS; i++) {
       MPI_Win_lock(MPI_LOCK_EXCLUSIVE, peer, 0, buf_win);
       MPI_Put(src_buf, 1, src_type, peer, 0, 1, dst_type, buf_win);
       MPI_Win_unlock(peer, buf_win);
@@ -77,13 +75,13 @@ int main(int argc, char **argv) {
       MPI_Win_lock(MPI_LOCK_EXCLUSIVE, peer, 0, buf_win);
       MPI_Get(dst_buf, 1, src_type, peer, 0, 1, dst_type, buf_win);
       MPI_Win_unlock(peer, buf_win);
-      MPI_Type_free(&src_type);
-      MPI_Type_free(&dst_type);
     }
+    MPI_Barrier(MPI_COMM_WORLD);
+    MPI_Type_free(&src_type);
+    MPI_Type_free(&dst_type);
     /* Verify that the results are correct */
     MPI_Win_lock(MPI_LOCK_EXCLUSIVE, rank, 0, buf_win);
@@ -34,6 +34,9 @@ int main(int argc, char **argv) {
     double *win_buf, *src_buf, *dst_buf;
     MPI_Win buf_win;
     MPI_Comm shr_comm;
+    int idx_rem[SUB_YDIM];
+    int blk_len[SUB_YDIM];
+    MPI_Datatype src_type, dst_type;
     MTest_Init(&argc, &argv);
@@ -62,24 +65,19 @@ int main(int argc, char **argv) {
     peer = (rank+1) % nranks;
-    /* Perform ITERATIONS strided accumulate operations */
-    for (i = 0; i < ITERATIONS; i++) {
-      int idx_rem[SUB_YDIM];
-      int blk_len[SUB_YDIM];
-      MPI_Datatype src_type, dst_type;
-      for (j = 0; j < SUB_YDIM; j++) {
+    for (j = 0; j < SUB_YDIM; j++) {
        idx_rem[j] = j*XDIM;
        blk_len[j] = SUB_XDIM;
-      }
+    }
-      MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &src_type);
-      MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &dst_type);
+    MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &src_type);
+    MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &dst_type);
-      MPI_Type_commit(&src_type);
-      MPI_Type_commit(&dst_type);
+    MPI_Type_commit(&src_type);
+    MPI_Type_commit(&dst_type);
+    /* Perform ITERATIONS strided accumulate operations */
+    for (i = 0; i < ITERATIONS; i++) {
       MPI_Win_lock(MPI_LOCK_EXCLUSIVE, peer, 0, buf_win);
       MPI_Put(src_buf, 1, src_type, peer, 0, 1, dst_type, buf_win);
       MPI_Win_unlock(peer, buf_win);
@@ -87,13 +85,13 @@ int main(int argc, char **argv) {
       MPI_Win_lock(MPI_LOCK_EXCLUSIVE, peer, 0, buf_win);
       MPI_Get(dst_buf, 1, src_type, peer, 0, 1, dst_type, buf_win);
       MPI_Win_unlock(peer, buf_win);
-      MPI_Type_free(&src_type);
-      MPI_Type_free(&dst_type);
     }
+    MPI_Barrier(shr_comm);
+    MPI_Type_free(&src_type);
+    MPI_Type_free(&dst_type);
     /* Verify that the results are correct */
     MPI_Win_lock(MPI_LOCK_EXCLUSIVE, rank, 0, buf_win);