Commit ea1faad0 authored by Darius Buntinas's avatar Darius Buntinas
Browse files

[svn-r7012] replaced NMPI_[i]Send, [i]Recv, Wait[all] and Test with...

[svn-r7012] replaced NMPI_[i]Send, [i]Recv, Wait[all] and Test with MPIR_*_impl functions, or directly called MPID_ functions.  Reviewed by jayesh@
parent 55624189
...@@ -3180,7 +3180,8 @@ int MPID_VCR_Get_lpid(MPID_VCR vcr, int * lpid_ptr); ...@@ -3180,7 +3180,8 @@ int MPID_VCR_Get_lpid(MPID_VCR vcr, int * lpid_ptr);
/* TODO convert all cut-over constants above to parameters */ /* TODO convert all cut-over constants above to parameters */
#include "mpich_param_vals.h" #include "mpich_param_vals.h"
/* Tags for point to point operations which implement collective operations */ /* Tags for point to point operations which implement collective and other
internal operations */
#define MPIR_BARRIER_TAG 1 #define MPIR_BARRIER_TAG 1
#define MPIR_BCAST_TAG 2 #define MPIR_BCAST_TAG 2
#define MPIR_GATHER_TAG 3 #define MPIR_GATHER_TAG 3
...@@ -3210,10 +3211,10 @@ int MPID_VCR_Get_lpid(MPID_VCR vcr, int * lpid_ptr); ...@@ -3210,10 +3211,10 @@ int MPID_VCR_Get_lpid(MPID_VCR vcr, int * lpid_ptr);
#define MPIR_TOPO_B_TAG 27 #define MPIR_TOPO_B_TAG 27
#define MPIR_REDUCE_SCATTER_BLOCK_TAG 28 #define MPIR_REDUCE_SCATTER_BLOCK_TAG 28
/* These functions are used in the implementation of collective /* These functions are used in the implementation of collective and
operations. They are wrappers around MPID send/recv functions. They do other internal operations. They are wrappers around MPID send/recv
sends/receives by setting the context offset to functions. They do sends/receives by setting the context offset to
MPID_CONTEXT_INTRA_COLL. */ MPID_CONTEXT_INTRA(INTER)_COLL. */
int MPIC_Send(void *buf, int count, MPI_Datatype datatype, int dest, int tag, int MPIC_Send(void *buf, int count, MPI_Datatype datatype, int dest, int tag,
MPI_Comm comm); MPI_Comm comm);
int MPIC_Recv(void *buf, int count, MPI_Datatype datatype, int source, int tag, int MPIC_Recv(void *buf, int count, MPI_Datatype datatype, int source, int tag,
...@@ -3235,7 +3236,7 @@ int MPIC_Irecv(void *buf, int count, MPI_Datatype datatype, int ...@@ -3235,7 +3236,7 @@ int MPIC_Irecv(void *buf, int count, MPI_Datatype datatype, int
int MPIC_Isend(void *buf, int count, MPI_Datatype datatype, int dest, int tag, int MPIC_Isend(void *buf, int count, MPI_Datatype datatype, int dest, int tag,
MPI_Comm comm, MPI_Request *request); MPI_Comm comm, MPI_Request *request);
int MPIC_Wait(MPID_Request * request_ptr); int MPIC_Wait(MPID_Request * request_ptr);
int MPIC_Probe(int source, int tag, MPI_Comm comm, MPI_Status *status);
void MPIR_MAXF ( void *, void *, int *, MPI_Datatype * ) ; void MPIR_MAXF ( void *, void *, int *, MPI_Datatype * ) ;
void MPIR_MINF ( void *, void *, int *, MPI_Datatype * ) ; void MPIR_MINF ( void *, void *, int *, MPI_Datatype * ) ;
...@@ -3480,6 +3481,7 @@ int MPIU_Get_intranode_rank(MPID_Comm *comm_ptr, int r); ...@@ -3480,6 +3481,7 @@ int MPIU_Get_intranode_rank(MPID_Comm *comm_ptr, int r);
#define MPIR_Comm_size(comm_ptr) ((comm_ptr)->local_size) #define MPIR_Comm_size(comm_ptr) ((comm_ptr)->local_size)
#define MPIR_Type_extent_impl(datatype, extent_ptr) MPID_Datatype_get_extent_macro(datatype, *(extent_ptr)) #define MPIR_Type_extent_impl(datatype, extent_ptr) MPID_Datatype_get_extent_macro(datatype, *(extent_ptr))
#define MPIR_Type_size_impl(datatype, size) MPID_Datatype_get_size_macro(datatype, *(size)) #define MPIR_Type_size_impl(datatype, size) MPID_Datatype_get_size_macro(datatype, *(size))
#define MPIR_Test_cancelled_impl(status, flag) *(flag) = (status)->cancelled
/* MPIR_ functions. These are versions of MPI_ functions appropriate for calling within MPI */ /* MPIR_ functions. These are versions of MPI_ functions appropriate for calling within MPI */
int MPIR_Cancel_impl(MPID_Request *request_ptr); int MPIR_Cancel_impl(MPID_Request *request_ptr);
...@@ -3548,6 +3550,13 @@ int MPIR_Type_indexed_impl(int count, int blocklens[], int indices[], MPI_Dataty ...@@ -3548,6 +3550,13 @@ int MPIR_Type_indexed_impl(int count, int blocklens[], int indices[], MPI_Dataty
void MPIR_Type_free_impl(MPI_Datatype *datatype); void MPIR_Type_free_impl(MPI_Datatype *datatype);
int MPIR_Type_vector_impl(int count, int blocklength, int stride, MPI_Datatype old_type, MPI_Datatype *newtype_p); int MPIR_Type_vector_impl(int count, int blocklength, int stride, MPI_Datatype old_type, MPI_Datatype *newtype_p);
int MPIR_Type_struct_impl(int count, int blocklens[], MPI_Aint indices[], MPI_Datatype old_types[], MPI_Datatype *newtype); int MPIR_Type_struct_impl(int count, int blocklens[], MPI_Aint indices[], MPI_Datatype old_types[], MPI_Datatype *newtype);
void MPIR_Type_lb_impl(MPI_Datatype datatype, MPI_Aint *displacement);
int MPIR_Ibsend_impl(void *buf, int count, MPI_Datatype datatype, int dest, int tag,
MPID_Comm *comm_ptr, MPI_Request *request);
int MPIR_Test_impl(MPI_Request *request, int *flag, MPI_Status *status);
int MPIR_Wait_impl(MPI_Request *request, MPI_Status *status);
int MPIR_Waitall_impl(int count, MPI_Request array_of_requests[],
MPI_Status array_of_statuses[]);
#endif /* MPIIMPL_INCLUDED */ #endif /* MPIIMPL_INCLUDED */
...@@ -26,16 +26,10 @@ ...@@ -26,16 +26,10 @@
#define NMPI_Pack MPI_Pack #define NMPI_Pack MPI_Pack
#define NMPI_Pack_size MPI_Pack_size #define NMPI_Pack_size MPI_Pack_size
#define NMPI_Unpack MPI_Unpack #define NMPI_Unpack MPI_Unpack
#define NMPI_Wait MPI_Wait
#define NMPI_Test MPI_Test
#define NMPI_Type_get_attr MPI_Type_get_attr #define NMPI_Type_get_attr MPI_Type_get_attr
#define NMPI_Type_set_attr MPI_Type_set_attr #define NMPI_Type_set_attr MPI_Type_set_attr
#define NMPI_Isend MPI_Isend
#define NMPI_Irecv MPI_Irecv #define NMPI_Irecv MPI_Irecv
#define NMPI_Recv MPI_Recv #define NMPI_Recv MPI_Recv
#define NMPI_Send MPI_Send
#define NMPI_Waitall MPI_Waitall
#define NMPI_Sendrecv MPI_Sendrecv
#define NMPI_Type_lb MPI_Type_lb #define NMPI_Type_lb MPI_Type_lb
#define NMPI_Iprobe MPI_Iprobe #define NMPI_Iprobe MPI_Iprobe
#define NMPI_Probe MPI_Probe #define NMPI_Probe MPI_Probe
...@@ -44,8 +38,6 @@ ...@@ -44,8 +38,6 @@
#define NMPI_Info_create MPI_Info_create #define NMPI_Info_create MPI_Info_create
#define NMPI_Info_set MPI_Info_set #define NMPI_Info_set MPI_Info_set
#define NMPI_Comm_call_errhandler MPI_Comm_call_errhandler #define NMPI_Comm_call_errhandler MPI_Comm_call_errhandler
#define NMPI_Test_cancelled MPI_Test_cancelled
#define NMPI_Ibsend MPI_Ibsend
#define NMPI_Buffer_detach MPI_Buffer_detach #define NMPI_Buffer_detach MPI_Buffer_detach
#define NMPI_Type_hindexed MPI_Type_hindexed #define NMPI_Type_hindexed MPI_Type_hindexed
#define NMPIX_Grequest_class_create MPIX_Grequest_class_create #define NMPIX_Grequest_class_create MPIX_Grequest_class_create
...@@ -55,16 +47,10 @@ ...@@ -55,16 +47,10 @@
#define NMPI_Pack PMPI_Pack #define NMPI_Pack PMPI_Pack
#define NMPI_Pack_size PMPI_Pack_size #define NMPI_Pack_size PMPI_Pack_size
#define NMPI_Unpack PMPI_Unpack #define NMPI_Unpack PMPI_Unpack
#define NMPI_Wait PMPI_Wait
#define NMPI_Test PMPI_Test
#define NMPI_Type_get_attr PMPI_Type_get_attr #define NMPI_Type_get_attr PMPI_Type_get_attr
#define NMPI_Type_set_attr PMPI_Type_set_attr #define NMPI_Type_set_attr PMPI_Type_set_attr
#define NMPI_Isend PMPI_Isend
#define NMPI_Irecv PMPI_Irecv #define NMPI_Irecv PMPI_Irecv
#define NMPI_Recv PMPI_Recv #define NMPI_Recv PMPI_Recv
#define NMPI_Send PMPI_Send
#define NMPI_Waitall PMPI_Waitall
#define NMPI_Sendrecv PMPI_Sendrecv
#define NMPI_Type_lb PMPI_Type_lb #define NMPI_Type_lb PMPI_Type_lb
#define NMPI_Iprobe PMPI_Iprobe #define NMPI_Iprobe PMPI_Iprobe
#define NMPI_Probe PMPI_Probe #define NMPI_Probe PMPI_Probe
...@@ -73,8 +59,6 @@ ...@@ -73,8 +59,6 @@
#define NMPI_Info_create PMPI_Info_create #define NMPI_Info_create PMPI_Info_create
#define NMPI_Info_set PMPI_Info_set #define NMPI_Info_set PMPI_Info_set
#define NMPI_Comm_call_errhandler PMPI_Comm_call_errhandler #define NMPI_Comm_call_errhandler PMPI_Comm_call_errhandler
#define NMPI_Test_cancelled PMPI_Test_cancelled
#define NMPI_Ibsend PMPI_Ibsend
#define NMPI_Buffer_detach PMPI_Buffer_detach #define NMPI_Buffer_detach PMPI_Buffer_detach
#define NMPI_Type_hindexed PMPI_Type_hindexed #define NMPI_Type_hindexed PMPI_Type_hindexed
#define NMPIX_Grequest_class_create PMPIX_Grequest_class_create #define NMPIX_Grequest_class_create PMPIX_Grequest_class_create
......
...@@ -449,7 +449,7 @@ int MPIR_Alltoall_intra( ...@@ -449,7 +449,7 @@ int MPIR_Alltoall_intra(
} }
/* ... then wait for them to finish: */ /* ... then wait for them to finish: */
mpi_errno = NMPI_Waitall(2*ss,reqarray,starray); mpi_errno = MPIR_Waitall_impl(2*ss,reqarray,starray);
if (mpi_errno && mpi_errno != MPI_ERR_IN_STATUS) MPIU_ERR_POP(mpi_errno); if (mpi_errno && mpi_errno != MPI_ERR_IN_STATUS) MPIU_ERR_POP(mpi_errno);
/* --BEGIN ERROR HANDLING-- */ /* --BEGIN ERROR HANDLING-- */
......
...@@ -175,7 +175,7 @@ int MPIR_Alltoallv_intra ( ...@@ -175,7 +175,7 @@ int MPIR_Alltoallv_intra (
} }
} }
mpi_errno = NMPI_Waitall(req_cnt, reqarray, starray); mpi_errno = MPIR_Waitall_impl(req_cnt, reqarray, starray);
if (mpi_errno && mpi_errno != MPI_ERR_IN_STATUS) MPIU_ERR_POP(mpi_errno); if (mpi_errno && mpi_errno != MPI_ERR_IN_STATUS) MPIU_ERR_POP(mpi_errno);
/* --BEGIN ERROR HANDLING-- */ /* --BEGIN ERROR HANDLING-- */
......
...@@ -164,7 +164,7 @@ int MPIR_Alltoallw_intra ( ...@@ -164,7 +164,7 @@ int MPIR_Alltoallw_intra (
} }
} }
mpi_errno = NMPI_Waitall(outstanding_requests, reqarray, starray); mpi_errno = MPIR_Waitall_impl(outstanding_requests, reqarray, starray);
if (mpi_errno && mpi_errno != MPI_ERR_IN_STATUS) MPIU_ERR_POP(mpi_errno); if (mpi_errno && mpi_errno != MPI_ERR_IN_STATUS) MPIU_ERR_POP(mpi_errno);
/* --BEGIN ERROR HANDLING-- */ /* --BEGIN ERROR HANDLING-- */
......
...@@ -69,7 +69,6 @@ int MPIR_Gatherv ( ...@@ -69,7 +69,6 @@ int MPIR_Gatherv (
MPI_Request *reqarray; MPI_Request *reqarray;
MPI_Status *starray; MPI_Status *starray;
MPIU_CHKLMEM_DECL(2); MPIU_CHKLMEM_DECL(2);
MPIU_THREADPRIV_DECL;
comm = comm_ptr->handle; comm = comm_ptr->handle;
rank = comm_ptr->rank; rank = comm_ptr->rank;
...@@ -114,10 +113,7 @@ int MPIR_Gatherv ( ...@@ -114,10 +113,7 @@ int MPIR_Gatherv (
} }
} }
/* ... then wait for *all* of them to finish: */ /* ... then wait for *all* of them to finish: */
MPIU_THREADPRIV_GET; mpi_errno = MPIR_Waitall_impl(reqs, reqarray, starray);
MPIR_Nest_incr();
mpi_errno = NMPI_Waitall(reqs, reqarray, starray);
MPIR_Nest_decr();
if (mpi_errno&& mpi_errno != MPI_ERR_IN_STATUS) MPIU_ERR_POP(mpi_errno); if (mpi_errno&& mpi_errno != MPI_ERR_IN_STATUS) MPIU_ERR_POP(mpi_errno);
/* --BEGIN ERROR HANDLING-- */ /* --BEGIN ERROR HANDLING-- */
......
...@@ -15,6 +15,31 @@ ...@@ -15,6 +15,31 @@
sends/receives by setting the context offset to sends/receives by setting the context offset to
MPID_CONTEXT_INTRA_COLL or MPID_CONTEXT_INTER_COLL. */ MPID_CONTEXT_INTRA_COLL or MPID_CONTEXT_INTER_COLL. */
#undef FUNCNAME
#define FUNCNAME MPIC_Probe
#undef FCNAME
#define FCNAME MPIU_QUOTE(FUNCNAME)
/* Probe on the internal (collective) context of a communicator.
   Helper used by the MPIC_ point-to-point wrappers: it resolves the
   communicator handle to its object and selects the intra- or
   inter-communicator collective context before calling MPID_Probe. */
int MPIC_Probe(int source, int tag, MPI_Comm comm, MPI_Status *status)
{
    int mpi_errno = MPI_SUCCESS;
    int ctx_offset;
    MPID_Comm *comm_ptr = NULL;

    MPID_Comm_get_ptr( comm, comm_ptr );

    /* Internal traffic travels on the collective context, not the
       user point-to-point context. */
    if (comm_ptr->comm_kind == MPID_INTRACOMM)
        ctx_offset = MPID_CONTEXT_INTRA_COLL;
    else
        ctx_offset = MPID_CONTEXT_INTER_COLL;

    mpi_errno = MPID_Probe(source, tag, comm_ptr, ctx_offset, status);
    if (mpi_errno != MPI_SUCCESS) goto fn_fail;

 fn_exit:
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}
#undef FUNCNAME #undef FUNCNAME
#define FUNCNAME MPIC_Send #define FUNCNAME MPIC_Send
#undef FCNAME #undef FCNAME
......
...@@ -65,7 +65,6 @@ int MPIR_Scatterv ( ...@@ -65,7 +65,6 @@ int MPIR_Scatterv (
int i, reqs; int i, reqs;
MPI_Request *reqarray; MPI_Request *reqarray;
MPI_Status *starray; MPI_Status *starray;
MPIU_THREADPRIV_DECL;
MPIU_CHKLMEM_DECL(2); MPIU_CHKLMEM_DECL(2);
comm = comm_ptr->handle; comm = comm_ptr->handle;
...@@ -114,10 +113,7 @@ int MPIR_Scatterv ( ...@@ -114,10 +113,7 @@ int MPIR_Scatterv (
} }
} }
/* ... then wait for *all* of them to finish: */ /* ... then wait for *all* of them to finish: */
MPIU_THREADPRIV_GET; mpi_errno = MPIR_Waitall_impl(reqs, reqarray, starray);
MPIR_Nest_incr();
mpi_errno = NMPI_Waitall(reqs, reqarray, starray);
MPIR_Nest_decr();
if (mpi_errno && mpi_errno != MPI_ERR_IN_STATUS) MPIU_ERR_POP(mpi_errno); if (mpi_errno && mpi_errno != MPI_ERR_IN_STATUS) MPIU_ERR_POP(mpi_errno);
/* --BEGIN ERROR HANDLING-- */ /* --BEGIN ERROR HANDLING-- */
if (mpi_errno == MPI_ERR_IN_STATUS) { if (mpi_errno == MPI_ERR_IN_STATUS) {
......
...@@ -390,8 +390,8 @@ PMPI_LOCAL int MPIR_Comm_create_inter(MPID_Comm *comm_ptr, MPID_Group *group_ptr ...@@ -390,8 +390,8 @@ PMPI_LOCAL int MPIR_Comm_create_inter(MPID_Comm *comm_ptr, MPID_Group *group_ptr
so that the remote process can construct the appropriate VCRT so that the remote process can construct the appropriate VCRT
First we exchange group sizes and context ids. Then the First we exchange group sizes and context ids. Then the
ranks in the remote group, from which the remote VCRT can ranks in the remote group, from which the remote VCRT can
be constructed. We can't use NMPI_Sendrecv since we need to be constructed. We need to use the "collective" context in the
use the "collective" context in the original intercommunicator */ original intercommunicator */
if (comm_ptr->rank == 0) { if (comm_ptr->rank == 0) {
int info[2]; int info[2];
info[0] = new_context_id; info[0] = new_context_id;
......
...@@ -239,7 +239,6 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader, ...@@ -239,7 +239,6 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
int i; int i;
MPID_Comm *newcomm_ptr; MPID_Comm *newcomm_ptr;
MPIU_CHKLMEM_DECL(4); MPIU_CHKLMEM_DECL(4);
MPIU_THREADPRIV_DECL;
MPID_MPI_STATE_DECL(MPID_STATE_MPI_INTERCOMM_CREATE); MPID_MPI_STATE_DECL(MPID_STATE_MPI_INTERCOMM_CREATE);
MPIR_ERRTEST_INITIALIZED_ORDIE(); MPIR_ERRTEST_INITIALIZED_ORDIE();
...@@ -247,8 +246,6 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader, ...@@ -247,8 +246,6 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
MPIU_THREAD_CS_ENTER(ALLFUNC,); MPIU_THREAD_CS_ENTER(ALLFUNC,);
MPID_MPI_FUNC_ENTER(MPID_STATE_MPI_INTERCOMM_CREATE); MPID_MPI_FUNC_ENTER(MPID_STATE_MPI_INTERCOMM_CREATE);
MPIU_THREADPRIV_GET;
/* Validate parameters, especially handles needing to be converted */ /* Validate parameters, especially handles needing to be converted */
# ifdef HAVE_ERROR_CHECKING # ifdef HAVE_ERROR_CHECKING
{ {
...@@ -349,7 +346,6 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader, ...@@ -349,7 +346,6 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
} }
# endif /* HAVE_ERROR_CHECKING */ # endif /* HAVE_ERROR_CHECKING */
MPIR_Nest_incr();
/* First, exchange the group information. If we were certain /* First, exchange the group information. If we were certain
that the groups were disjoint, we could exchange possible that the groups were disjoint, we could exchange possible
context ids at the same time, saving one communication. context ids at the same time, saving one communication.
...@@ -362,11 +358,12 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader, ...@@ -362,11 +358,12 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
MPIU_DBG_MSG_FMT(COMM,VERBOSE, MPIU_DBG_MSG_FMT(COMM,VERBOSE,
(MPIU_DBG_FDEST,"rank %d sendrecv to rank %d", (MPIU_DBG_FDEST,"rank %d sendrecv to rank %d",
peer_comm_ptr->rank, remote_leader)); peer_comm_ptr->rank, remote_leader));
mpi_errno = NMPI_Sendrecv( &local_size, 1, MPI_INT, mpi_errno = MPIC_Sendrecv( &local_size, 1, MPI_INT,
remote_leader, tag, remote_leader, tag,
&remote_size, 1, MPI_INT, &remote_size, 1, MPI_INT,
remote_leader, tag, remote_leader, tag,
peer_comm, MPI_STATUS_IGNORE ); peer_comm, MPI_STATUS_IGNORE );
if (mpi_errno) MPIU_ERR_POP(mpi_errno);
MPIU_DBG_MSG_FMT(COMM,VERBOSE, MPIU_DBG_MSG_FMT(COMM,VERBOSE,
(MPIU_DBG_FDEST, "local size = %d, remote size = %d", local_size, (MPIU_DBG_FDEST, "local size = %d, remote size = %d", local_size,
...@@ -384,30 +381,23 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader, ...@@ -384,30 +381,23 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
mpi_errno = MPID_GPID_GetAllInComm( comm_ptr, local_size, local_gpids, mpi_errno = MPID_GPID_GetAllInComm( comm_ptr, local_size, local_gpids,
&singlePG ); &singlePG );
if (mpi_errno) { if (mpi_errno) MPIU_ERR_POP(mpi_errno);
MPIR_Nest_decr();
goto fn_fail;
}
/* Exchange the lpid arrays */
NMPI_Sendrecv( local_gpids, 2*local_size, MPI_INT,
remote_leader, tag,
remote_gpids, 2*remote_size, MPI_INT,
remote_leader, tag, peer_comm, MPI_STATUS_IGNORE );
/* Exchange the lpid arrays */
mpi_errno = MPIC_Sendrecv( local_gpids, 2*local_size, MPI_INT,
remote_leader, tag,
remote_gpids, 2*remote_size, MPI_INT,
remote_leader, tag, peer_comm, MPI_STATUS_IGNORE );
if (mpi_errno) MPIU_ERR_POP(mpi_errno);
/* Convert the remote gpids to the lpids */ /* Convert the remote gpids to the lpids */
mpi_errno = MPID_GPID_ToLpidArray( remote_size, mpi_errno = MPID_GPID_ToLpidArray( remote_size,
remote_gpids, remote_lpids ); remote_gpids, remote_lpids );
if (mpi_errno) { if (mpi_errno) MPIU_ERR_POP(mpi_errno);
MPIR_Nest_decr();
goto fn_fail;
}
/* Get our own lpids */ /* Get our own lpids */
mpi_errno = MPID_LPID_GetAllInComm( comm_ptr, local_size, local_lpids ); mpi_errno = MPID_LPID_GetAllInComm( comm_ptr, local_size, local_lpids );
if (mpi_errno) { if (mpi_errno) MPIU_ERR_POP(mpi_errno);
MPIR_Nest_decr();
goto fn_fail;
}
# ifdef HAVE_ERROR_CHECKING # ifdef HAVE_ERROR_CHECKING
{ {
...@@ -417,11 +407,7 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader, ...@@ -417,11 +407,7 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
check for any overlap */ check for any overlap */
mpi_errno = MPIR_CheckDisjointLpids( local_lpids, local_size, mpi_errno = MPIR_CheckDisjointLpids( local_lpids, local_size,
remote_lpids, remote_size ); remote_lpids, remote_size );
if (mpi_errno) if (mpi_errno) MPIU_ERR_POP(mpi_errno);
{
MPIR_Nest_decr();
goto fn_fail;
}
} }
MPID_END_ERROR_CHECKS; MPID_END_ERROR_CHECKS;
} }
...@@ -435,7 +421,6 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader, ...@@ -435,7 +421,6 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
/* At this point, we're done with the local lpids; they'll /* At this point, we're done with the local lpids; they'll
be freed with the other local memory on exit */ be freed with the other local memory on exit */
MPIR_Nest_decr();
} /* End of the first phase of the leader communication */ } /* End of the first phase of the leader communication */
/* /*
...@@ -459,10 +444,11 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader, ...@@ -459,10 +444,11 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
if (comm_ptr->rank == local_leader) { if (comm_ptr->rank == local_leader) {
MPIR_Context_id_t remote_context_id; MPIR_Context_id_t remote_context_id;
NMPI_Sendrecv( &recvcontext_id, 1, MPIR_CONTEXT_ID_T_DATATYPE, remote_leader, tag, mpi_errno = MPIC_Sendrecv( &recvcontext_id, 1, MPIR_CONTEXT_ID_T_DATATYPE, remote_leader, tag,
&remote_context_id, 1, MPIR_CONTEXT_ID_T_DATATYPE, remote_leader, tag, &remote_context_id, 1, MPIR_CONTEXT_ID_T_DATATYPE, remote_leader, tag,
peer_comm, MPI_STATUS_IGNORE ); peer_comm, MPI_STATUS_IGNORE );
if (mpi_errno) MPIU_ERR_POP(mpi_errno);
final_context_id = remote_context_id; final_context_id = remote_context_id;
/* Now, send all of our local processes the remote_lpids, /* Now, send all of our local processes the remote_lpids,
...@@ -524,7 +510,7 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader, ...@@ -524,7 +510,7 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
if (comm_ptr->rank != local_leader) { if (comm_ptr->rank != local_leader) {
mpi_errno = MPID_GPID_ToLpidArray( remote_size, remote_gpids, mpi_errno = MPID_GPID_ToLpidArray( remote_size, remote_gpids,
remote_lpids ); remote_lpids );
if (mpi_errno) { MPIR_Nest_decr(); goto fn_fail; } if (mpi_errno) MPIU_ERR_POP(mpi_errno);
} }
......
...@@ -23,13 +23,27 @@ ...@@ -23,13 +23,27 @@
#undef MPI_Type_lb #undef MPI_Type_lb
#define MPI_Type_lb PMPI_Type_lb #define MPI_Type_lb PMPI_Type_lb
#undef FUNCNAME
#define FUNCNAME MPIR_Type_lb_impl
#undef FCNAME
#define FCNAME MPIU_QUOTE(FUNCNAME)
/* Return the lower bound of a datatype.  Built-in (predefined)
   datatypes have a lower bound of zero by definition; for derived
   datatypes the value is read from the datatype object's lb field. */
void MPIR_Type_lb_impl(MPI_Datatype datatype, MPI_Aint *displacement)
{
    if (HANDLE_GET_KIND(datatype) != HANDLE_KIND_BUILTIN) {
        MPID_Datatype *dtp = NULL;
        MPID_Datatype_get_ptr(datatype, dtp);
        *displacement = dtp->lb;
    } else {
        *displacement = 0;
    }
}
#endif #endif
#undef FUNCNAME #undef FUNCNAME
#define FUNCNAME MPI_Type_lb #define FUNCNAME MPI_Type_lb
#undef FCNAME #undef FCNAME
#define FCNAME "MPI_Type_lb" #define FCNAME "MPI_Type_lb"
/*@ /*@
MPI_Type_lb - Returns the lower-bound of a datatype MPI_Type_lb - Returns the lower-bound of a datatype
...@@ -92,15 +106,8 @@ int MPI_Type_lb(MPI_Datatype datatype, MPI_Aint *displacement) ...@@ -92,15 +106,8 @@ int MPI_Type_lb(MPI_Datatype datatype, MPI_Aint *displacement)
/* ... body of routine ... */ /* ... body of routine ... */
if (HANDLE_GET_KIND(datatype) == HANDLE_KIND_BUILTIN) MPIR_Type_lb_impl(datatype, displacement);
{
*displacement = 0;
}
else
{
*displacement = datatype_ptr->lb;
}
/* ... end of body of routine ... */ /* ... end of body of routine ... */
#ifdef HAVE_ERROR_CHECKING #ifdef HAVE_ERROR_CHECKING
......
...@@ -10,12 +10,14 @@ ...@@ -10,12 +10,14 @@
#ifndef MPICH_MPI_FROM_PMPI #ifndef MPICH_MPI_FROM_PMPI
static MPI_Comm progress_comm; static MPID_Comm *progress_comm_ptr;
static MPIU_Thread_id_t progress_thread_id; static MPIU_Thread_id_t progress_thread_id;
static MPIU_Thread_mutex_t progress_mutex; static MPIU_Thread_mutex_t progress_mutex;
static MPIU_Thread_cond_t progress_cond; static MPIU_Thread_cond_t progress_cond;
static volatile int progress_thread_done = 0; static volatile int progress_thread_done = 0;
#define WAKE_TAG 100
#undef FUNCNAME #undef FUNCNAME
#define FUNCNAME progress_fn #define FUNCNAME progress_fn
#undef FCNAME #undef FCNAME
...@@ -24,7 +26,9 @@ static void progress_fn(void * data) ...@@ -24,7 +26,9 @@ static void progress_fn(void * data)
{ {
#if MPICH_THREAD_LEVEL >= MPI_THREAD_SERIALIZED #if MPICH_THREAD_LEVEL >= MPI_THREAD_SERIALIZED
int mpi_errno = MPI_SUCCESS; int mpi_errno = MPI_SUCCESS;
MPIU_THREADPRIV_DECL; MPID_Request *request_ptr = NULL;
MPI_Request request;
MPI_Status status;
/* Explicitly add CS_ENTER/EXIT since this thread is created from /* Explicitly add CS_ENTER/EXIT since this thread is created from
* within an internal function and will call NMPI functions * within an internal function and will call NMPI functions
...@@ -40,12 +44,12 @@ static void progress_fn(void * data) ...@@ -40,12 +44,12 @@ static void progress_fn(void * data)
* appropriate, either change what we do in this thread, or delete * appropriate, either change what we do in this thread, or delete
* this comment. */ * this comment. */
MPIU_THREADPRIV_GET; mpi_errno = MPID_Irecv(NULL, 0, MPI_CHAR, 0, WAKE_TAG, progress_comm_ptr,
MPID_CONTEXT_INTRA_PT2PT, &request_ptr);
MPIR_Nest_incr(); MPIU_Assert(!mpi_errno);
mpi_errno = NMPI_Recv(NULL, 0, MPI_CHAR, 0, 0, progress_comm, MPI_STATUS_IGNORE); request = request_ptr->handle;
mpi_errno = MPIR_Wait_impl(&request, &status);
MPIU_Assert(!mpi_errno); MPIU_Assert(!mpi_errno);
MPIR_Nest_decr();
/* Send a signal to the main thread saying we are done */ /* Send a signal to the main thread saying we are done */
MPIU_Thread_mutex_lock(&progress_mutex, &mpi_errno); MPIU_Thread_mutex_lock(&progress_mutex, &mpi_errno);
...@@ -73,7 +77,7 @@ int MPIR_Init_async_thread(void) ...@@ -73,7 +77,7 @@ int MPIR_Init_async_thread(void)
{ {
#if MPICH_THREAD_LEVEL >= MPI_THREAD_SERIALIZED #if MPICH_THREAD_LEVEL >= MPI_THREAD_SERIALIZED
int mpi_errno = MPI_SUCCESS; int mpi_errno = MPI_SUCCESS;
MPID_Comm *comm_self_ptr, *progress_comm_ptr; MPID_Comm *comm_self_ptr;
int err = 0; int err = 0;
MPID_MPI_STATE_DECL(MPID_STATE_MPIR_INIT_ASYNC_THREAD); MPID_MPI_STATE_DECL(MPID_STATE_MPIR_INIT_ASYNC_THREAD);
...@@ -84,7 +88,6 @@ int MPIR_Init_async_thread(void) ...@@ -84,7 +88,6 @@ int MPIR_Init_async_thread(void)