Commit ea1faad0 authored by Darius Buntinas


[svn-r7012] replaced NMPI_[i]Send, [i]Recv, Wait[all] and Test with MPIR__impl, or directly called MPID_ functions.  Reviewed by jayesh@
parent 55624189
......@@ -3180,7 +3180,8 @@ int MPID_VCR_Get_lpid(MPID_VCR vcr, int * lpid_ptr);
/* TODO convert all cut-over constants above to parameters */
#include "mpich_param_vals.h"
/* Tags for point to point operations which implement collective operations */
/* Tags for point to point operations which implement collective and other
internal operations */
#define MPIR_BARRIER_TAG 1
#define MPIR_BCAST_TAG 2
#define MPIR_GATHER_TAG 3
......@@ -3210,10 +3211,10 @@ int MPID_VCR_Get_lpid(MPID_VCR vcr, int * lpid_ptr);
#define MPIR_TOPO_B_TAG 27
#define MPIR_REDUCE_SCATTER_BLOCK_TAG 28
/* These functions are used in the implementation of collective
operations. They are wrappers around MPID send/recv functions. They do
sends/receives by setting the context offset to
MPID_CONTEXT_INTRA_COLL. */
/* These functions are used in the implementation of collective and
other internal operations. They are wrappers around MPID send/recv
functions. They do sends/receives by setting the context offset to
MPID_CONTEXT_INTRA(INTER)_COLL. */
int MPIC_Send(void *buf, int count, MPI_Datatype datatype, int dest, int tag,
MPI_Comm comm);
int MPIC_Recv(void *buf, int count, MPI_Datatype datatype, int source, int tag,
......@@ -3235,7 +3236,7 @@ int MPIC_Irecv(void *buf, int count, MPI_Datatype datatype, int
int MPIC_Isend(void *buf, int count, MPI_Datatype datatype, int dest, int tag,
MPI_Comm comm, MPI_Request *request);
int MPIC_Wait(MPID_Request * request_ptr);
int MPIC_Probe(int source, int tag, MPI_Comm comm, MPI_Status *status);
void MPIR_MAXF ( void *, void *, int *, MPI_Datatype * ) ;
void MPIR_MINF ( void *, void *, int *, MPI_Datatype * ) ;
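As an illustration of the wrapper layer described in the comment above, here is a minimal sketch (not part of the commit) of an internal routine built on the MPIC_ calls declared in this header. The routine name and the zero-byte payload are hypothetical; the MPIC_Send and MPIC_Probe prototypes and MPIR_BARRIER_TAG are taken from the lines above.

/* Hypothetical helper: non-root ranks send a zero-byte notification on the
   internal (collective) context; the root probes for it. */
static int notify_root_sketch(MPI_Comm comm, int root, int rank)
{
    int mpi_errno = MPI_SUCCESS;
    MPI_Status status;

    if (rank != root) {
        mpi_errno = MPIC_Send(NULL, 0, MPI_BYTE, root, MPIR_BARRIER_TAG, comm);
    } else {
        mpi_errno = MPIC_Probe(MPI_ANY_SOURCE, MPIR_BARRIER_TAG, comm, &status);
    }
    return mpi_errno;
}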
......@@ -3480,6 +3481,7 @@ int MPIU_Get_intranode_rank(MPID_Comm *comm_ptr, int r);
#define MPIR_Comm_size(comm_ptr) ((comm_ptr)->local_size)
#define MPIR_Type_extent_impl(datatype, extent_ptr) MPID_Datatype_get_extent_macro(datatype, *(extent_ptr))
#define MPIR_Type_size_impl(datatype, size) MPID_Datatype_get_size_macro(datatype, *(size))
#define MPIR_Test_cancelled_impl(status, flag) *(flag) = (status)->cancelled
/* MPIR_ functions. These are versions of MPI_ functions appropriate for calling within MPI */
int MPIR_Cancel_impl(MPID_Request *request_ptr);
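For example, internal code can now read cancellation state through the MPIR_Test_cancelled_impl macro above instead of calling NMPI_Test_cancelled. A small fragment, with illustrative variable names:

    MPI_Status status;      /* filled in by a completed operation */
    int cancelled = 0;
    MPIR_Test_cancelled_impl(&status, &cancelled);
    /* expands to: cancelled = status.cancelled */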
......@@ -3548,6 +3550,13 @@ int MPIR_Type_indexed_impl(int count, int blocklens[], int indices[], MPI_Dataty
void MPIR_Type_free_impl(MPI_Datatype *datatype);
int MPIR_Type_vector_impl(int count, int blocklength, int stride, MPI_Datatype old_type, MPI_Datatype *newtype_p);
int MPIR_Type_struct_impl(int count, int blocklens[], MPI_Aint indices[], MPI_Datatype old_types[], MPI_Datatype *newtype);
void MPIR_Type_lb_impl(MPI_Datatype datatype, MPI_Aint *displacement);
int MPIR_Ibsend_impl(void *buf, int count, MPI_Datatype datatype, int dest, int tag,
MPID_Comm *comm_ptr, MPI_Request *request);
int MPIR_Test_impl(MPI_Request *request, int *flag, MPI_Status *status);
int MPIR_Wait_impl(MPI_Request *request, MPI_Status *status);
int MPIR_Waitall_impl(int count, MPI_Request array_of_requests[],
MPI_Status array_of_statuses[]);
#endif /* MPIIMPL_INCLUDED */
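The new MPIR_*_impl declarations above are what the rest of this commit substitutes for the NMPI_ wrappers. A minimal sketch of the pattern, with illustrative variable names; error handling then proceeds exactly as in the hunks below, and the nesting counters are no longer needed:

    MPI_Request reqs[2];
    MPI_Status  stats[2];
    int mpi_errno = MPI_SUCCESS;

    /* ... reqs[0] and reqs[1] filled by nonblocking MPID sends/receives ... */

    /* formerly: MPIR_Nest_incr(); mpi_errno = NMPI_Waitall(2, reqs, stats); MPIR_Nest_decr(); */
    mpi_errno = MPIR_Waitall_impl(2, reqs, stats);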
......@@ -26,16 +26,10 @@
#define NMPI_Pack MPI_Pack
#define NMPI_Pack_size MPI_Pack_size
#define NMPI_Unpack MPI_Unpack
#define NMPI_Wait MPI_Wait
#define NMPI_Test MPI_Test
#define NMPI_Type_get_attr MPI_Type_get_attr
#define NMPI_Type_set_attr MPI_Type_set_attr
#define NMPI_Isend MPI_Isend
#define NMPI_Irecv MPI_Irecv
#define NMPI_Recv MPI_Recv
#define NMPI_Send MPI_Send
#define NMPI_Waitall MPI_Waitall
#define NMPI_Sendrecv MPI_Sendrecv
#define NMPI_Type_lb MPI_Type_lb
#define NMPI_Iprobe MPI_Iprobe
#define NMPI_Probe MPI_Probe
......@@ -44,8 +38,6 @@
#define NMPI_Info_create MPI_Info_create
#define NMPI_Info_set MPI_Info_set
#define NMPI_Comm_call_errhandler MPI_Comm_call_errhandler
#define NMPI_Test_cancelled MPI_Test_cancelled
#define NMPI_Ibsend MPI_Ibsend
#define NMPI_Buffer_detach MPI_Buffer_detach
#define NMPI_Type_hindexed MPI_Type_hindexed
#define NMPIX_Grequest_class_create MPIX_Grequest_class_create
......@@ -55,16 +47,10 @@
#define NMPI_Pack PMPI_Pack
#define NMPI_Pack_size PMPI_Pack_size
#define NMPI_Unpack PMPI_Unpack
#define NMPI_Wait PMPI_Wait
#define NMPI_Test PMPI_Test
#define NMPI_Type_get_attr PMPI_Type_get_attr
#define NMPI_Type_set_attr PMPI_Type_set_attr
#define NMPI_Isend PMPI_Isend
#define NMPI_Irecv PMPI_Irecv
#define NMPI_Recv PMPI_Recv
#define NMPI_Send PMPI_Send
#define NMPI_Waitall PMPI_Waitall
#define NMPI_Sendrecv PMPI_Sendrecv
#define NMPI_Type_lb PMPI_Type_lb
#define NMPI_Iprobe PMPI_Iprobe
#define NMPI_Probe PMPI_Probe
......@@ -73,8 +59,6 @@
#define NMPI_Info_create PMPI_Info_create
#define NMPI_Info_set PMPI_Info_set
#define NMPI_Comm_call_errhandler PMPI_Comm_call_errhandler
#define NMPI_Test_cancelled PMPI_Test_cancelled
#define NMPI_Ibsend PMPI_Ibsend
#define NMPI_Buffer_detach PMPI_Buffer_detach
#define NMPI_Type_hindexed PMPI_Type_hindexed
#define NMPIX_Grequest_class_create PMPIX_Grequest_class_create
......
......@@ -449,7 +449,7 @@ int MPIR_Alltoall_intra(
}
/* ... then wait for them to finish: */
mpi_errno = NMPI_Waitall(2*ss,reqarray,starray);
mpi_errno = MPIR_Waitall_impl(2*ss,reqarray,starray);
if (mpi_errno && mpi_errno != MPI_ERR_IN_STATUS) MPIU_ERR_POP(mpi_errno);
/* --BEGIN ERROR HANDLING-- */
......
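The waitall-and-check idiom above recurs in the collectives that follow. The elided "--BEGIN ERROR HANDLING--" block is not reproduced here; the fragment below is only a generic sketch, under the assumption that per-request errors are folded back into mpi_errno when MPI_ERR_IN_STATUS is returned:

    if (mpi_errno == MPI_ERR_IN_STATUS) {
        int j;
        for (j = 0; j < 2*ss; j++) {
            if (starray[j].MPI_ERROR != MPI_SUCCESS)
                mpi_errno = starray[j].MPI_ERROR;   /* remember a failing request's error */
        }
    }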
......@@ -175,7 +175,7 @@ int MPIR_Alltoallv_intra (
}
}
mpi_errno = NMPI_Waitall(req_cnt, reqarray, starray);
mpi_errno = MPIR_Waitall_impl(req_cnt, reqarray, starray);
if (mpi_errno && mpi_errno != MPI_ERR_IN_STATUS) MPIU_ERR_POP(mpi_errno);
/* --BEGIN ERROR HANDLING-- */
......
......@@ -164,7 +164,7 @@ int MPIR_Alltoallw_intra (
}
}
mpi_errno = NMPI_Waitall(outstanding_requests, reqarray, starray);
mpi_errno = MPIR_Waitall_impl(outstanding_requests, reqarray, starray);
if (mpi_errno && mpi_errno != MPI_ERR_IN_STATUS) MPIU_ERR_POP(mpi_errno);
/* --BEGIN ERROR HANDLING-- */
......
......@@ -69,7 +69,6 @@ int MPIR_Gatherv (
MPI_Request *reqarray;
MPI_Status *starray;
MPIU_CHKLMEM_DECL(2);
MPIU_THREADPRIV_DECL;
comm = comm_ptr->handle;
rank = comm_ptr->rank;
......@@ -114,10 +113,7 @@ int MPIR_Gatherv (
}
}
/* ... then wait for *all* of them to finish: */
MPIU_THREADPRIV_GET;
MPIR_Nest_incr();
mpi_errno = NMPI_Waitall(reqs, reqarray, starray);
MPIR_Nest_decr();
mpi_errno = MPIR_Waitall_impl(reqs, reqarray, starray);
if (mpi_errno && mpi_errno != MPI_ERR_IN_STATUS) MPIU_ERR_POP(mpi_errno);
/* --BEGIN ERROR HANDLING-- */
......
......@@ -15,6 +15,31 @@
sends/receives by setting the context offset to
MPID_CONTEXT_INTRA_COLL or MPID_CONTEXT_INTER_COLL. */
#undef FUNCNAME
#define FUNCNAME MPIC_Probe
#undef FCNAME
#define FCNAME MPIU_QUOTE(FUNCNAME)
int MPIC_Probe(int source, int tag, MPI_Comm comm, MPI_Status *status)
{
int mpi_errno = MPI_SUCCESS;
int context_id;
MPID_Comm *comm_ptr;
MPID_Comm_get_ptr( comm, comm_ptr );
context_id = (comm_ptr->comm_kind == MPID_INTRACOMM) ?
MPID_CONTEXT_INTRA_COLL : MPID_CONTEXT_INTER_COLL;
mpi_errno = MPID_Probe(source, tag, comm_ptr, context_id, status);
if (mpi_errno != MPI_SUCCESS) goto fn_fail;
fn_exit:
return mpi_errno;
fn_fail:
goto fn_exit;
}
#undef FUNCNAME
#define FUNCNAME MPIC_Send
#undef FCNAME
......
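A small usage sketch for the new MPIC_Probe (not part of the commit): probe for an internal control message, then receive it on the same context. MPIC_Recv's trailing arguments are assumed to mirror MPI_Recv, i.e. a communicator followed by a status pointer; the routine name is hypothetical.

static int recv_control_int_sketch(MPI_Comm comm, int source, int tag)
{
    int mpi_errno = MPI_SUCCESS;
    int payload = 0;
    MPI_Status status;

    mpi_errno = MPIC_Probe(source, tag, comm, &status);
    if (mpi_errno != MPI_SUCCESS) return mpi_errno;

    mpi_errno = MPIC_Recv(&payload, 1, MPI_INT, status.MPI_SOURCE, tag,
                          comm, MPI_STATUS_IGNORE);
    return mpi_errno;
}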
......@@ -65,7 +65,6 @@ int MPIR_Scatterv (
int i, reqs;
MPI_Request *reqarray;
MPI_Status *starray;
MPIU_THREADPRIV_DECL;
MPIU_CHKLMEM_DECL(2);
comm = comm_ptr->handle;
......@@ -114,10 +113,7 @@ int MPIR_Scatterv (
}
}
/* ... then wait for *all* of them to finish: */
MPIU_THREADPRIV_GET;
MPIR_Nest_incr();
mpi_errno = NMPI_Waitall(reqs, reqarray, starray);
MPIR_Nest_decr();
mpi_errno = MPIR_Waitall_impl(reqs, reqarray, starray);
if (mpi_errno && mpi_errno != MPI_ERR_IN_STATUS) MPIU_ERR_POP(mpi_errno);
/* --BEGIN ERROR HANDLING-- */
if (mpi_errno == MPI_ERR_IN_STATUS) {
......
......@@ -390,8 +390,8 @@ PMPI_LOCAL int MPIR_Comm_create_inter(MPID_Comm *comm_ptr, MPID_Group *group_ptr
so that the remote process can construct the appropriate VCRT
First we exchange group sizes and context ids. Then the
ranks in the remote group, from which the remote VCRT can
be constructed. We can't use NMPI_Sendrecv since we need to
use the "collective" context in the original intercommunicator */
be constructed. We need to use the "collective" context in the
original intercommunicator */
if (comm_ptr->rank == 0) {
int info[2];
info[0] = new_context_id;
......
......@@ -239,7 +239,6 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
int i;
MPID_Comm *newcomm_ptr;
MPIU_CHKLMEM_DECL(4);
MPIU_THREADPRIV_DECL;
MPID_MPI_STATE_DECL(MPID_STATE_MPI_INTERCOMM_CREATE);
MPIR_ERRTEST_INITIALIZED_ORDIE();
......@@ -247,8 +246,6 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
MPIU_THREAD_CS_ENTER(ALLFUNC,);
MPID_MPI_FUNC_ENTER(MPID_STATE_MPI_INTERCOMM_CREATE);
MPIU_THREADPRIV_GET;
/* Validate parameters, especially handles needing to be converted */
# ifdef HAVE_ERROR_CHECKING
{
......@@ -349,7 +346,6 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
}
# endif /* HAVE_ERROR_CHECKING */
MPIR_Nest_incr();
/* First, exchange the group information. If we were certain
that the groups were disjoint, we could exchange possible
context ids at the same time, saving one communication.
......@@ -362,11 +358,12 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
MPIU_DBG_MSG_FMT(COMM,VERBOSE,
(MPIU_DBG_FDEST,"rank %d sendrecv to rank %d",
peer_comm_ptr->rank, remote_leader));
mpi_errno = NMPI_Sendrecv( &local_size, 1, MPI_INT,
mpi_errno = MPIC_Sendrecv( &local_size, 1, MPI_INT,
remote_leader, tag,
&remote_size, 1, MPI_INT,
remote_leader, tag,
peer_comm, MPI_STATUS_IGNORE );
if (mpi_errno) MPIU_ERR_POP(mpi_errno);
MPIU_DBG_MSG_FMT(COMM,VERBOSE,
(MPIU_DBG_FDEST, "local size = %d, remote size = %d", local_size,
......@@ -384,30 +381,23 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
mpi_errno = MPID_GPID_GetAllInComm( comm_ptr, local_size, local_gpids,
&singlePG );
if (mpi_errno) {
MPIR_Nest_decr();
goto fn_fail;
}
/* Exchange the lpid arrays */
NMPI_Sendrecv( local_gpids, 2*local_size, MPI_INT,
remote_leader, tag,
remote_gpids, 2*remote_size, MPI_INT,
remote_leader, tag, peer_comm, MPI_STATUS_IGNORE );
if (mpi_errno) MPIU_ERR_POP(mpi_errno);
/* Exchange the lpid arrays */
mpi_errno = MPIC_Sendrecv( local_gpids, 2*local_size, MPI_INT,
remote_leader, tag,
remote_gpids, 2*remote_size, MPI_INT,
remote_leader, tag, peer_comm, MPI_STATUS_IGNORE );
if (mpi_errno) MPIU_ERR_POP(mpi_errno);
/* Convert the remote gpids to the lpids */
mpi_errno = MPID_GPID_ToLpidArray( remote_size,
remote_gpids, remote_lpids );
if (mpi_errno) {
MPIR_Nest_decr();
goto fn_fail;
}
if (mpi_errno) MPIU_ERR_POP(mpi_errno);
/* Get our own lpids */
mpi_errno = MPID_LPID_GetAllInComm( comm_ptr, local_size, local_lpids );
if (mpi_errno) {
MPIR_Nest_decr();
goto fn_fail;
}
if (mpi_errno) MPIU_ERR_POP(mpi_errno);
# ifdef HAVE_ERROR_CHECKING
{
......@@ -417,11 +407,7 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
check for any overlap */
mpi_errno = MPIR_CheckDisjointLpids( local_lpids, local_size,
remote_lpids, remote_size );
if (mpi_errno)
{
MPIR_Nest_decr();
goto fn_fail;
}
if (mpi_errno) MPIU_ERR_POP(mpi_errno);
}
MPID_END_ERROR_CHECKS;
}
......@@ -435,7 +421,6 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
/* At this point, we're done with the local lpids; they'll
be freed with the other local memory on exit */
MPIR_Nest_decr();
} /* End of the first phase of the leader communication */
/*
......@@ -459,10 +444,11 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
if (comm_ptr->rank == local_leader) {
MPIR_Context_id_t remote_context_id;
NMPI_Sendrecv( &recvcontext_id, 1, MPIR_CONTEXT_ID_T_DATATYPE, remote_leader, tag,
&remote_context_id, 1, MPIR_CONTEXT_ID_T_DATATYPE, remote_leader, tag,
peer_comm, MPI_STATUS_IGNORE );
mpi_errno = MPIC_Sendrecv( &recvcontext_id, 1, MPIR_CONTEXT_ID_T_DATATYPE, remote_leader, tag,
&remote_context_id, 1, MPIR_CONTEXT_ID_T_DATATYPE, remote_leader, tag,
peer_comm, MPI_STATUS_IGNORE );
if (mpi_errno) MPIU_ERR_POP(mpi_errno);
final_context_id = remote_context_id;
/* Now, send all of our local processes the remote_lpids,
......@@ -524,7 +510,7 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
if (comm_ptr->rank != local_leader) {
mpi_errno = MPID_GPID_ToLpidArray( remote_size, remote_gpids,
remote_lpids );
if (mpi_errno) { MPIR_Nest_decr(); goto fn_fail; }
if (mpi_errno) MPIU_ERR_POP(mpi_errno);
}
......
......@@ -23,13 +23,27 @@
#undef MPI_Type_lb
#define MPI_Type_lb PMPI_Type_lb
#undef FUNCNAME
#define FUNCNAME MPIR_Type_lb_impl
#undef FCNAME
#define FCNAME MPIU_QUOTE(FUNCNAME)
void MPIR_Type_lb_impl(MPI_Datatype datatype, MPI_Aint *displacement)
{
if (HANDLE_GET_KIND(datatype) == HANDLE_KIND_BUILTIN) {
*displacement = 0;
} else {
MPID_Datatype *datatype_ptr = NULL;
MPID_Datatype_get_ptr(datatype, datatype_ptr);
*displacement = datatype_ptr->lb;
}
}
#endif
#undef FUNCNAME
#define FUNCNAME MPI_Type_lb
#undef FCNAME
#define FCNAME "MPI_Type_lb"
/*@
MPI_Type_lb - Returns the lower-bound of a datatype
......@@ -92,15 +106,8 @@ int MPI_Type_lb(MPI_Datatype datatype, MPI_Aint *displacement)
/* ... body of routine ... */
if (HANDLE_GET_KIND(datatype) == HANDLE_KIND_BUILTIN)
{
*displacement = 0;
}
else
{
*displacement = datatype_ptr->lb;
}
MPIR_Type_lb_impl(datatype, displacement);
/* ... end of body of routine ... */
#ifdef HAVE_ERROR_CHECKING
......
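With MPIR_Type_lb_impl factored out as above, internal callers can query a datatype's lower bound directly, without NMPI_Type_lb or the nesting counters. A minimal sketch (the wrapper name is hypothetical):

static MPI_Aint datatype_lb_sketch(MPI_Datatype dtype)
{
    MPI_Aint lb = 0;
    MPIR_Type_lb_impl(dtype, &lb);   /* void return: built-ins report 0, others their stored lb */
    return lb;
}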
......@@ -10,12 +10,14 @@
#ifndef MPICH_MPI_FROM_PMPI
static MPI_Comm progress_comm;
static MPID_Comm *progress_comm_ptr;
static MPIU_Thread_id_t progress_thread_id;
static MPIU_Thread_mutex_t progress_mutex;
static MPIU_Thread_cond_t progress_cond;
static volatile int progress_thread_done = 0;
#define WAKE_TAG 100
#undef FUNCNAME
#define FUNCNAME progress_fn
#undef FCNAME
......@@ -24,7 +26,9 @@ static void progress_fn(void * data)
{
#if MPICH_THREAD_LEVEL >= MPI_THREAD_SERIALIZED
int mpi_errno = MPI_SUCCESS;
MPIU_THREADPRIV_DECL;
MPID_Request *request_ptr = NULL;
MPI_Request request;
MPI_Status status;
/* Explicitly add CS_ENTER/EXIT since this thread is created from
* within an internal function and will call NMPI functions
......@@ -40,12 +44,12 @@ static void progress_fn(void * data)
* appropriate, either change what we do in this thread, or delete
* this comment. */
MPIU_THREADPRIV_GET;
MPIR_Nest_incr();
mpi_errno = NMPI_Recv(NULL, 0, MPI_CHAR, 0, 0, progress_comm, MPI_STATUS_IGNORE);
mpi_errno = MPID_Irecv(NULL, 0, MPI_CHAR, 0, WAKE_TAG, progress_comm_ptr,
MPID_CONTEXT_INTRA_PT2PT, &request_ptr);
MPIU_Assert(!mpi_errno);
request = request_ptr->handle;
mpi_errno = MPIR_Wait_impl(&request, &status);
MPIU_Assert(!mpi_errno);
MPIR_Nest_decr();
/* Send a signal to the main thread saying we are done */
MPIU_Thread_mutex_lock(&progress_mutex, &mpi_errno);
......@@ -73,7 +77,7 @@ int MPIR_Init_async_thread(void)
{
#if MPICH_THREAD_LEVEL >= MPI_THREAD_SERIALIZED
int mpi_errno = MPI_SUCCESS;
MPID_Comm *comm_self_ptr, *progress_comm_ptr;
MPID_Comm *comm_self_ptr;
int err = 0;
MPID_MPI_STATE_DECL(MPID_STATE_MPIR_INIT_ASYNC_THREAD);
......@@ -84,7 +88,6 @@ int MPIR_Init_async_thread(void)
MPID_Comm_get_ptr(MPI_COMM_SELF, comm_self_ptr);
mpi_errno = MPIR_Comm_dup_impl(comm_self_ptr, &progress_comm_ptr);
if (mpi_errno) MPIU_ERR_POP(mpi_errno);
progress_comm = progress_comm_ptr->handle;
MPIU_Thread_cond_create(&progress_cond, &err);
MPIU_ERR_CHKANDJUMP1(err, mpi_errno, MPI_ERR_OTHER, "**cond_create", "**cond_create %s", strerror(err));
......@@ -114,17 +117,19 @@ int MPIR_Finalize_async_thread(void)
{
int mpi_errno = MPI_SUCCESS;
#if MPICH_THREAD_LEVEL >= MPI_THREAD_SERIALIZED
MPIU_THREADPRIV_DECL;
MPID_Request *request_ptr = NULL;
MPI_Request request;
MPI_Status status;
MPID_MPI_STATE_DECL(MPID_STATE_MPIR_FINALIZE_ASYNC_THREAD);
MPID_MPI_FUNC_ENTER(MPID_STATE_MPIR_FINALIZE_ASYNC_THREAD);
MPIU_THREADPRIV_GET;
MPIR_Nest_incr();
mpi_errno = NMPI_Send(NULL, 0, MPI_CHAR, 0, 0, progress_comm);
mpi_errno = MPID_Isend(NULL, 0, MPI_CHAR, 0, WAKE_TAG, progress_comm_ptr,
MPID_CONTEXT_INTRA_PT2PT, &request_ptr);
MPIU_Assert(!mpi_errno);
request = request_ptr->handle;
mpi_errno = MPIR_Wait_impl(&request, &status);
MPIU_Assert(!mpi_errno);
MPIR_Nest_decr();
/* XXX DJG why is this unlock/lock necessary? Should we just YIELD here or later? */
MPIU_THREAD_CS_EXIT(ALLFUNC,);
......
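For clarity, the two halves of the wake-up handshake introduced above, collected in one place. Both fragments are taken from the hunks shown: progress_fn parks on a zero-byte receive tagged WAKE_TAG, and MPIR_Finalize_async_thread later sends the matching message.

    /* progress thread (progress_fn): block until the wake-up message arrives */
    mpi_errno = MPID_Irecv(NULL, 0, MPI_CHAR, 0, WAKE_TAG, progress_comm_ptr,
                           MPID_CONTEXT_INTRA_PT2PT, &request_ptr);
    request = request_ptr->handle;
    mpi_errno = MPIR_Wait_impl(&request, &status);

    /* finalize path (MPIR_Finalize_async_thread): send the matching zero-byte message */
    mpi_errno = MPID_Isend(NULL, 0, MPI_CHAR, 0, WAKE_TAG, progress_comm_ptr,
                           MPID_CONTEXT_INTRA_PT2PT, &request_ptr);
    request = request_ptr->handle;
    mpi_errno = MPIR_Wait_impl(&request, &status);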
......@@ -77,7 +77,7 @@ static int initialized = 0; /* keep track of the first call to any
/* Forward references */
static void MPIR_Bsend_retry_pending( void );
static void MPIR_Bsend_check_active ( void );
static int MPIR_Bsend_check_active ( void );
static MPIR_Bsend_data_t *MPIR_Bsend_find_buffer( int );
static void MPIR_Bsend_take_buffer( MPIR_Bsend_data_t *, int );
static int MPIR_Bsend_finalize( void * );
......@@ -169,17 +169,12 @@ int MPIR_Bsend_detach( void *bufferp, int *size )
if (BsendBuffer.active) {
/* Loop through each active element and wait on it */
MPIR_Bsend_data_t *p = BsendBuffer.active;
MPIU_THREADPRIV_DECL;
MPIU_THREADPRIV_GET;
MPIR_Nest_incr();
while (p) {
MPI_Request r = p->request->handle;
NMPI_Wait( &r, MPI_STATUS_IGNORE );
MPIR_Wait_impl( &r, MPI_STATUS_IGNORE );
p = p->next;
}
MPIR_Nest_decr();
}
/* Note that this works even when the buffer does not exist */
......@@ -207,9 +202,10 @@ int MPIR_Bsend_isend( void *buf, int count, MPI_Datatype dtype,
int dest, int tag, MPID_Comm *comm_ptr,
MPIR_Bsend_kind_t kind, MPID_Request **request )
{
int mpi_errno = MPI_SUCCESS;
MPIR_Bsend_data_t *p;
MPIR_Bsend_msg_t *msg;
int packsize, mpi_errno, pass;
int packsize, pass;
MPIU_THREADPRIV_DECL;
/* Find a free segment and copy the data into it. If we could
......@@ -225,7 +221,8 @@ int MPIR_Bsend_isend( void *buf, int count, MPI_Datatype dtype,
/* We check the active buffer first. This helps avoid storage
fragmentation */
MPIR_Bsend_check_active();
mpi_errno = MPIR_Bsend_check_active();
if (mpi_errno) MPIU_ERR_POP(mpi_errno);
if (dtype != MPI_PACKED)
{
......@@ -273,6 +270,7 @@ int MPIR_Bsend_isend( void *buf, int count, MPI_Datatype dtype,
mpi_errno = MPID_Isend(msg->msgbuf, msg->count, MPI_PACKED,
dest, tag, comm_ptr,
MPID_CONTEXT_INTRA_PT2PT, &p->request );
if (mpi_errno) MPIU_ERR_POP(mpi_errno);
if (p->request) {
MPIU_DBG_MSG_FMT(BSEND,TYPICAL,
(MPIU_DBG_FDEST,"saving request %p in %p",p->request,p));
......@@ -284,16 +282,6 @@ int MPIR_Bsend_isend( void *buf, int count, MPI_Datatype dtype,
p->kind = kind;
*request = p->request;
}
else {
/* --BEGIN ERROR HANDLING-- */
if (mpi_errno) {
MPIU_Internal_error_printf ("Bsend internal error: isend returned err = %d", mpi_errno );
}
/* --END ERROR HANDLING-- */
/* If the error is "request not available", we should
put this on the pending list. This will depend on
how we signal failure to send. */
}
break;
}
/* If we found a buffer or we're in the second pass, then break.
......@@ -315,13 +303,16 @@ int MPIR_Bsend_isend( void *buf, int count, MPI_Datatype dtype,
MPIU_DBG_MSG(BSEND,TYPICAL,"Could not find space; dumping arena" );
MPIU_DBG_STMT(BSEND,TYPICAL,MPIR_Bsend_dump());
return MPIR_Err_create_code( MPI_SUCCESS, MPIR_ERR_RECOVERABLE, "MPIR_Bsend_isend", __LINE__, MPI_ERR_BUFFER, "**bufbsend",
"**bufbsend %d %d", packsize,
BsendBuffer.buffer_size );
}
else {
return MPI_SUCCESS;
mpi_errno = MPIR_Err_create_code( MPI_SUCCESS, MPIR_ERR_RECOVERABLE, "MPIR_Bsend_isend", __LINE__, MPI_ERR_BUFFER, "**bufbsend",
"**bufbsend %d %d", packsize,
BsendBuffer.buffer_size );
MPIU_ERR_POP(mpi_errno);
}
fn_exit:
return mpi_errno;
fn_fail:
goto fn_exit;
}
/*
......@@ -431,8 +422,9 @@ static void MPIR_Bsend_free_segment( MPIR_Bsend_data_t *p )
#define FUNCNAME MPIR_Bsend_check_active
#undef FCNAME
#define FCNAME MPIDI_QUOTE(FUNCNAME)
static void MPIR_Bsend_check_active( void )
static int MPIR_Bsend_check_active( void )
{
int mpi_errno = MPI_SUCCESS;
MPIR_Bsend_data_t *active = BsendBuffer.active, *next_active;
MPIU_DBG_MSG_P(BSEND,TYPICAL,"Checking active starting at %p", active);
......@@ -452,19 +444,20 @@ static void MPIR_Bsend_check_active( void )
flag = 0;
/* XXX DJG FIXME-MT should we be checking this? */
if (MPIU_Object_get_ref(active->request) == 1) {
NMPI_Test(&r, &flag, MPI_STATUS_IGNORE );
}
else {
mpi_errno = MPIR_Test_impl(&r, &flag, MPI_STATUS_IGNORE );
if (mpi_errno) MPIU_ERR_POP(mpi_errno);
} else {
/* We need to invoke the progress engine in case we
need to advance other, incomplete communication. */
MPID_Progress_state progress_state;
MPID_Progress_start(&progress_state);
MPID_Progress_test( );
mpi_errno = MPID_Progress_test( );
MPID_Progress_end(&progress_state);
if (mpi_errno) MPIU_ERR_POP(mpi_errno);
}
}
else {
NMPI_Test( &r, &flag, MPI_STATUS_IGNORE );
} else {
mpi_errno = MPIR_Test_impl( &r, &flag, MPI_STATUS_IGNORE );
if (mpi_errno) MPIU_ERR_POP(mpi_errno);
}
if (flag) {
/* We're done. Remove this segment */
......@@ -474,6 +467,11 @@ static void MPIR_Bsend_check_active( void )
active = next_active;
MPIU_DBG_MSG_P(BSEND,TYPICAL,"Next active is %p",active);
}
fn_exit:
return mpi_errno;
fn_fail:
goto fn_exit;
}
/*
......
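The Bsend changes above convert MPIR_Bsend_check_active to the standard fn_exit/fn_fail error idiom instead of swallowing failures. A generic sketch of that idiom, using only calls that appear in this diff (the routine name is illustrative):

static int internal_step_sketch(MPI_Request *req)
{
    int mpi_errno = MPI_SUCCESS;
    int flag = 0;

    mpi_errno = MPIR_Test_impl(req, &flag, MPI_STATUS_IGNORE);
    if (mpi_errno) MPIU_ERR_POP(mpi_errno);

 fn_exit:
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}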
......@@ -37,7 +37,7 @@ PMPI_LOCAL int MPIR_Ibsend_query( void *extra, MPI_Status *status )
{
ibsend_req_info *ibsend_info = (ibsend_req_info *)extra;
status->cancelled =