Commit 3be7e0fc authored by Pavan Balaji

[svn-r9678] MPIU_CALL is no longer required since we don't provide dllchan.

No reviewer.
parent b3ca2868
......@@ -3520,31 +3520,6 @@ int MPID_VCR_Get_lpid(MPID_VCR vcr, int * lpid_ptr);
* collectives */
#include "mpir_nbc.h"
/* ------------------------------------------------------------------------- */
/* Define a macro to allow us to select between statically selected functions
* and dynamically loaded ones. If USE_DYNAMIC_LIBRARIES is defined,
* the macro MPIU_CALL(context,funccall) expands into
* MPIU_CALL##context.funccall.
* For example,
* err = MPIU_CALL(MPIDI_CH3,iSend(...))
* will expand into
* err = MPIU_CALL_MPIDI_CH3.iSend(...)
* If USE_DYNAMIC_LIBS is not selected, then it expands into
* err = MPIDI_CH3_iSend(...)
*
* In the case where dynamic libraries are used, a variable named
* MPIU_CALL_context must be defined that contains the function pointers;
* initializing the function pointers must be done before the first use.
* Typically, this variable will be the single instance of a structure that
* contains the function pointers.
*/
/* ------------------------------------------------------------------------- */
#ifdef USE_DYNAMIC_LIBRARIES
#define MPIU_CALL(context,funccall) MPIU_CALL_##context.funccall
#else
#define MPIU_CALL(context,funccall) context##_##funccall
#endif
#include "mpiimplthreadpost.h"
/* Include definitions from the device which require items defined by this
......
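For context when reading the call-site hunks below: the removed macro only chose between two spellings of the same channel call, so every change in this commit is a mechanical rewrite. A minimal sketch, with the macro text copied from the hunk above and the example call taken from the iSendv hunk further down; the dynamic-case table MPIU_CALL_MPIDI_CH3 is only described in the removed comment, not shown in this diff:

/* Removed selection macro (copied from the hunk above). */
#ifdef USE_DYNAMIC_LIBRARIES
/* Dynamic case: calls go through a per-context structure of function pointers. */
#define MPIU_CALL(context,funccall) MPIU_CALL_##context.funccall
#else
/* Static case: the tokens are pasted into an ordinary function name. */
#define MPIU_CALL(context,funccall) context##_##funccall
#endif

/* Static expansion (the only case left; this commit writes it out at each call site):
 *   mpi_errno = MPIU_CALL(MPIDI_CH3,iSendv(vc, sreq, iov, iov_n));
 *     expands to   mpi_errno = MPIDI_CH3_iSendv(vc, sreq, iov, iov_n);
 * Dynamic expansion (required MPIU_CALL_MPIDI_CH3, a struct of function pointers,
 * to be defined and initialized before first use):
 *     expands to   mpi_errno = MPIU_CALL_MPIDI_CH3.iSendv(vc, sreq, iov, iov_n);
 */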
......@@ -10,12 +10,6 @@
#include "mpidimpl.h"
#include "ch3usock.h"
/* Redefine MPIU_CALL since the sock channel should be self-contained.
This only affects the building of a dynamically loadable library for
the sock channel, and then only when debugging is enabled */
#undef MPIU_CALL
#define MPIU_CALL(context,funccall) context##_##funccall
/* Define the channel-private data structures; these are overlaid on the
channel_private scratchpads */
typedef struct MPIDI_CH3I_VC
......
......@@ -972,13 +972,13 @@ int MPIDI_PrintConnStrToFile( FILE *fd, const char *file, int line,
MPIU_DBG_MSG_FMT(CH3_CONNECT,TYPICAL,(MPIU_DBG_FDEST, \
"vc=%p: Setting state (vc) from %s to %s, vcchstate is %s", \
_vc, MPIDI_VC_GetStateString((_vc)->state), \
#_newstate, MPIU_CALL(MPIDI_CH3,VC_GetStateString( (_vc) ))) );\
#_newstate, MPIDI_CH3_VC_GetStateString( (_vc) ))); \
} while (0)
#define MPIU_DBG_VCCHSTATECHANGE(_vc,_newstate) \
MPIU_DBG_MSG_FMT(CH3_CONNECT,TYPICAL,(MPIU_DBG_FDEST, \
"vc=%p: Setting state (ch) from %s to %s, vc state is %s", \
_vc, MPIU_CALL(MPIDI_CH3,VC_GetStateString((_vc))), \
_vc, MPIDI_CH3_VC_GetStateString((_vc)), \
#_newstate, MPIDI_VC_GetStateString( (_vc)->state )) )
#define MPIU_DBG_CONNSTATECHANGE(_vc,_conn,_newstate) \
......
......@@ -41,7 +41,7 @@ int MPIDI_CH3_SendNoncontig_iov( MPIDI_VC_t *vc, MPID_Request *sreq,
/* Note this routine is invoked within a CH3 critical section */
/* MPIU_THREAD_CS_ENTER(CH3COMM,vc); */
mpi_errno = MPIU_CALL(MPIDI_CH3,iSendv(vc, sreq, iov, iov_n));
mpi_errno = MPIDI_CH3_iSendv(vc, sreq, iov, iov_n);
/* MPIU_THREAD_CS_EXIT(CH3COMM,vc); */
/* --BEGIN ERROR HANDLING-- */
if (mpi_errno != MPI_SUCCESS)
......@@ -180,7 +180,7 @@ int MPIDI_CH3_EagerContigSend( MPID_Request **sreq_p,
MPIU_DBG_MSGPKT(vc,tag,eager_pkt->match.parts.context_id,rank,data_sz,"EagerContig");
MPIU_THREAD_CS_ENTER(CH3COMM,vc);
mpi_errno = MPIU_CALL(MPIDI_CH3,iStartMsgv(vc, iov, 2, sreq_p));
mpi_errno = MPIDI_CH3_iStartMsgv(vc, iov, 2, sreq_p);
MPIU_THREAD_CS_EXIT(CH3COMM,vc);
if (mpi_errno != MPI_SUCCESS) {
MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER,"**ch3|eagermsg");
......@@ -255,8 +255,7 @@ int MPIDI_CH3_EagerContigShortSend( MPID_Request **sreq_p,
MPIU_DBG_MSGPKT(vc,tag,eagershort_pkt->match.parts.context_id,rank,data_sz,
"EagerShort");
MPIU_THREAD_CS_ENTER(CH3COMM,vc);
mpi_errno = MPIU_CALL(MPIDI_CH3,iStartMsg(vc, eagershort_pkt,
sizeof(*eagershort_pkt), sreq_p ));
mpi_errno = MPIDI_CH3_iStartMsg(vc, eagershort_pkt, sizeof(*eagershort_pkt), sreq_p);
MPIU_THREAD_CS_EXIT(CH3COMM,vc);
if (mpi_errno != MPI_SUCCESS) {
MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER,"**ch3|eagermsg");
......@@ -546,7 +545,7 @@ int MPIDI_CH3_EagerContigIsend( MPID_Request **sreq_p,
MPIU_DBG_MSGPKT(vc,tag,eager_pkt->match.parts.context_id,rank,data_sz,"EagerIsend");
MPIU_THREAD_CS_ENTER(CH3COMM,vc);
mpi_errno = MPIU_CALL(MPIDI_CH3,iSendv(vc, sreq, iov, 2 ));
mpi_errno = MPIDI_CH3_iSendv(vc, sreq, iov, 2);
MPIU_THREAD_CS_EXIT(CH3COMM,vc);
/* --BEGIN ERROR HANDLING-- */
if (mpi_errno != MPI_SUCCESS)
......
......@@ -74,7 +74,7 @@ int MPIDI_CH3_EagerSyncNoncontigSend( MPID_Request **sreq_p,
iov[1].MPID_IOV_LEN = data_sz;
MPIU_THREAD_CS_ENTER(CH3COMM,vc);
mpi_errno = MPIU_CALL(MPIDI_CH3,iSendv(vc, sreq, iov, 2));
mpi_errno = MPIDI_CH3_iSendv(vc, sreq, iov, 2);
MPIU_THREAD_CS_EXIT(CH3COMM,vc);
/* --BEGIN ERROR HANDLING-- */
if (mpi_errno != MPI_SUCCESS)
......@@ -146,7 +146,7 @@ int MPIDI_CH3_EagerSyncZero(MPID_Request **sreq_p, int rank, int tag,
MPIU_DBG_MSGPKT(vc,tag,es_pkt->match.parts.context_id,rank,(MPIDI_msg_sz_t)0,"EagerSync0");
MPIU_THREAD_CS_ENTER(CH3COMM,vc);
mpi_errno = MPIU_CALL(MPIDI_CH3,iSend(vc, sreq, es_pkt, sizeof(*es_pkt)));
mpi_errno = MPIDI_CH3_iSend(vc, sreq, es_pkt, sizeof(*es_pkt));
MPIU_THREAD_CS_EXIT(CH3COMM,vc);
/* --BEGIN ERROR HANDLING-- */
if (mpi_errno != MPI_SUCCESS)
......@@ -178,8 +178,7 @@ int MPIDI_CH3_EagerSyncAck( MPIDI_VC_t *vc, MPID_Request *rreq )
MPIDI_Pkt_init(esa_pkt, MPIDI_CH3_PKT_EAGER_SYNC_ACK);
esa_pkt->sender_req_id = rreq->dev.sender_req_id;
MPIU_THREAD_CS_ENTER(CH3COMM,vc);
mpi_errno = MPIU_CALL(MPIDI_CH3,iStartMsg(vc, esa_pkt, sizeof(*esa_pkt),
&esa_req));
mpi_errno = MPIDI_CH3_iStartMsg(vc, esa_pkt, sizeof(*esa_pkt), &esa_req);
MPIU_THREAD_CS_EXIT(CH3COMM,vc);
if (mpi_errno != MPI_SUCCESS) {
MPIU_ERR_POP(mpi_errno);
......@@ -279,8 +278,7 @@ int MPIDI_CH3_PktHandler_EagerSyncSend( MPIDI_VC_t *vc, MPIDI_CH3_Pkt_t *pkt,
esa_pkt->sender_req_id = rreq->dev.sender_req_id;
/* Because this is a packet handler, it is already within a CH3 CS */
/* MPIU_THREAD_CS_ENTER(CH3COMM,vc); */
mpi_errno = MPIU_CALL(MPIDI_CH3,iStartMsg(vc, esa_pkt,
sizeof(*esa_pkt), &esa_req));
mpi_errno = MPIDI_CH3_iStartMsg(vc, esa_pkt, sizeof(*esa_pkt), &esa_req);
/* MPIU_THREAD_CS_EXIT(CH3COMM,vc); */
if (mpi_errno != MPI_SUCCESS) {
MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER,
......
......@@ -249,8 +249,7 @@ int MPIDI_CH3U_VC_SendClose( MPIDI_VC_t *vc, int rank )
MPIDI_CHANGE_VC_STATE(vc, CLOSE_ACKED);
}
mpi_errno = MPIU_CALL(MPIDI_CH3,iStartMsg(vc, close_pkt,
sizeof(*close_pkt), &sreq));
mpi_errno = MPIDI_CH3_iStartMsg(vc, close_pkt, sizeof(*close_pkt), &sreq);
MPIU_ERR_CHKANDJUMP(mpi_errno, mpi_errno, MPI_ERR_OTHER, "**ch3|send_close_ack");
if (sreq != NULL) {
......@@ -291,8 +290,7 @@ int MPIDI_CH3_PktHandler_Close( MPIDI_VC_t *vc, MPIDI_CH3_Pkt_t *pkt,
MPIU_DBG_MSG_D(CH3_DISCONNECT,TYPICAL,"sending close(TRUE) to %d",
vc->pg_rank);
mpi_errno = MPIU_CALL(MPIDI_CH3,iStartMsg(vc, resp_pkt,
sizeof(*resp_pkt), &resp_sreq));
mpi_errno = MPIDI_CH3_iStartMsg(vc, resp_pkt, sizeof(*resp_pkt), &resp_sreq);
MPIU_ERR_CHKANDJUMP(mpi_errno, mpi_errno, MPI_ERR_OTHER, "**ch3|send_close_ack");
if (resp_sreq != NULL)
......@@ -337,7 +335,7 @@ int MPIDI_CH3_PktHandler_Close( MPIDI_VC_t *vc, MPIDI_CH3_Pkt_t *pkt,
MPIDI_CHANGE_VC_STATE(vc, CLOSED);
/* For example, with sockets, Connection_terminate will close
the socket */
mpi_errno = MPIU_CALL(MPIDI_CH3,Connection_terminate(vc));
mpi_errno = MPIDI_CH3_Connection_terminate(vc);
}
*buflen = sizeof(MPIDI_CH3_Pkt_t);
......@@ -412,7 +410,7 @@ static int terminate_failed_VCs(MPID_Group *new_failed_group)
/* terminate the VC */
/* FIXME: This won't work for dynamic procs */
MPIDI_PG_Get_vc(MPIDI_Process.my_pg, new_failed_group->lrank_to_lpid[i].lpid, &vc);
mpi_errno = MPIU_CALL(MPIDI_CH3,Connection_terminate(vc));
mpi_errno = MPIDI_CH3_Connection_terminate(vc);
if (mpi_errno) MPIU_ERR_POP(mpi_errno);
}
......
......@@ -908,8 +908,7 @@ int MPIDI_CH3I_Send_pt_rma_done_pkt(MPIDI_VC_t *vc, MPI_Win source_win_handle)
/* Because this is in a packet handler, it is already within a critical section */
/* MPIU_THREAD_CS_ENTER(CH3COMM,vc); */
mpi_errno = MPIU_CALL(MPIDI_CH3,iStartMsg(vc, pt_rma_done_pkt,
sizeof(*pt_rma_done_pkt), &req));
mpi_errno = MPIDI_CH3_iStartMsg(vc, pt_rma_done_pkt, sizeof(*pt_rma_done_pkt), &req);
/* MPIU_THREAD_CS_EXIT(CH3COMM,vc); */
if (mpi_errno != MPI_SUCCESS) {
MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER,"**ch3|rmamsg");
......@@ -1015,7 +1014,7 @@ static int do_simple_get(MPID_Win *win_ptr, MPIDI_Win_lock_queue *lock_queue)
/* Because this is in a packet handler, it is already within a critical section */
/* MPIU_THREAD_CS_ENTER(CH3COMM,vc); */
mpi_errno = MPIU_CALL(MPIDI_CH3,iSendv(lock_queue->vc, req, iov, 2));
mpi_errno = MPIDI_CH3_iSendv(lock_queue->vc, req, iov, 2);
/* MPIU_THREAD_CS_EXIT(CH3COMM,vc); */
/* --BEGIN ERROR HANDLING-- */
if (mpi_errno != MPI_SUCCESS)
......
......@@ -129,7 +129,7 @@ static int MPIDI_Create_inter_root_communicator_connect(const char *port_name,
temporary intercommunicator between the two roots so that
we can use MPI functions to communicate data between them. */
mpi_errno = MPIU_CALL(MPIDI_CH3,Connect_to_root(port_name, &connect_vc));
mpi_errno = MPIDI_CH3_Connect_to_root(port_name, &connect_vc);
if (mpi_errno != MPI_SUCCESS) {
MPIU_ERR_POP(mpi_errno);
}
......@@ -529,7 +529,7 @@ int MPIDI_Comm_connect(const char *port_name, MPID_Info *info, int root,
{
int mpi_errno2 = MPI_SUCCESS;
if (new_vc) {
mpi_errno2 = MPIU_CALL(MPIDI_CH3,VC_Destroy(new_vc));
mpi_errno2 = MPIDI_CH3_VC_Destroy(new_vc);
if (mpi_errno2) MPIU_ERR_SET(mpi_errno2, MPI_ERR_OTHER, "**fail");
}
......@@ -1231,7 +1231,7 @@ static int FreeNewVC( MPIDI_VC_t *new_vc )
MPID_Progress_end(&progress_state);
}
MPIU_CALL(MPIDI_CH3,VC_Destroy(new_vc));
MPIDI_CH3_VC_Destroy(new_vc);
MPIU_Free(new_vc);
fn_fail:
......
......@@ -644,7 +644,7 @@ static int MPIDI_CH3I_Send_rma_msg(MPIDI_RMA_ops *rma_op, MPID_Win *win_ptr,
iov[1].MPID_IOV_LEN = rma_op->origin_count * origin_type_size;
iovcnt = 2;
MPIU_THREAD_CS_ENTER(CH3COMM,vc);
mpi_errno = MPIU_CALL(MPIDI_CH3,iStartMsgv(vc, iov, iovcnt, request));
mpi_errno = MPIDI_CH3_iStartMsgv(vc, iov, iovcnt, request);
MPIU_THREAD_CS_EXIT(CH3COMM,vc);
MPIU_ERR_CHKANDJUMP(mpi_errno, mpi_errno, MPI_ERR_OTHER, "**ch3|rmamsg");
}
......@@ -795,7 +795,7 @@ static int MPIDI_CH3I_Send_contig_acc_msg(MPIDI_RMA_ops *rma_op,
comm_ptr = win_ptr->comm_ptr;
MPIDI_Comm_get_vc_set_active(comm_ptr, rma_op->target_rank, &vc);
MPIU_THREAD_CS_ENTER(CH3COMM,vc);
mpi_errno = MPIU_CALL(MPIDI_CH3,iStartMsg(vc, accumi_pkt, sizeof(*accumi_pkt), request));
mpi_errno = MPIDI_CH3_iStartMsg(vc, accumi_pkt, sizeof(*accumi_pkt), request);
MPIU_THREAD_CS_EXIT(CH3COMM,vc);
MPIU_ERR_CHKANDJUMP(mpi_errno, mpi_errno, MPI_ERR_OTHER, "**ch3|rmamsg");
goto fn_exit;
......@@ -833,7 +833,7 @@ static int MPIDI_CH3I_Send_contig_acc_msg(MPIDI_RMA_ops *rma_op,
iov[1].MPID_IOV_LEN = rma_op->origin_count * origin_type_size;
iovcnt = 2;
MPIU_THREAD_CS_ENTER(CH3COMM,vc);
mpi_errno = MPIU_CALL(MPIDI_CH3,iStartMsgv(vc, iov, iovcnt, request));
mpi_errno = MPIDI_CH3_iStartMsgv(vc, iov, iovcnt, request);
MPIU_THREAD_CS_EXIT(CH3COMM,vc);
MPIU_ERR_CHKANDJUMP(mpi_errno, mpi_errno, MPI_ERR_OTHER, "**ch3|rmamsg");
......@@ -927,7 +927,7 @@ static int MPIDI_CH3I_Recv_rma_msg(MPIDI_RMA_ops *rma_op, MPID_Win *win_ptr,
{
/* basic datatype on target. simply send the get_pkt. */
MPIU_THREAD_CS_ENTER(CH3COMM,vc);
mpi_errno = MPIU_CALL(MPIDI_CH3,iStartMsg(vc, get_pkt, sizeof(*get_pkt), &req));
mpi_errno = MPIDI_CH3_iStartMsg(vc, get_pkt, sizeof(*get_pkt), &req);
MPIU_THREAD_CS_EXIT(CH3COMM,vc);
}
else
......@@ -972,7 +972,7 @@ static int MPIDI_CH3I_Recv_rma_msg(MPIDI_RMA_ops *rma_op, MPID_Win *win_ptr,
iov[2].MPID_IOV_LEN = dtp->dataloop_size;
MPIU_THREAD_CS_ENTER(CH3COMM,vc);
mpi_errno = MPIU_CALL(MPIDI_CH3,iStartMsgv(vc, iov, 3, &req));
mpi_errno = MPIDI_CH3_iStartMsgv(vc, iov, 3, &req);
MPIU_THREAD_CS_EXIT(CH3COMM,vc);
/* release the target datatype */
......@@ -1424,9 +1424,7 @@ int MPIDI_Win_complete(MPID_Win *win_ptr)
MPIDI_Comm_get_vc_set_active(comm_ptr, dst, &vc);
MPIU_THREAD_CS_ENTER(CH3COMM,vc);
mpi_errno = MPIU_CALL(MPIDI_CH3,iStartMsg(vc, put_pkt,
sizeof(*put_pkt),
&request));
mpi_errno = MPIDI_CH3_iStartMsg(vc, put_pkt, sizeof(*put_pkt), &request);
MPIU_THREAD_CS_EXIT(CH3COMM,vc);
if (mpi_errno != MPI_SUCCESS) {
MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER,"**ch3|rmamsg" );
......@@ -1798,7 +1796,7 @@ int MPIDI_Win_unlock(int dest, MPID_Win *win_ptr)
win_ptr->lock_granted = 0;
MPIU_THREAD_CS_ENTER(CH3COMM,vc);
mpi_errno = MPIU_CALL(MPIDI_CH3,iStartMsg(vc, lock_pkt, sizeof(*lock_pkt), &req));
mpi_errno = MPIDI_CH3_iStartMsg(vc, lock_pkt, sizeof(*lock_pkt), &req);
MPIU_THREAD_CS_EXIT(CH3COMM,vc);
if (mpi_errno != MPI_SUCCESS) {
MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER,"**winRMAmessage");
......@@ -2223,7 +2221,7 @@ static int MPIDI_CH3I_Send_lock_put_or_acc(MPID_Win *win_ptr)
iovcnt = 2;
MPIU_THREAD_CS_ENTER(CH3COMM,vc);
mpi_errno = MPIU_CALL(MPIDI_CH3,iStartMsgv(vc, iov, iovcnt, &request));
mpi_errno = MPIDI_CH3_iStartMsgv(vc, iov, iovcnt, &request);
MPIU_THREAD_CS_EXIT(CH3COMM,vc);
if (mpi_errno != MPI_SUCCESS) {
MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER,"**ch3|rmamsg");
......@@ -2379,8 +2377,7 @@ static int MPIDI_CH3I_Send_lock_get(MPID_Win *win_ptr)
MPIDI_Comm_get_vc_set_active(comm_ptr, rma_op->target_rank, &vc);
MPIU_THREAD_CS_ENTER(CH3COMM,vc);
mpi_errno = MPIU_CALL(MPIDI_CH3,iStartMsg(vc, lock_get_unlock_pkt,
sizeof(*lock_get_unlock_pkt), &sreq));
mpi_errno = MPIDI_CH3_iStartMsg(vc, lock_get_unlock_pkt, sizeof(*lock_get_unlock_pkt), &sreq);
MPIU_THREAD_CS_EXIT(CH3COMM,vc);
if (mpi_errno != MPI_SUCCESS) {
MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER,"**ch3|rmamsg");
......@@ -2457,8 +2454,7 @@ int MPIDI_CH3I_Send_lock_granted_pkt(MPIDI_VC_t *vc, MPI_Win source_win_handle)
lock_granted_pkt->source_win_handle = source_win_handle;
MPIU_THREAD_CS_ENTER(CH3COMM,vc);
mpi_errno = MPIU_CALL(MPIDI_CH3,iStartMsg(vc, lock_granted_pkt,
sizeof(*lock_granted_pkt), &req));
mpi_errno = MPIDI_CH3_iStartMsg(vc, lock_granted_pkt, sizeof(*lock_granted_pkt), &req);
MPIU_THREAD_CS_EXIT(CH3COMM,vc);
if (mpi_errno) {
MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER,"**ch3|rmamsg");
......@@ -2697,7 +2693,7 @@ int MPIDI_CH3_PktHandler_Get( MPIDI_VC_t *vc, MPIDI_CH3_Pkt_t *pkt,
iov[1].MPID_IOV_LEN = get_pkt->count * type_size;
MPIU_THREAD_CS_ENTER(CH3COMM,vc);
mpi_errno = MPIU_CALL(MPIDI_CH3,iSendv(vc, req, iov, 2));
mpi_errno = MPIDI_CH3_iSendv(vc, req, iov, 2);
MPIU_THREAD_CS_EXIT(CH3COMM,vc);
/* --BEGIN ERROR HANDLING-- */
if (mpi_errno != MPI_SUCCESS)
......@@ -3305,7 +3301,7 @@ int MPIDI_CH3_PktHandler_LockGetUnlock( MPIDI_VC_t *vc, MPIDI_CH3_Pkt_t *pkt,
MPID_Datatype_get_size_macro(lock_get_unlock_pkt->datatype, type_size);
iov[1].MPID_IOV_LEN = lock_get_unlock_pkt->count * type_size;
mpi_errno = MPIU_CALL(MPIDI_CH3,iSendv(vc, req, iov, 2));
mpi_errno = MPIDI_CH3_iSendv(vc, req, iov, 2);
/* --BEGIN ERROR HANDLING-- */
if (mpi_errno != MPI_SUCCESS)
{
......
......@@ -51,8 +51,7 @@ int MPIDI_CH3_RndvSend( MPID_Request **sreq_p, const void * buf, int count,
MPIU_DBG_MSGPKT(vc,tag,rts_pkt->match.parts.context_id,rank,data_sz,"Rndv");
MPIU_THREAD_CS_ENTER(CH3COMM,vc);
mpi_errno = MPIU_CALL(MPIDI_CH3,iStartMsg(vc, rts_pkt, sizeof(*rts_pkt),
&rts_sreq));
mpi_errno = MPIDI_CH3_iStartMsg(vc, rts_pkt, sizeof(*rts_pkt), &rts_sreq);
MPIU_THREAD_CS_EXIT(CH3COMM,vc);
/* --BEGIN ERROR HANDLING-- */
if (mpi_errno != MPI_SUCCESS)
......@@ -153,8 +152,7 @@ int MPIDI_CH3_PktHandler_RndvReqToSend( MPIDI_VC_t *vc, MPIDI_CH3_Pkt_t *pkt,
cts_pkt->sender_req_id = rts_pkt->sender_req_id;
cts_pkt->receiver_req_id = rreq->handle;
MPIU_THREAD_CS_ENTER(CH3COMM,vc);
mpi_errno = MPIU_CALL(MPIDI_CH3,iStartMsg(vc, cts_pkt,
sizeof(*cts_pkt), &cts_req));
mpi_errno = MPIDI_CH3_iStartMsg(vc, cts_pkt, sizeof(*cts_pkt), &cts_req);
MPIU_THREAD_CS_EXIT(CH3COMM,vc);
if (mpi_errno != MPI_SUCCESS) {
MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER,
......@@ -248,7 +246,7 @@ int MPIDI_CH3_PktHandler_RndvClrToSend( MPIDI_VC_t *vc, MPIDI_CH3_Pkt_t *pkt,
iov[1].MPID_IOV_LEN = data_sz;
MPIU_THREAD_CS_ENTER(CH3COMM,vc);
mpi_errno = MPIU_CALL(MPIDI_CH3,iSendv(vc, sreq, iov, 2));
mpi_errno = MPIDI_CH3_iSendv(vc, sreq, iov, 2);
MPIU_THREAD_CS_EXIT(CH3COMM,vc);
MPIU_ERR_CHKANDJUMP(mpi_errno, mpi_errno, MPI_ERR_OTHER, "**ch3|senddata");
}
......@@ -349,8 +347,7 @@ int MPIDI_CH3_RecvRndv( MPIDI_VC_t * vc, MPID_Request *rreq )
cts_pkt->sender_req_id = rreq->dev.sender_req_id;
cts_pkt->receiver_req_id = rreq->handle;
MPIU_THREAD_CS_ENTER(CH3COMM,vc);
mpi_errno = MPIU_CALL(MPIDI_CH3,iStartMsg(vc, cts_pkt,
sizeof(*cts_pkt), &cts_req));
mpi_errno = MPIDI_CH3_iStartMsg(vc, cts_pkt, sizeof(*cts_pkt), &cts_req);
MPIU_THREAD_CS_EXIT(CH3COMM,vc);
if (mpi_errno != MPI_SUCCESS) {
MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER, "**ch3|ctspkt");
......
......@@ -183,8 +183,7 @@ int MPID_Cancel_send(MPID_Request * sreq)
csr_pkt->sender_req_id = sreq->handle;
MPIU_THREAD_CS_ENTER(CH3COMM,vc);
mpi_errno = MPIU_CALL(MPIDI_CH3,iStartMsg(vc, csr_pkt,
sizeof(*csr_pkt), &csr_sreq));
mpi_errno = MPIDI_CH3_iStartMsg(vc, csr_pkt, sizeof(*csr_pkt), &csr_sreq);
MPIU_THREAD_CS_EXIT(CH3COMM,vc);
if (mpi_errno != MPI_SUCCESS) {
MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER,"**ch3|cancelreq");
......@@ -255,8 +254,7 @@ int MPIDI_CH3_PktHandler_CancelSendReq( MPIDI_VC_t *vc, MPIDI_CH3_Pkt_t *pkt,
resp_pkt->ack = ack;
/* FIXME: This is called within the packet handler */
/* MPIU_THREAD_CS_ENTER(CH3COMM,vc); */
mpi_errno = MPIU_CALL(MPIDI_CH3,iStartMsg(vc, resp_pkt,
sizeof(*resp_pkt), &resp_sreq));
mpi_errno = MPIDI_CH3_iStartMsg(vc, resp_pkt, sizeof(*resp_pkt), &resp_sreq);
/* MPIU_THREAD_CS_EXIT(CH3COMM,vc); */
if (mpi_errno != MPI_SUCCESS) {
MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER,
......
......@@ -186,7 +186,7 @@ int MPID_Init(int *argc, char ***argv, int requested, int *provided,
* the basic information about the job has been extracted from PMI (e.g.,
* the size and rank of this process, and the process group id)
*/
mpi_errno = MPIU_CALL(MPIDI_CH3,Init(has_parent, pg, pg_rank));
mpi_errno = MPIDI_CH3_Init(has_parent, pg, pg_rank);
if (mpi_errno != MPI_SUCCESS) {
MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER, "**ch3|ch3_init");
}
......@@ -352,7 +352,7 @@ int MPID_Init(int *argc, char ***argv, int requested, int *provided,
int MPID_InitCompleted( void )
{
int mpi_errno;
mpi_errno = MPIU_CALL(MPIDI_CH3,InitCompleted());
mpi_errno = MPIDI_CH3_InitCompleted();
return mpi_errno;
}
......
......@@ -85,7 +85,7 @@ int MPID_Irsend(const void * buf, int count, MPI_Datatype datatype, int rank, in
MPIDI_Request_set_seqnum(sreq, seqnum);
MPIU_THREAD_CS_ENTER(CH3COMM,vc);
mpi_errno = MPIU_CALL(MPIDI_CH3,iSend(vc, sreq, ready_pkt, sizeof(*ready_pkt)));
mpi_errno = MPIDI_CH3_iSend(vc, sreq, ready_pkt, sizeof(*ready_pkt));
MPIU_THREAD_CS_EXIT(CH3COMM,vc);
/* --BEGIN ERROR HANDLING-- */
if (mpi_errno != MPI_SUCCESS)
......
......@@ -103,8 +103,7 @@ int MPID_Isend(const void * buf, int count, MPI_Datatype datatype, int rank,
MPIDI_Request_set_seqnum(sreq, seqnum);
MPIU_THREAD_CS_ENTER(CH3COMM,vc);
mpi_errno = MPIU_CALL(MPIDI_CH3,iSend(vc, sreq, eager_pkt,
sizeof(*eager_pkt)));
mpi_errno = MPIDI_CH3_iSend(vc, sreq, eager_pkt, sizeof(*eager_pkt));
MPIU_THREAD_CS_EXIT(CH3COMM,vc);
/* --BEGIN ERROR HANDLING-- */
if (mpi_errno != MPI_SUCCESS)
......
......@@ -51,7 +51,7 @@ int MPID_Open_port(MPID_Info *info_ptr, char *port_name)
/* Check to see if we need to setup channel-specific functions
for handling the port operations */
if (setupPortFunctions) {
MPIU_CALL(MPIDI_CH3,PortFnsInit( &portFns ));
MPIDI_CH3_PortFnsInit( &portFns );
setupPortFunctions = 0;
}
......@@ -102,7 +102,7 @@ int MPID_Close_port(const char *port_name)
/* Check to see if we need to setup channel-specific functions
for handling the port operations */
if (setupPortFunctions) {
MPIU_CALL(MPIDI_CH3,PortFnsInit( &portFns ));
MPIDI_CH3_PortFnsInit( &portFns );
setupPortFunctions = 0;
}
......@@ -139,7 +139,7 @@ int MPID_Comm_accept(char * port_name, MPID_Info * info, int root,
/* Check to see if we need to setup channel-specific functions
for handling the port operations */
if (setupPortFunctions) {
MPIU_CALL(MPIDI_CH3,PortFnsInit( &portFns ));
MPIDI_CH3_PortFnsInit( &portFns );
setupPortFunctions = 0;
}
......@@ -177,7 +177,7 @@ int MPID_Comm_connect(const char * port_name, MPID_Info * info, int root,
/* Check to see if we need to setup channel-specific functions
for handling the port operations */
if (setupPortFunctions) {
MPIU_CALL(MPIDI_CH3,PortFnsInit( &portFns ));
MPIDI_CH3_PortFnsInit( &portFns );
setupPortFunctions = 0;
}
......@@ -309,7 +309,7 @@ static int MPIDI_Open_port(MPID_Info *info_ptr, char *port_name)
connections between processes that are started separately (e.g.,
may not use shared memory). We may need a channel-specific
function to create an exportable connection string. */
mpi_errno = MPIU_CALL(MPIDI_CH3,Get_business_card(myRank, port_name, len));
mpi_errno = MPIDI_CH3_Get_business_card(myRank, port_name, len);
MPIU_DBG_MSG_FMT(CH3, VERBOSE, (MPIU_DBG_FDEST, "port_name = %s", port_name));
fn_exit:
......
......@@ -81,8 +81,7 @@ int MPID_Rsend(const void * buf, int count, MPI_Datatype datatype, int rank, int
MPIDI_Pkt_set_seqnum(ready_pkt, seqnum);
MPIU_THREAD_CS_ENTER(CH3COMM,vc);
mpi_errno = MPIU_CALL(MPIDI_CH3,iStartMsg(vc, ready_pkt,
sizeof(*ready_pkt), &sreq));
mpi_errno = MPIDI_CH3_iStartMsg(vc, ready_pkt, sizeof(*ready_pkt), &sreq);
MPIU_THREAD_CS_EXIT(CH3COMM,vc);
/* --BEGIN ERROR HANDLING-- */
if (mpi_errno != MPI_SUCCESS)
......
......@@ -94,8 +94,7 @@ int MPID_Send(const void * buf, int count, MPI_Datatype datatype, int rank,
MPIDI_Pkt_set_seqnum(eager_pkt, seqnum);
MPIU_THREAD_CS_ENTER(CH3COMM,vc);
mpi_errno = MPIU_CALL(MPIDI_CH3,iStartMsg(vc, eager_pkt,
sizeof(*eager_pkt), &sreq));
mpi_errno = MPIDI_CH3_iStartMsg(vc, eager_pkt, sizeof(*eager_pkt), &sreq);
MPIU_THREAD_CS_EXIT(CH3COMM,vc);
/* --BEGIN ERROR HANDLING-- */
if (mpi_errno != MPI_SUCCESS)
......
......@@ -213,7 +213,7 @@ int MPID_VCRT_Release(MPID_VCRT vcrt, int isDisconnect )
vc, i, MPIDI_VC_GetStateString(vc->state)));
}
/* NOTE: we used to * MPIU_CALL(MPIDI_CH3,VC_Destroy(&(pg->vct[i])))
/* NOTE: we used to * MPIDI_CH3_VC_Destroy(&(pg->vct[i])))
here but that is incorrect. According to the standard, it's
entirely possible (likely even) that this VC might still be
connected. VCs are now destroyed when the PG that "owns"
......@@ -723,7 +723,7 @@ int MPIDI_VC_Init( MPIDI_VC_t *vc, MPIDI_PG_t *pg, int rank )
MPIU_Assert(err == 0);
}
#endif /* MPIU_THREAD_GRANULARITY */
MPIU_CALL(MPIDI_CH3,VC_Init( vc ));
MPIDI_CH3_VC_Init(vc);
MPIDI_DBG_PrintVCState(vc);
return MPI_SUCCESS;
......
......@@ -218,7 +218,7 @@ int MPIDI_PG_Create(int vct_sz, void * pg_id, MPIDI_PG_t ** pg_ptr)
/* We may first need to initialize the channel before calling the channel
VC init functions. This routine may be a no-op; look in the
ch3_init.c file in each channel */
MPIU_CALL(MPIDI_CH3,PG_Init( pg ));
MPIDI_CH3_PG_Init(pg);
/* These are now done in MPIDI_VC_Init */
#if 0
......@@ -317,7 +317,7 @@ int MPIDI_PG_Destroy(MPIDI_PG_t * pg)
use. Alternately, if the PG is able to recreate a VC,
and can thus free unused (or idle) VCs, it should be allowed
to do so. [wdg 2008-08-31] */
mpi_errno = MPIU_CALL(MPIDI_CH3,VC_Destroy(&(pg->vct[i])));
mpi_errno = MPIDI_CH3_VC_Destroy(&(pg->vct[i]));
if (mpi_errno) { MPIU_ERR_POP(mpi_errno); }
}
......@@ -331,7 +331,7 @@ int MPIDI_PG_Destroy(MPIDI_PG_t * pg)
MPIU_Free(pg->connData);
}
}
mpi_errno = MPIU_CALL(MPIDI_CH3,PG_Destroy(pg));
mpi_errno = MPIDI_CH3_PG_Destroy(pg);
MPIU_Free(pg);
goto fn_exit;
......