Commit ef22b67d authored by Charles J Archer

Include uppercase SFI-to-OFI replacements in the netmod rename

parent a4b5016f
@@ -4,7 +4,7 @@
## (C) 2011 by Argonne National Laboratory.
## See COPYRIGHT in top-level directory.
##
if BUILD_NEMESIS_NETMOD_SFI
if BUILD_NEMESIS_NETMOD_OFI
mpi_core_sources += \
src/mpid/ch3/channels/nemesis/netmod/ofi/ofi_init.c \
......
**ofi_avmap:SFI get address vector map failed
**ofi_avmap %s %d %s %s:SFI address vector map failed (%s:%d:%s:%s)
**ofi_tsend:SFI tagged sendto failed
**ofi_tsend %s %d %s %s:SFI tagged sendto failed (%s:%d:%s:%s)
**ofi_trecv:SFI tagged recvfrom failed
**ofi_trecv %s %d %s %s:SFI tagged recvfrom failed (%s:%d:%s:%s)
**ofi_getinfo:SFI getinfo() failed
**ofi_getinfo %s %d %s %s:SFI getinfo() failed (%s:%d:%s:%s)
**ofi_openep:SFI endpoint open failed
**ofi_openep %s %d %s %s:SFI endpoint open failed (%s:%d:%s:%s)
**ofi_openfabric:SFI fabric open failure
**ofi_openfabric %s %d %s %s:SFI fabric open failed (%s:%d:%s:%s)
**ofi_opendomain:SFI domain open failure
**ofi_opendomain %s %d %s %s:SFI domain open failed (%s:%d:%s:%s)
**ofi_opencq:SFI event queue create failure
**ofi_opencq %s %d %s %s:SFI event queue create failed (%s:%d:%s:%s)
**ofi_avopen:SFI address vector open failed
**ofi_avopen %s %d %s %s:SFI address vector open failed (%s:%d:%s:%s)
**ofi_bind:SFI resource bind failure
**ofi_bind %s %d %s %s:SFI resource bind failed (%s:%d:%s:%s)
**ofi_ep_enable:SFI endpoint enable failed
**ofi_ep_enable %s %d %s %s:SFI endpoint enable failed (%s:%d:%s:%s)
**ofi_getname:SFI get endpoint name failed
**ofi_getname %s %d %s %s:SFI get endpoint name failed (%s:%d:%s:%s)
**ofi_avclose:SFI av close failed
**ofi_avclose %s %d %s %s:SFI av close failed (%s:%d:%s:%s)
**ofi_epclose:SFI endpoint close failed
**ofi_epclose %s %d %s %s:SFI endpoint close failed (%s:%d:%s:%s)
**ofi_cqclose:SFI cq close failed
**ofi_cqclose %s %d %s %s:SFI cq close failed (%s:%d:%s:%s)
**ofi_mrclose:SFI mr close failed
**ofi_mrclose %s %d %s %s:SFI mr close failed (%s:%d:%s:%s)
**ofi_fabricclose:SFI fabric close failed
**ofi_fabricclose %s %d %s %s:SFI fabric close failed (%s:%d:%s:%s)
**ofi_domainclose:SFI domain close failed
**ofi_domainclose %s %d %s %s:SFI domain close failed (%s:%d:%s:%s)
**ofi_tsearch:SFI tsearch failed
**ofi_tsearch %s %d %s %s:SFI tsearch failed (%s:%d:%s:%s)
**ofi_poll:SFI poll failed
**ofi_poll %s %d %s %s:SFI poll failed (%s:%d:%s:%s)
**ofi_cancel:SFI cancel failed
**ofi_cancel %s %d %s %s:SFI cancel failed (%s:%d:%s:%s)
**ofi_avmap:OFI get address vector map failed
**ofi_avmap %s %d %s %s:OFI address vector map failed (%s:%d:%s:%s)
**ofi_tsend:OFI tagged sendto failed
**ofi_tsend %s %d %s %s:OFI tagged sendto failed (%s:%d:%s:%s)
**ofi_trecv:OFI tagged recvfrom failed
**ofi_trecv %s %d %s %s:OFI tagged recvfrom failed (%s:%d:%s:%s)
**ofi_getinfo:OFI getinfo() failed
**ofi_getinfo %s %d %s %s:OFI getinfo() failed (%s:%d:%s:%s)
**ofi_openep:OFI endpoint open failed
**ofi_openep %s %d %s %s:OFI endpoint open failed (%s:%d:%s:%s)
**ofi_openfabric:OFI fabric open failure
**ofi_openfabric %s %d %s %s:OFI fabric open failed (%s:%d:%s:%s)
**ofi_opendomain:OFI domain open failure
**ofi_opendomain %s %d %s %s:OFI domain open failed (%s:%d:%s:%s)
**ofi_opencq:OFI event queue create failure
**ofi_opencq %s %d %s %s:OFI event queue create failed (%s:%d:%s:%s)
**ofi_avopen:OFI address vector open failed
**ofi_avopen %s %d %s %s:OFI address vector open failed (%s:%d:%s:%s)
**ofi_bind:OFI resource bind failure
**ofi_bind %s %d %s %s:OFI resource bind failed (%s:%d:%s:%s)
**ofi_ep_enable:OFI endpoint enable failed
**ofi_ep_enable %s %d %s %s:OFI endpoint enable failed (%s:%d:%s:%s)
**ofi_getname:OFI get endpoint name failed
**ofi_getname %s %d %s %s:OFI get endpoint name failed (%s:%d:%s:%s)
**ofi_avclose:OFI av close failed
**ofi_avclose %s %d %s %s:OFI av close failed (%s:%d:%s:%s)
**ofi_epclose:OFI endpoint close failed
**ofi_epclose %s %d %s %s:OFI endpoint close failed (%s:%d:%s:%s)
**ofi_cqclose:OFI cq close failed
**ofi_cqclose %s %d %s %s:OFI cq close failed (%s:%d:%s:%s)
**ofi_mrclose:OFI mr close failed
**ofi_mrclose %s %d %s %s:OFI mr close failed (%s:%d:%s:%s)
**ofi_fabricclose:OFI fabric close failed
**ofi_fabricclose %s %d %s %s:OFI fabric close failed (%s:%d:%s:%s)
**ofi_domainclose:OFI domain close failed
**ofi_domainclose %s %d %s %s:OFI domain close failed (%s:%d:%s:%s)
**ofi_tsearch:OFI tsearch failed
**ofi_tsearch %s %d %s %s:OFI tsearch failed (%s:%d:%s:%s)
**ofi_poll:OFI poll failed
**ofi_poll %s %d %s %s:OFI poll failed (%s:%d:%s:%s)
**ofi_cancel:OFI cancel failed
**ofi_cancel %s %d %s %s:OFI cancel failed (%s:%d:%s:%s)
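The pairs above give the generic and instance-specific message templates that MPICH's error machinery substitutes when an OFI call fails. A minimal sketch of how one such pair is typically raised through MPIR_Err_create_code(); the netmod's own FI_RC wrapper is collapsed out of the hunks below, so this expansion is an assumption rather than the commit's code:

    /* Sketch only (assumed expansion): raise "**ofi_avmap" when fi_av_insert() fails. */
    ret = fi_av_insert(gl_data.av, addr, 1, &direct_addr, 0ULL, NULL);
    if (ret < 0) {
        mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
                                         FCNAME, __LINE__, MPI_ERR_OTHER,
                                         "**ofi_avmap", "**ofi_avmap %s %d %s %s",
                                         __FILE__, __LINE__, FCNAME, fi_strerror(-ret));
        goto fn_fail;
    }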
@@ -38,7 +38,7 @@ static inline MPIDI_VC_t *ofi_tag_to_vc(uint64_t match_bits)
port = get_port(match_bits);
vc = gl_data.cm_vcs;
while (vc && vc->port_name_tag != port) {
vc = VC_SFI(vc)->next;
vc = VC_OFI(vc)->next;
}
if (NULL == vc) {
MPIU_Assertp(0);
@@ -90,7 +90,7 @@ static inline MPIDI_VC_t *ofi_tag_to_vc(uint64_t match_bits)
static inline int MPID_nem_ofi_conn_req_callback(cq_tagged_entry_t * wc, MPID_Request * rreq)
{
int ret, len, mpi_errno = MPI_SUCCESS;
char bc[SFI_KVSAPPSTRLEN];
char bc[OFI_KVSAPPSTRLEN];
MPIDI_VC_t *vc;
char *addr = NULL;
@@ -103,12 +103,12 @@ static inline int MPID_nem_ofi_conn_req_callback(cq_tagged_entry_t * wc, MPID_Re
MPIU_Assert(gl_data.conn_req == rreq);
FI_RC(fi_trecv(gl_data.endpoint,
gl_data.conn_req->dev.user_buf,
SFI_KVSAPPSTRLEN,
OFI_KVSAPPSTRLEN,
gl_data.mr,
0,
MPID_CONN_REQ,
~MPID_PROTOCOL_MASK,
(void *) &(REQ_SFI(gl_data.conn_req)->ofi_context)), trecv);
(void *) &(REQ_OFI(gl_data.conn_req)->ofi_context)), trecv);
addr = MPIU_Malloc(gl_data.bound_addrlen);
MPIU_Assertp(addr);
@@ -118,16 +118,16 @@ static inline int MPID_nem_ofi_conn_req_callback(cq_tagged_entry_t * wc, MPID_Re
MPIDI_VC_Init(vc, NULL, 0);
MPI_RC(MPIDI_GetTagFromPort(bc, &vc->port_name_tag));
ret = MPIU_Str_get_binary_arg(bc, "SFI", addr, gl_data.bound_addrlen, &len);
ret = MPIU_Str_get_binary_arg(bc, "OFI", addr, gl_data.bound_addrlen, &len);
MPIU_ERR_CHKANDJUMP((ret != MPIU_STR_SUCCESS && ret != MPIU_STR_NOMEM) ||
(size_t) len != gl_data.bound_addrlen,
mpi_errno, MPI_ERR_OTHER, "**badbusinesscard");
FI_RC(fi_av_insert(gl_data.av, addr, 1, &direct_addr, 0ULL, NULL), avmap);
VC_SFI(vc)->direct_addr = direct_addr;
VC_SFI(vc)->ready = 1;
VC_SFI(vc)->is_cmvc = 1;
VC_SFI(vc)->next = gl_data.cm_vcs;
VC_OFI(vc)->direct_addr = direct_addr;
VC_OFI(vc)->ready = 1;
VC_OFI(vc)->is_cmvc = 1;
VC_OFI(vc)->next = gl_data.cm_vcs;
gl_data.cm_vcs = vc;
MPIDI_CH3I_Acceptq_enqueue(vc, vc->port_name_tag);
@@ -159,10 +159,10 @@ static inline int MPID_nem_ofi_handle_packet(cq_tagged_entry_t * wc ATTRIBUTE((u
BEGIN_FUNC(FCNAME);
if (rreq->cc == 1) {
vc = REQ_SFI(rreq)->vc;
vc = REQ_OFI(rreq)->vc;
MPIU_Assert(vc);
MPI_RC(MPID_nem_handle_pkt(vc, REQ_SFI(rreq)->pack_buffer, REQ_SFI(rreq)->pack_buffer_size))
MPIU_Free(REQ_SFI(rreq)->pack_buffer);
MPI_RC(MPID_nem_handle_pkt(vc, REQ_OFI(rreq)->pack_buffer, REQ_OFI(rreq)->pack_buffer_size))
MPIU_Free(REQ_OFI(rreq)->pack_buffer);
}
MPIDI_CH3U_Request_complete(rreq);
END_FUNC_RC(FCNAME);
@@ -179,7 +179,7 @@ static inline int MPID_nem_ofi_cts_send_callback(cq_tagged_entry_t * wc, MPID_Re
{
int mpi_errno = MPI_SUCCESS;
BEGIN_FUNC(FCNAME);
MPI_RC(MPID_nem_ofi_handle_packet(wc, REQ_SFI(sreq)->parent));
MPI_RC(MPID_nem_ofi_handle_packet(wc, REQ_OFI(sreq)->parent));
MPIDI_CH3U_Request_complete(sreq);
END_FUNC_RC(FCNAME);
}
@@ -217,28 +217,28 @@ static inline int MPID_nem_ofi_preposted_callback(cq_tagged_entry_t * wc, MPID_R
MPID_cc_incr(new_rreq->cc_ptr, &c);
new_rreq->dev.OnDataAvail = NULL;
new_rreq->dev.next = NULL;
REQ_SFI(new_rreq)->event_callback = MPID_nem_ofi_handle_packet;
REQ_SFI(new_rreq)->vc = vc;
REQ_SFI(new_rreq)->pack_buffer = pack_buffer;
REQ_SFI(new_rreq)->pack_buffer_size = pkt_len;
REQ_OFI(new_rreq)->event_callback = MPID_nem_ofi_handle_packet;
REQ_OFI(new_rreq)->vc = vc;
REQ_OFI(new_rreq)->pack_buffer = pack_buffer;
REQ_OFI(new_rreq)->pack_buffer_size = pkt_len;
FI_RC(fi_trecv(gl_data.endpoint,
REQ_SFI(new_rreq)->pack_buffer,
REQ_SFI(new_rreq)->pack_buffer_size,
REQ_OFI(new_rreq)->pack_buffer,
REQ_OFI(new_rreq)->pack_buffer_size,
gl_data.mr,
VC_SFI(vc)->direct_addr,
wc->tag | MPID_MSG_DATA, 0, &(REQ_SFI(new_rreq)->ofi_context)), trecv);
VC_OFI(vc)->direct_addr,
wc->tag | MPID_MSG_DATA, 0, &(REQ_OFI(new_rreq)->ofi_context)), trecv);
MPID_nem_ofi_create_req(&sreq, 1);
sreq->dev.OnDataAvail = NULL;
sreq->dev.next = NULL;
REQ_SFI(sreq)->event_callback = MPID_nem_ofi_cts_send_callback;
REQ_SFI(sreq)->parent = new_rreq;
REQ_OFI(sreq)->event_callback = MPID_nem_ofi_cts_send_callback;
REQ_OFI(sreq)->parent = new_rreq;
FI_RC(fi_tsend(gl_data.endpoint,
NULL,
0,
gl_data.mr,
VC_SFI(vc)->direct_addr,
wc->tag | MPID_MSG_CTS, &(REQ_SFI(sreq)->ofi_context)), tsend);
VC_OFI(vc)->direct_addr,
wc->tag | MPID_MSG_CTS, &(REQ_OFI(sreq)->ofi_context)), tsend);
MPIU_Assert(gl_data.persistent_req == rreq);
rreq->dev.user_count = 0;
@@ -248,7 +248,7 @@ static inline int MPID_nem_ofi_preposted_callback(cq_tagged_entry_t * wc, MPID_R
gl_data.mr,
0,
MPID_MSG_RTS,
~MPID_PROTOCOL_MASK, &(REQ_SFI(rreq)->ofi_context)), trecv);
~MPID_PROTOCOL_MASK, &(REQ_OFI(rreq)->ofi_context)), trecv);
END_FUNC_RC(FCNAME);
}
@@ -264,8 +264,8 @@ int MPID_nem_ofi_connect_to_root_callback(cq_tagged_entry_t * wc ATTRIBUTE((unus
int mpi_errno = MPI_SUCCESS;
BEGIN_FUNC(FCNAME);
if (REQ_SFI(sreq)->pack_buffer)
MPIU_Free(REQ_SFI(sreq)->pack_buffer);
if (REQ_OFI(sreq)->pack_buffer)
MPIU_Free(REQ_OFI(sreq)->pack_buffer);
MPIDI_CH3U_Request_complete(sreq);
END_FUNC(FCNAME);
@@ -301,8 +301,8 @@ int MPID_nem_ofi_cm_init(MPIDI_PG_t * pg_p, int pg_rank ATTRIBUTE((unused)))
MPID_nem_ofi_create_req(&persistent_req, 1);
persistent_req->dev.OnDataAvail = NULL;
persistent_req->dev.next = NULL;
REQ_SFI(persistent_req)->vc = NULL;
REQ_SFI(persistent_req)->event_callback = MPID_nem_ofi_preposted_callback;
REQ_OFI(persistent_req)->vc = NULL;
REQ_OFI(persistent_req)->event_callback = MPID_nem_ofi_preposted_callback;
FI_RC(fi_trecv(gl_data.endpoint,
&persistent_req->dev.user_count,
sizeof persistent_req->dev.user_count,
@@ -310,25 +310,25 @@ int MPID_nem_ofi_cm_init(MPIDI_PG_t * pg_p, int pg_rank ATTRIBUTE((unused)))
0,
MPID_MSG_RTS,
~MPID_PROTOCOL_MASK,
(void *) &(REQ_SFI(persistent_req)->ofi_context)), trecv);
(void *) &(REQ_OFI(persistent_req)->ofi_context)), trecv);
gl_data.persistent_req = persistent_req;
/* --------------------------------- */
/* Post recv for connection requests */
/* --------------------------------- */
MPID_nem_ofi_create_req(&conn_req, 1);
conn_req->dev.user_buf = MPIU_Malloc(SFI_KVSAPPSTRLEN * sizeof(char));
conn_req->dev.user_buf = MPIU_Malloc(OFI_KVSAPPSTRLEN * sizeof(char));
conn_req->dev.OnDataAvail = NULL;
conn_req->dev.next = NULL;
REQ_SFI(conn_req)->vc = NULL; /* We don't know the source yet */
REQ_SFI(conn_req)->event_callback = MPID_nem_ofi_conn_req_callback;
REQ_OFI(conn_req)->vc = NULL; /* We don't know the source yet */
REQ_OFI(conn_req)->event_callback = MPID_nem_ofi_conn_req_callback;
FI_RC(fi_trecv(gl_data.endpoint,
conn_req->dev.user_buf,
SFI_KVSAPPSTRLEN,
OFI_KVSAPPSTRLEN,
gl_data.mr,
0,
MPID_CONN_REQ,
~MPID_PROTOCOL_MASK, (void *) &(REQ_SFI(conn_req)->ofi_context)), trecv);
~MPID_PROTOCOL_MASK, (void *) &(REQ_OFI(conn_req)->ofi_context)), trecv);
gl_data.conn_req = conn_req;
@@ -351,12 +351,12 @@ int MPID_nem_ofi_cm_finalize()
int mpi_errno = MPI_SUCCESS;
BEGIN_FUNC(FCNAME);
FI_RC(fi_cancel((fid_t) gl_data.endpoint,
&(REQ_SFI(gl_data.persistent_req)->ofi_context)), cancel);
&(REQ_OFI(gl_data.persistent_req)->ofi_context)), cancel);
MPIR_STATUS_SET_CANCEL_BIT(gl_data.persistent_req->status, TRUE);
MPIR_STATUS_SET_COUNT(gl_data.persistent_req->status, 0);
MPIDI_CH3U_Request_complete(gl_data.persistent_req);
FI_RC(fi_cancel((fid_t) gl_data.endpoint, &(REQ_SFI(gl_data.conn_req)->ofi_context)), cancel);
FI_RC(fi_cancel((fid_t) gl_data.endpoint, &(REQ_OFI(gl_data.conn_req)->ofi_context)), cancel);
MPIU_Free(gl_data.conn_req->dev.user_buf);
MPIR_STATUS_SET_CANCEL_BIT(gl_data.conn_req->status, TRUE);
MPIR_STATUS_SET_COUNT(gl_data.conn_req->status, 0);
@@ -373,31 +373,31 @@ int MPID_nem_ofi_cm_finalize()
/* Handle CH3/Nemesis VC connections */
/* * Query the VC address information. In particular we are looking for */
/* the fabric address name. */
/* * Use fi_av_insert to register the address name with SFI */
/* * Use fi_av_insert to register the address name with OFI */
/* ------------------------------------------------------------------------ */
#undef FCNAME
#define FCNAME DECL_FUNC(MPID_nem_ofi_vc_connect)
int MPID_nem_ofi_vc_connect(MPIDI_VC_t * vc)
{
int len, ret, mpi_errno = MPI_SUCCESS;
char bc[SFI_KVSAPPSTRLEN], *addr = NULL;
char bc[OFI_KVSAPPSTRLEN], *addr = NULL;
BEGIN_FUNC(FCNAME);
addr = MPIU_Malloc(gl_data.bound_addrlen);
MPIU_Assert(addr);
MPIU_Assert(1 != VC_SFI(vc)->ready);
MPIU_Assert(1 != VC_OFI(vc)->ready);
if (!vc->pg || !vc->pg->getConnInfo) {
goto fn_exit;
}
MPI_RC(vc->pg->getConnInfo(vc->pg_rank, bc, SFI_KVSAPPSTRLEN, vc->pg));
ret = MPIU_Str_get_binary_arg(bc, "SFI", addr, gl_data.bound_addrlen, &len);
MPI_RC(vc->pg->getConnInfo(vc->pg_rank, bc, OFI_KVSAPPSTRLEN, vc->pg));
ret = MPIU_Str_get_binary_arg(bc, "OFI", addr, gl_data.bound_addrlen, &len);
MPIU_ERR_CHKANDJUMP((ret != MPIU_STR_SUCCESS && ret != MPIU_STR_NOMEM) ||
(size_t) len != gl_data.bound_addrlen,
mpi_errno, MPI_ERR_OTHER, "**badbusinesscard");
FI_RC(fi_av_insert(gl_data.av, addr, 1, &(VC_SFI(vc)->direct_addr), 0ULL, NULL), avmap);
VC_SFI(vc)->ready = 1;
FI_RC(fi_av_insert(gl_data.av, addr, 1, &(VC_OFI(vc)->direct_addr), 0ULL, NULL), avmap);
VC_OFI(vc)->ready = 1;
fn_exit:
if (addr)
@@ -415,7 +415,7 @@ int MPID_nem_ofi_vc_init(MPIDI_VC_t * vc)
{
int mpi_errno = MPI_SUCCESS;
MPIDI_CH3I_VC *const vc_ch = &vc->ch;
MPID_nem_ofi_vc_t *const vc_ofi = VC_SFI(vc);
MPID_nem_ofi_vc_t *const vc_ofi = VC_OFI(vc);
BEGIN_FUNC(FCNAME);
vc->sendNoncontig_fn = MPID_nem_ofi_SendNoncontig;
@@ -447,25 +447,25 @@ int MPID_nem_ofi_vc_init(MPIDI_VC_t * vc)
int MPID_nem_ofi_vc_destroy(MPIDI_VC_t * vc)
{
BEGIN_FUNC(FCNAME);
if (vc && (VC_SFI(vc)->is_cmvc == 1) && (VC_SFI(vc)->ready == 1)) {
if (vc && (VC_OFI(vc)->is_cmvc == 1) && (VC_OFI(vc)->ready == 1)) {
if (vc->pg != NULL) {
printf("ERROR: VC Destroy (%p) pg = %s\n", vc, (char *) vc->pg->id);
}
MPIDI_VC_t *prev = gl_data.cm_vcs;
while (prev && prev != vc && VC_SFI(prev)->next != vc) {
prev = VC_SFI(vc)->next;
while (prev && prev != vc && VC_OFI(prev)->next != vc) {
prev = VC_OFI(vc)->next;
}
if (VC_SFI(prev)->next == vc) {
VC_SFI(prev)->next = VC_SFI(vc)->next;
if (VC_OFI(prev)->next == vc) {
VC_OFI(prev)->next = VC_OFI(vc)->next;
}
else if (vc == gl_data.cm_vcs) {
gl_data.cm_vcs = VC_SFI(vc)->next;
gl_data.cm_vcs = VC_OFI(vc)->next;
}
else {
MPIU_Assert(0);
}
}
VC_SFI(vc)->ready = 0;
VC_OFI(vc)->ready = 0;
END_FUNC(FCNAME);
return MPI_SUCCESS;
}
@@ -477,7 +477,7 @@ int MPID_nem_ofi_vc_terminate(MPIDI_VC_t * vc)
int mpi_errno = MPI_SUCCESS;
BEGIN_FUNC(FCNAME);
MPI_RC(MPIDI_CH3U_Handle_connection(vc, MPIDI_VC_EVENT_TERMINATED));
VC_SFI(vc)->ready = 0;
VC_OFI(vc)->ready = 0;
END_FUNC_RC(FCNAME);
}
@@ -502,14 +502,14 @@ int MPID_nem_ofi_vc_terminate(MPIDI_VC_t * vc)
int MPID_nem_ofi_connect_to_root(const char *business_card, MPIDI_VC_t * new_vc)
{
int len, ret, mpi_errno = MPI_SUCCESS, str_errno = MPI_SUCCESS;
int my_bc_len = SFI_KVSAPPSTRLEN;
int my_bc_len = OFI_KVSAPPSTRLEN;
char *addr = NULL, *bc = NULL, *my_bc = NULL;
MPID_Request *sreq;
uint64_t conn_req_send_bits;
BEGIN_FUNC(FCNAME);
addr = MPIU_Malloc(gl_data.bound_addrlen);
bc = MPIU_Malloc(SFI_KVSAPPSTRLEN);
bc = MPIU_Malloc(OFI_KVSAPPSTRLEN);
MPIU_Assertp(addr);
MPIU_Assertp(bc);
my_bc = bc;
@@ -518,34 +518,34 @@ int MPID_nem_ofi_connect_to_root(const char *business_card, MPIDI_VC_t * new_vc)
goto fn_fail;
}
MPI_RC(MPIDI_GetTagFromPort(business_card, &new_vc->port_name_tag));
ret = MPIU_Str_get_binary_arg(business_card, "SFI", addr, gl_data.bound_addrlen, &len);
ret = MPIU_Str_get_binary_arg(business_card, "OFI", addr, gl_data.bound_addrlen, &len);
MPIU_ERR_CHKANDJUMP((ret != MPIU_STR_SUCCESS && ret != MPIU_STR_NOMEM) ||
(size_t) len != gl_data.bound_addrlen,
mpi_errno, MPI_ERR_OTHER, "**badbusinesscard");
FI_RC(fi_av_insert(gl_data.av, addr, 1, &(VC_SFI(new_vc)->direct_addr), 0ULL, NULL), avmap);
FI_RC(fi_av_insert(gl_data.av, addr, 1, &(VC_OFI(new_vc)->direct_addr), 0ULL, NULL), avmap);
VC_SFI(new_vc)->ready = 1;
VC_OFI(new_vc)->ready = 1;
str_errno = MPIU_Str_add_int_arg(&bc, &my_bc_len, "tag", new_vc->port_name_tag);
MPIU_ERR_CHKANDJUMP(str_errno, mpi_errno, MPI_ERR_OTHER, "**argstr_port_name_tag");
MPI_RC(MPID_nem_ofi_get_business_card(MPIR_Process.comm_world->rank, &bc, &my_bc_len));
my_bc_len = SFI_KVSAPPSTRLEN - my_bc_len;
my_bc_len = OFI_KVSAPPSTRLEN - my_bc_len;
MPID_nem_ofi_create_req(&sreq, 1);
sreq->kind = MPID_REQUEST_SEND;
sreq->dev.OnDataAvail = NULL;
sreq->dev.next = NULL;
REQ_SFI(sreq)->event_callback = MPID_nem_ofi_connect_to_root_callback;
REQ_SFI(sreq)->pack_buffer = my_bc;
REQ_OFI(sreq)->event_callback = MPID_nem_ofi_connect_to_root_callback;
REQ_OFI(sreq)->pack_buffer = my_bc;
conn_req_send_bits = init_sendtag(0, MPIR_Process.comm_world->rank, 0, MPID_CONN_REQ);
FI_RC(fi_tsend(gl_data.endpoint,
REQ_SFI(sreq)->pack_buffer,
REQ_OFI(sreq)->pack_buffer,
my_bc_len,
gl_data.mr,
VC_SFI(new_vc)->direct_addr,
conn_req_send_bits, &(REQ_SFI(sreq)->ofi_context)), tsend);
VC_OFI(new_vc)->direct_addr,
conn_req_send_bits, &(REQ_OFI(sreq)->ofi_context)), tsend);
MPID_nem_ofi_poll(MPID_NONBLOCKING_POLL);
VC_SFI(new_vc)->is_cmvc = 1;
VC_SFI(new_vc)->next = gl_data.cm_vcs;
VC_OFI(new_vc)->is_cmvc = 1;
VC_OFI(new_vc)->next = gl_data.cm_vcs;
gl_data.cm_vcs = new_vc;
fn_exit:
if (addr)
@@ -567,7 +567,7 @@ int MPID_nem_ofi_get_business_card(int my_rank ATTRIBUTE((unused)),
BEGIN_FUNC(FCNAME);
str_errno = MPIU_Str_add_binary_arg(bc_val_p,
val_max_sz_p,
"SFI",
"OFI",
(char *) &gl_data.bound_addr, sizeof(gl_data.bound_addr));
if (str_errno) {
MPIU_ERR_CHKANDJUMP(str_errno == MPIU_STR_NOMEM, mpi_errno, MPI_ERR_OTHER, "**buscard_len");
......
@@ -7,8 +7,8 @@
* to Argonne National Laboratory subject to Software Grant and Corporate
* Contributor License Agreement dated February 8, 2012.
*/
#ifndef SFI_IMPL_H
#define SFI_IMPL_H
#ifndef OFI_IMPL_H
#define OFI_IMPL_H
#include "mpid_nem_impl.h"
#include "mpihandlemem.h"
@@ -61,20 +61,20 @@ typedef struct {
/* This is per destination */
/* ******************************** */
typedef struct {
fi_addr_t direct_addr; /* Remote SFI address */
fi_addr_t direct_addr; /* Remote OFI address */
int ready; /* VC ready state */
int is_cmvc; /* Cleanup VC */
MPIDI_VC_t *next; /* VC queue */
} MPID_nem_ofi_vc_t;
#define VC_SFI(vc) ((MPID_nem_ofi_vc_t *)vc->ch.netmod_area.padding)
#define VC_OFI(vc) ((MPID_nem_ofi_vc_t *)vc->ch.netmod_area.padding)
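VC_OFI() overlays this per-destination struct on the scratch area that nemesis reserves inside every VC, so the netmod carries its state without a separate allocation. A minimal usage sketch, mirroring assignments the connection-management code above already makes (illustrative, not part of the commit):

    /* Sketch: record an fi_av_insert() result and mark the VC usable. */
    static inline void ofi_vc_mark_ready(MPIDI_VC_t *vc, fi_addr_t direct_addr)
    {
        VC_OFI(vc)->direct_addr = direct_addr;  /* remote OFI address */
        VC_OFI(vc)->ready = 1;                  /* sends may now target this VC */
    }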
/* ******************************** */
/* Per request object data */
/* SFI/Netmod specific */
/* OFI/Netmod specific */
/* ******************************** */
typedef struct {
context_t ofi_context; /* Context Object */
void *addr; /* SFI Address */
void *addr; /* OFI Address */
event_callback_fn event_callback; /* Callback Event */
char *pack_buffer; /* MPI Pack Buffer */
int pack_buffer_size; /* Pack buffer size */
@@ -84,7 +84,7 @@ typedef struct {
uint64_t tag; /* 64 bit tag request */
MPID_Request *parent; /* Parent request */
} MPID_nem_ofi_req_t;
#define REQ_SFI(req) ((MPID_nem_ofi_req_t *)((req)->ch.netmod_area.padding))
#define REQ_OFI(req) ((MPID_nem_ofi_req_t *)((req)->ch.netmod_area.padding))
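REQ_OFI() gives the same padding-overlay view of an MPID_Request. The progress engine presumably recovers the request from the fi_context it posted and then calls the stored callback; a sketch of that dispatch, with the callback signature inferred from the handlers earlier in this diff (not part of the commit):

    /* Sketch: hand a tagged-CQ completion to the callback stashed in the request. */
    static inline int ofi_dispatch_completion(cq_tagged_entry_t *wc, MPID_Request *req)
    {
        return REQ_OFI(req)->event_callback(wc, req);
    }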
/* ******************************** */
/* Logging and function macros */
@@ -109,7 +109,7 @@ fn_fail: \
: __FILE__ \
)
#define DECL_FUNC(FUNCNAME) MPIU_QUOTE(FUNCNAME)
#define SFI_COMPILE_TIME_ASSERT(expr_) \
#define OFI_COMPILE_TIME_ASSERT(expr_) \
do { switch(0) { case 0: case (expr_): default: break; } } while (0)
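The assert relies on duplicate-case detection: if expr_ evaluates to 0 at compile time, case 0: and case (expr_): become the same label and the translation unit fails to build. A hypothetical use inside any function body (the asserted expression is an illustration, not taken from this commit):

    static inline void ofi_check_layout(void)
    {
        /* Sketch: refuse to compile if fi_addr_t ever outgrows 64 bits. */
        OFI_COMPILE_TIME_ASSERT(sizeof(fi_addr_t) <= sizeof(uint64_t));
    }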
#define FI_RC(FUNC,STR) \
@@ -151,17 +151,17 @@ fn_fail: \
#define VC_READY_CHECK(vc) \
({ \
if (1 != VC_SFI(vc)->ready) { \
if (1 != VC_OFI(vc)->ready) { \
MPI_RC(MPID_nem_ofi_vc_connect(vc)); \
} \
})
#define SFI_ADDR_INIT(src, vc, remote_proc) \
#define OFI_ADDR_INIT(src, vc, remote_proc) \
({ \
if (MPI_ANY_SOURCE != src) { \
MPIU_Assert(vc != NULL); \
VC_READY_CHECK(vc); \
remote_proc = VC_SFI(vc)->direct_addr; \
remote_proc = VC_OFI(vc)->direct_addr; \
} else { \
MPIU_Assert(vc == NULL); \
remote_proc = gl_data.any_addr; \
@@ -197,14 +197,14 @@ fn_fail: \
#define MPID_TAG_SHIFT (28)
#define MPID_PSOURCE_SHIFT (16)
#define MPID_PORT_SHIFT (32)
#define SFI_KVSAPPSTRLEN 1024
#define OFI_KVSAPPSTRLEN 1024
/* ******************************** */
/* Request manipulation inlines */
/* ******************************** */
static inline void MPID_nem_ofi_init_req(MPID_Request * req)
{
memset(REQ_SFI(req), 0, sizeof(MPID_nem_ofi_req_t));
memset(REQ_OFI(req), 0, sizeof(MPID_nem_ofi_req_t));
}
static inline int MPID_nem_ofi_create_req(MPID_Request ** request, int refcnt)
@@ -320,7 +320,7 @@ int MPID_nem_ofi_iSendContig(MPIDI_VC_t * vc, MPID_Request * sreq, void *hdr,
MPIDI_msg_sz_t hdr_sz, void *data, MPIDI_msg_sz_t data_sz);
/* ************************************************************************** */
/* SFI utility functions : not exposed as a netmod public API */
/* OFI utility functions : not exposed as a netmod public API */
/* ************************************************************************** */
#define MPID_NONBLOCKING_POLL 0
#define MPID_BLOCKING_POLL 1
......
@@ -21,7 +21,7 @@ int MPID_nem_ofi_init(MPIDI_PG_t * pg_p, int pg_rank, char **bc_val_p, int *val_
info_t hints, *prov_tagged, *prov_use;
cq_attr_t cq_attr;
av_attr_t av_attr;
char kvsname[SFI_KVSAPPSTRLEN], key[SFI_KVSAPPSTRLEN], bc[SFI_KVSAPPSTRLEN];
char kvsname[OFI_KVSAPPSTRLEN], key[OFI_KVSAPPSTRLEN], bc[OFI_KVSAPPSTRLEN];
char *my_bc, *addrs, *null_addr;
fi_addr_t *fi_addrs = NULL;
MPIDI_VC_t *vc;
@@ -39,11 +39,11 @@ int MPID_nem_ofi_init(MPIDI_PG_t * pg_p, int pg_rank, char **bc_val_p, int *val_
/* communication calls. */
/* Note that we do not fill in FI_LOCAL_MR, which means this netmod */
/* does not support exchange of memory regions on communication calls */
/* SFI requires that all communication calls use a registered mr */
/* OFI requires that all communication calls use a registered mr */
/* but in our case this netmod is written to only support transfers */
/* on a dynamic memory region that spans all of memory. So, we do */
/* not set the FI_LOCAL_MR mode bit, and we set the FI_DYNAMIC_MR */
/* bit to tell SFI our requirement and filter providers appropriately */
/* bit to tell OFI our requirement and filter providers appropriately */
/* ep_type: reliable datagram operation */
/* caps: Capabilities required from the provider. The bits specified */
/* with buffered receive, cancel, and remote complete implements */
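A minimal sketch of how hints along these lines might be filled before calling fi_getinfo(); the field and flag names follow the released libfabric 1.x API, and the capability set shown is an assumption, since the actual hint values are collapsed out of this hunk:

    /* Sketch, not the commit's code: provider selection roughly per the comments above. */
    struct fi_info *hints = fi_allocinfo();
    struct fi_info *prov = NULL;

    hints->ep_attr->type = FI_EP_RDM;   /* reliable datagram endpoints */
    hints->caps          = FI_TAGGED;   /* tagged send/recv (capability set assumed) */
    hints->mode          = FI_CONTEXT;  /* we supply fi_context storage with each request */

    if (0 == fi_getinfo(FI_VERSION(1, 0), NULL, NULL, 0ULL, hints, &prov)) {
        /* open fabric, domain, endpoint, CQ, and AV from the selected provider */
        fi_freeinfo(prov);
    }
    fi_freeinfo(hints);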
@@ -62,7 +62,7 @@ int MPID_nem_ofi_init(MPIDI_PG_t * pg_p, int pg_rank, char **bc_val_p, int *val_
/* ------------------------------------------------------------------------ */
/* FI_VERSION provides binary backward and forward compatibility support */
/* Specify the version of SFI is coded to, the provider will select struct */
/* Specify the version of OFI is coded to, the provider will select struct */
/* layouts that are compatible with this version. */
/* ------------------------------------------------------------------------ */
fi_version = FI_VERSION(1, 0);
@@ -203,8 +203,8 @@ int MPID_nem_ofi_init(MPIDI_PG_t * pg_p, int pg_rank, char **bc_val_p, int *val_
/* Publish the business card */
/* to the KVS */
/* -------------------------------- */
PMI_RC(PMI_KVS_Get_my_name(kvsname, SFI_KVSAPPSTRLEN), pmi);
sprintf(key, "SFI-%d", pg_rank);
PMI_RC(PMI_KVS_Get_my_name(kvsname, OFI_KVSAPPSTRLEN), pmi);
sprintf(key, "OFI-%d", pg_rank);
PMI_RC(PMI_KVS_Put(kvsname, key, my_bc), pmi);
PMI_RC(PMI_KVS_Commit(kvsname), pmi);
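Each rank would then presumably read the other ranks' cards back out of the KVS under the same key scheme; a minimal sketch reusing the buffers already declared in this function (illustrative, not part of the commit):

    /* Sketch: fetch rank i's business card published above under "OFI-<rank>". */
    sprintf(key, "OFI-%d", i);
    PMI_RC(PMI_KVS_Get(kvsname, key, bc, OFI_KVSAPPSTRLEN), pmi);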
@@ -228,10 +228,10 @@ int MPID_nem_ofi_init(MPIDI_PG_t * pg_p, int pg_rank, char **bc_val_p, int *val_
MPIU_CHKLMEM_MALLOC(addrs, char *, pg_p->size * gl_data.bound_addrlen, mpi_errno, "addrs");
for (i = 0; i < pg_p->size; ++i) {