Commit 0bdc7e74 authored by Norio Yamaguchi's avatar Norio Yamaguchi Committed by Huiwei Lu
Browse files

Delete unused code in netmod-IB


Signed-off-by: default avatarHuiwei Lu <huiweilu@mcs.anl.gov>
parent 32e24c1d
......@@ -24,22 +24,10 @@
int MPID_nem_ib_finalize(void)
{
int mpi_errno = MPI_SUCCESS;
#if 0
int ibcom_errno;
int i;
#endif
MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_IB_FINALIZE);
MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_IB_FINALIZE);
#if 0
for (i = 0; i < MPID_nem_ib_nranks; i++) {
ibcom_errno = MPID_nem_ib_com_close(MPID_nem_ib_conns[i].fd);
MPIU_ERR_CHKANDJUMP(ibcom_errno, mpi_errno, MPI_ERR_OTHER, "**MPID_nem_ib_com_close");
}
#endif
MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_IB_FINALIZE);
fn_exit:
......
......@@ -698,9 +698,6 @@ extern uint8_t MPID_nem_ib_lmt_tail_addr_cbf[MPID_nem_ib_cbf_nslot *
typedef struct {
void *addr;
uint32_t rkey;
#if 0 /* moving to packet header */
int seq_num_tail; /* notify RDMA-write-to buffer occupation */
#endif
uint8_t tail; /* last word of payload */
uint32_t max_msg_sz; /* max message size */
int seg_seq_num;
......@@ -718,9 +715,6 @@ typedef struct {
typedef enum MPID_nem_ib_pkt_subtype {
MPIDI_NEM_IB_PKT_EAGER_SEND,
#if 0 /* modification of mpid_nem_lmt.c is required */
MPIDI_NEM_IB_PKT_LMT_RTS,
#endif
MPIDI_NEM_IB_PKT_RMA_LMT_RTS,
MPIDI_NEM_IB_PKT_PUT,
MPIDI_NEM_IB_PKT_ACCUMULATE,
......@@ -791,28 +785,9 @@ typedef struct MPID_nem_ib_pkt_change_rdmabuf_occupancy_notify_state_t {
int MPID_nem_ib_PktHandler_EagerSend(MPIDI_VC_t * vc, MPIDI_CH3_Pkt_t * pkt,
MPIDI_msg_sz_t * buflen /* out */ ,
MPID_Request ** rreqp /* out */);
#if 0 /* modification of mpid_nem_lmt.c is required */
int MPID_nem_ib_pkt_RTS_handler(MPIDI_VC_t * vc, MPIDI_CH3_Pkt_t * pkt,
MPIDI_msg_sz_t * buflen /* out */ ,
MPID_Request ** rreqp /* out */);
#endif
int MPID_nem_ib_PktHandler_rma_lmt_rts(MPIDI_VC_t * vc, MPIDI_CH3_Pkt_t * pkt,
MPIDI_msg_sz_t * buflen /* out */ ,
MPID_Request ** rreqp /* out */);
#if 0
int MPID_nem_ib_PktHandler_Put(MPIDI_VC_t * vc, MPIDI_CH3_Pkt_t * pkt,
MPIDI_msg_sz_t * buflen /* out */ ,
MPID_Request ** rreqp /* out */);
int MPID_nem_ib_PktHandler_Accumulate(MPIDI_VC_t * vc, MPIDI_CH3_Pkt_t * pkt,
MPIDI_msg_sz_t * buflen /* out */ ,
MPID_Request ** rreqp /* out */);
int MPID_nem_ib_PktHandler_Get(MPIDI_VC_t * vc, MPIDI_CH3_Pkt_t * pkt,
MPIDI_msg_sz_t * buflen /* out */ ,
MPID_Request ** rreqp /* out */);
int MPID_nem_ib_PktHandler_GetResp(MPIDI_VC_t * vc, MPIDI_CH3_Pkt_t * pkt,
MPIDI_msg_sz_t * buflen /* out */ ,
MPID_Request ** rreqp /* out */);
#endif
int MPID_nem_ib_PktHandler_lmt_done(MPIDI_VC_t * vc, MPIDI_CH3_Pkt_t * pkt,
MPIDI_msg_sz_t * buflen, MPID_Request ** rreqp);
int MPID_nem_ib_pkt_GET_DONE_handler(MPIDI_VC_t * vc, MPIDI_CH3_Pkt_t * pkt,
......
......@@ -499,12 +499,6 @@ int MPID_nem_ib_init(MPIDI_PG_t * pg_p, int pg_rank, char **bc_val_p, int *val_m
}
}
#if 0
MPIU_CHKPMEM_MALLOC(MPID_nem_ib_pollingset, MPIDI_VC_t **,
MPID_NEM_IB_MAX_POLLINGSET * sizeof(MPIDI_VC_t *), mpi_errno,
"connection table");
memset(MPID_nem_ib_pollingset, 0, MPID_NEM_IB_MAX_POLLINGSET * sizeof(MPIDI_VC_t *));
#endif
#ifndef MPID_NEM_IB_ONDEMAND
/* prepare eager-send QP */
for (i = 0; i < MPID_nem_ib_nranks; i++) {
......@@ -596,12 +590,6 @@ int MPID_nem_ib_init(MPIDI_PG_t * pg_p, int pg_rank, char **bc_val_p, int *val_m
}
}
#if 0 /* debug */
for (i = 0; i < MPID_nem_ib_nranks; i++) {
dprintf("init,fd[%d]=%d\n", i, MPID_nem_ib_conns[i].fd);
}
#endif
#else /* define(MPID_NEM_IB_ONDEMAND) */
/* We need to communicate with all other ranks in close sequence. */
MPID_nem_ib_conns_ref_count = MPID_nem_ib_nranks - MPID_nem_mem_region.num_local;
......@@ -821,13 +809,6 @@ int MPID_nem_ib_vc_onconnect(MPIDI_VC_t * vc)
MPID_nem_ib_com_obtain_pointer(MPID_nem_ib_conns[vc->pg_rank].fd, &VC_FIELD(vc, ibcom));
MPIU_ERR_CHKANDJUMP(ibcom_errno, mpi_errno, MPI_ERR_OTHER, "**MPID_nem_ib_com_obtain_pointer");
#if 0
/* Insert into polling set */
MPIU_ERR_CHKANDJUMP(MPID_nem_ib_npollingset + 1 > MPID_NEM_IB_MAX_POLLINGSET, mpi_errno,
MPI_ERR_OTHER, "**MPID_nem_ib_npollingset");
MPID_nem_ib_pollingset[MPID_nem_ib_npollingset++] = vc;
//printf("vc_init,%d->%d,vc=%p,npollingset=%d\n", MPID_nem_ib_myrank, vc->pg_rank, vc, MPID_nem_ib_npollingset);
#endif
fn_exit:
MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_IB_VC_ONCONNECT);
return mpi_errno;
......@@ -903,29 +884,12 @@ int MPID_nem_ib_vc_init(MPIDI_VC_t * vc)
#endif
MPIDI_CHANGE_VC_STATE(vc, ACTIVE);
#if 0 /* dead code */
uint32_t max_msg_sz;
MPID_nem_ib_com_get_info_conn(MPID_nem_ib_conns[vc->pg_rank].fd,
MPID_NEM_IB_COM_INFOKEY_PATTR_MAX_MSG_SZ, &max_msg_sz,
sizeof(max_msg_sz));
#endif
VC_FIELD(vc, pending_sends) = 0;
//MPIU_Assert(sizeof(MPID_nem_ib_netmod_hdr_t) == 8); /* assumption in ib_ibcom.h */
MPIU_Assert(sizeof(MPID_nem_ib_netmod_trailer_t) == 1); /* assumption in ib_ibcom.h */
uint32_t sz;
#if 0
/* assumption in released(), must be power of two */
sz = MPID_NEM_IB_COM_RDMABUF_NSEG;
while ((sz & 1) == 0) {
sz >>= 1;
}
sz >>= 1;
if (sz) {
MPIU_Assert(0);
}
#endif
/* assumption in ib_poll.c, must be power of two */
for (sz = MPID_NEM_IB_COM_RDMABUF_SZSEG; sz > 0; sz >>= 1) {
......@@ -951,16 +915,7 @@ int MPID_nem_ib_vc_init(MPIDI_VC_t * vc)
vc_ch->pkt_handler = MPID_nem_ib_pkt_handler;
vc_ch->num_pkt_handlers = MPIDI_NEM_IB_PKT_NUM_PKT_HANDLERS;
MPID_nem_ib_pkt_handler[MPIDI_NEM_IB_PKT_EAGER_SEND] = MPID_nem_ib_PktHandler_EagerSend;
#if 0 /* modification of mpid_nem_lmt.c is required */
MPID_nem_ib_pkt_handler[MPIDI_NEM_IB_PKT_LMT_RTS] = MPID_nem_ib_pkt_RTS_handler;
#endif
MPID_nem_ib_pkt_handler[MPIDI_NEM_IB_PKT_RMA_LMT_RTS] = MPID_nem_ib_PktHandler_rma_lmt_rts;
#if 0
MPID_nem_ib_pkt_handler[MPIDI_NEM_IB_PKT_PUT] = MPID_nem_ib_PktHandler_Put;
MPID_nem_ib_pkt_handler[MPIDI_NEM_IB_PKT_GET] = MPID_nem_ib_PktHandler_Get;
MPID_nem_ib_pkt_handler[MPIDI_NEM_IB_PKT_GET_RESP] = MPID_nem_ib_PktHandler_GetResp;
MPID_nem_ib_pkt_handler[MPIDI_NEM_IB_PKT_ACCUMULATE] = MPID_nem_ib_PktHandler_Accumulate;
#endif
MPID_nem_ib_pkt_handler[MPIDI_NEM_IB_PKT_LMT_GET_DONE] = MPID_nem_ib_pkt_GET_DONE_handler;
MPID_nem_ib_pkt_handler[MPIDI_NEM_IB_PKT_LMT_RTS] = MPID_nem_ib_pkt_RTS_handler;
MPID_nem_ib_pkt_handler[MPIDI_NEM_IB_PKT_REQ_SEQ_NUM] = MPID_nem_ib_PktHandler_req_seq_num;
......@@ -1055,21 +1010,6 @@ int MPID_nem_ib_vc_terminate(MPIDI_VC_t * vc)
MPID_nem_ib_diff16(vc_ib->ibcom->sseq_num, vc_ib->ibcom->lsr_seq_num_tail),
MPID_nem_ib_sendq_empty(vc_ib->sendq), MPID_nem_ib_ncqe, VC_FIELD(vc, pending_sends));
/* update remote RDMA-write-to buffer occupancy */
#if 0 /* we can't send it when the other party has closed QP */
while (MPID_nem_ib_diff16
(vc_ib->ibcom->rsr_seq_num_tail, vc_ib->ibcom->rsr_seq_num_tail_last_sent) > 0) {
MPID_nem_ib_send_reply_seq_num(vc);
}
#endif
/* update local RDMA-write-to buffer occupancy */
#if 0
while (MPID_nem_ib_diff16(vc_ib->ibcom->sseq_num, vc_ib->ibcom->lsr_seq_num_tail) > 0) {
MPID_nem_ib_poll_eager(vc);
}
#endif
#ifdef MPID_NEM_IB_ONDEMAND
MPID_nem_ib_cm_notify_send_req_t *req = MPIU_Malloc(sizeof(MPID_nem_ib_cm_notify_send_req_t));
req->ibcom = MPID_nem_ib_scratch_pad_ibcoms[vc->pg_rank];
......@@ -1116,13 +1056,6 @@ int MPID_nem_ib_vc_terminate(MPIDI_VC_t * vc)
MPID_nem_ib_diff16(vc_ib->ibcom->sseq_num, vc_ib->ibcom->lsr_seq_num_tail),
MPID_nem_ib_sendq_empty(vc_ib->sendq), MPID_nem_ib_ncqe, VC_FIELD(vc, pending_sends));
#if 0
if (MPID_nem_ib_ncqe > 0 || VC_FIELD(vc, pending_sends) > 0) {
usleep(1000);
MPID_nem_ib_drain_scq(0);
}
#endif
dprintf("init,middle2,%d->%d,r rdmaocc=%d,l rdmaocc=%d,sendq=%d,ncqe=%d,pending_sends=%d\n",
MPID_nem_ib_myrank, vc->pg_rank,
MPID_nem_ib_diff16(vc_ib->ibcom->rsr_seq_num_tail,
......@@ -1134,15 +1067,6 @@ int MPID_nem_ib_vc_terminate(MPIDI_VC_t * vc)
usleep(1000);
MPID_nem_ib_drain_scq(0);
}
#if 0
/* drain scq */
while (MPID_nem_ib_ncqe > 0 || VC_FIELD(vc, pending_sends) > 0) {
usleep(1000);
MPID_nem_ib_drain_scq(0);
//printf("%d\n", VC_FIELD(vc, pending_sends));
//printf("%d\n", MPID_nem_ib_ncqe);
}
#endif
dprintf("init,after ,%d->%d,r rdmaocc=%d,l rdmaocc=%d,sendq=%d,ncqe=%d,pending_sends=%d\n",
MPID_nem_ib_myrank, vc->pg_rank,
......@@ -1170,50 +1094,6 @@ int MPID_nem_ib_vc_terminate(MPIDI_VC_t * vc)
MPIU_ERR_POP(mpi_errno);
}
#if 0 /* We move this code to the end of poll_eager. */
/* Destroy VC QP */
/* Destroy ring-buffer */
ibcom_errno = MPID_nem_ib_ringbuf_free(vc);
MPIU_ERR_CHKANDJUMP(ibcom_errno, mpi_errno, MPI_ERR_OTHER, "**MPID_nem_ib_ringbuf_free");
/* Check connection status stored in VC when on-demand connection is used */
dprintf("vc_terminate,%d->%d,close\n", MPID_nem_ib_myrank, vc->pg_rank);
ibcom_errno = MPID_nem_ib_com_close(vc_ib->sc->fd);
MPIU_ERR_CHKANDJUMP(ibcom_errno, mpi_errno, MPI_ERR_OTHER, "**MPID_nem_ib_com_close");
/* Destroy array of scratch-pad QPs */
MPIU_Assert(MPID_nem_ib_conns_ref_count > 0);
if (--MPID_nem_ib_conns_ref_count == 0) {
MPIU_Free(MPID_nem_ib_conns);
}
/* TODO don't create them for shared memory vc */
/* Destroy scratch-pad */
ibcom_errno = MPID_nem_ib_com_free(MPID_nem_ib_scratch_pad_fds[vc->pg_rank],
#ifdef MPID_NEM_IB_ONDEMAND
MPID_NEM_IB_CM_OFF_CMD +
MPID_NEM_IB_CM_NSEG * sizeof(MPID_nem_ib_cm_cmd_t) +
sizeof(MPID_nem_ib_ringbuf_headtail_t)
#else
MPID_nem_ib_nranks * sizeof(MPID_nem_ib_com_qp_state_t)
#endif
);
MPIU_ERR_CHKANDJUMP(ibcom_errno, mpi_errno, MPI_ERR_OTHER, "**MPID_nem_ib_com_free");
/* Destroy scratch-pad QP */
ibcom_errno = MPID_nem_ib_com_close(MPID_nem_ib_scratch_pad_fds[vc->pg_rank]);
MPIU_ERR_CHKANDJUMP(ibcom_errno, mpi_errno, MPI_ERR_OTHER, "**MPID_nem_ib_com_close");
/* Destroy array of scratch-pad QPs */
MPIU_Assert(MPID_nem_ib_scratch_pad_fds_ref_count > 0);
if (--MPID_nem_ib_scratch_pad_fds_ref_count == 0) {
MPIU_Free(MPID_nem_ib_scratch_pad_fds);
MPIU_Free(MPID_nem_ib_scratch_pad_ibcoms);
}
#endif
dprintf("vc_terminate,exit\n");
fn_exit:
......
......@@ -30,9 +30,6 @@ int MPID_nem_ib_lmt_initiate_lmt(struct MPIDI_VC *vc, union MPIDI_CH3_Pkt *rts_p
MPIDI_msg_sz_t data_sz;
MPID_Datatype *dt_ptr;
MPI_Aint dt_true_lb;
#if 0
MPID_nem_ib_vc_area *vc_ib = VC_IB(vc);
#endif
MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_IB_LMT_INITIATE_LMT);
MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_IB_LMT_INITIATE_LMT);
......@@ -101,15 +98,6 @@ int MPID_nem_ib_lmt_initiate_lmt(struct MPIDI_VC *vc, union MPIDI_CH3_Pkt *rts_p
/* prepare magic */
//*((uint32_t*)(write_from_buf + data_sz - sizeof(tailmagic_t))) = MPID_NEM_IB_COM_MAGIC;
#if 0 /* moving to packet header */ /* embed RDMA-write-to buffer occupancy information */
dprintf("lmt_initiate_lmt,rsr_seq_num_tail=%d\n", vc_ib->ibcom->rsr_seq_num_tail);
/* embed RDMA-write-to buffer occupancy information */
s_cookie_buf->seq_num_tail = vc_ib->ibcom->rsr_seq_num_tail;
/* remember the last one sent */
vc_ib->ibcom->rsr_seq_num_tail_last_sent = vc_ib->ibcom->rsr_seq_num_tail;
#endif
int post_num;
uint32_t max_msg_sz;
MPID_nem_ib_vc_area *vc_ib = VC_IB(vc);
......@@ -367,14 +355,6 @@ int MPID_nem_ib_lmt_start_recv(struct MPIDI_VC *vc, struct MPID_Request *req, MP
MPID_nem_ib_sendq_enqueue(&vc_ib->sendq, req);
}
#if 0 /* moving to packet header */
/* extract embeded RDMA-write-to buffer occupancy information */
dprintf("lmt_start_recv,old lsr_seq_num=%d,s_cookie_buf->seq_num_tail=%d\n",
vc_ib->ibcom->lsr_seq_num_tail, s_cookie_buf->seq_num_tail);
vc_ib->ibcom->lsr_seq_num_tail = s_cookie_buf->seq_num_tail;
//dprintf("lmt_start_recv,new lsr_seq_num=%d\n", vc_ib->ibcom->lsr_seq_num_tail);
#endif
#ifndef MPID_NEM_IB_DISABLE_VAR_OCC_NOTIFY_RATE
/* change remote notification policy of RDMA-write-to buf */
//dprintf("lmt_start_recv,reply_seq_num,old rstate=%d\n", vc_ib->ibcom->rdmabuf_occupancy_notify_rstate);
......@@ -404,83 +384,6 @@ int MPID_nem_ib_lmt_start_recv(struct MPIDI_VC *vc, struct MPID_Request *req, MP
goto fn_exit;
}
#if 0 /* unused function */
/* fall-back to lmt-get if end-flag of send-buf has the same value as the end-flag of recv-buf */
#undef FUNCNAME
#define FUNCNAME MPID_nem_ib_lmt_switch_send
#undef FCNAME
#define FCNAME MPIDI_QUOTE(FUNCNAME)
int MPID_nem_ib_lmt_switch_send(struct MPIDI_VC *vc, struct MPID_Request *req)
{
int mpi_errno = MPI_SUCCESS;
int dt_contig;
MPIDI_msg_sz_t data_sz;
MPID_Datatype *dt_ptr;
MPI_Aint dt_true_lb;
MPID_IOV r_cookie = req->ch.lmt_tmp_cookie;
MPID_nem_ib_lmt_cookie_t *r_cookie_buf = r_cookie.iov_base;
MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_IB_LMT_SWITCH_SEND);
MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_IB_LMT_SWITCH_SEND);
MPIDI_Datatype_get_info(req->dev.user_count, req->dev.datatype, dt_contig, data_sz, dt_ptr,
dt_true_lb);
void *write_from_buf;
if (dt_contig) {
write_from_buf = req->dev.user_buf;
}
else {
/* see MPIDI_CH3_EagerNoncontigSend (in ch3u_eager.c) */
req->dev.segment_ptr = MPID_Segment_alloc();
MPIU_ERR_CHKANDJUMP((req->dev.segment_ptr == NULL), mpi_errno, MPI_ERR_OTHER,
"**outofmemory");
MPID_Segment_init(req->dev.user_buf, req->dev.user_count, req->dev.datatype,
req->dev.segment_ptr, 0);
req->dev.segment_first = 0;
req->dev.segment_size = data_sz;
MPIDI_msg_sz_t last;
last = req->dev.segment_size; /* segment_size is byte offset */
MPIU_Assert(last > 0);
REQ_FIELD(req, lmt_pack_buf) = MPIU_Malloc(data_sz);
MPIU_ERR_CHKANDJUMP(!REQ_FIELD(req, lmt_pack_buf), mpi_errno, MPI_ERR_OTHER,
"**outofmemory");
MPID_Segment_pack(req->dev.segment_ptr, req->dev.segment_first, &last,
(char *) (REQ_FIELD(req, lmt_pack_buf)));
MPIU_Assert(last == req->dev.segment_size);
write_from_buf = REQ_FIELD(req, lmt_pack_buf);
}
//assert(dt_true_lb == 0);
uint8_t *tailp =
(uint8_t *) ((uint8_t *) write_from_buf /*+ dt_true_lb */ + data_sz - sizeof(uint8_t));
#if 0
*is_end_flag_same = (r_cookie_buf->tail == *tailp) ? 1 : 0;
#else
REQ_FIELD(req, lmt_receiver_tail) = r_cookie_buf->tail;
REQ_FIELD(req, lmt_sender_tail) = *tailp;
dprintf("lmt_switch_send,tail on sender=%02x,tail onreceiver=%02x,req=%p\n", *tailp,
r_cookie_buf->tail, req);
#ifdef MPID_NEM_IB_DEBUG_LMT
uint8_t *tail_wordp = (uint8_t *) ((uint8_t *) write_from_buf + data_sz - sizeof(uint32_t) * 2);
#endif
dprintf("lmt_switch_send,tail on sender=%d\n", *tail_wordp);
fflush(stdout);
#endif
fn_exit:
MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_IB_LMT_SWITCH_SEND);
return mpi_errno;
fn_fail:
goto fn_exit;
}
#endif
/* when cookie is received in the middle of the lmt */
#undef FUNCNAME
#define FUNCNAME MPID_nem_ib_lmt_handle_cookie
......@@ -531,9 +434,7 @@ int MPID_nem_ib_lmt_done_send(struct MPIDI_VC *vc, struct MPID_Request *req)
MPID_Datatype_is_contig(req->dev.datatype, &is_contig);
if (!is_contig && REQ_FIELD(req, lmt_pack_buf)) {
dprintf("lmt_done_send,lmt-get,non-contiguous,free lmt_pack_buf\n");
#if 1 /* debug, enable again later */
MPIU_Free(REQ_FIELD(req, lmt_pack_buf));
#endif
}
/* mark completion on sreq */
......
......@@ -34,10 +34,6 @@ static int ref_count;
typedef struct {
char *next;
} free_list_t;
#if 0
static char *free_list_front[MPID_NEM_IB_NIALLOCID] = { 0 };
static char *arena_flist[MPID_NEM_IB_NIALLOCID] = { 0 };
#endif
#define MPID_NEM_IB_SZARENA 4096
#define MPID_NEM_IB_CLUSTER_SIZE (MPID_NEM_IB_SZARENA/sz)
......@@ -155,35 +151,16 @@ static inline void __lru_queue_display()
void *MPID_nem_ib_com_reg_mr_fetch(void *addr, long len,
enum ibv_access_flags additional_flags, int mode)
{
#if 0 /* debug */
struct ibv_mr *mr;
int ibcom_errno = MPID_nem_ib_com_reg_mr(addr, len, &mr);
printf("mrcache,MPID_nem_ib_com_reg_mr,error,addr=%p,len=%d,lkey=%08x,rkey=%08x\n", addr, len,
mr->lkey, mr->rkey);
if (ibcom_errno != 0) {
goto fn_fail;
}
fn_exit:
return mr;
fn_fail:
goto fn_exit;
#else
int ibcom_errno;
int key;
struct MPID_nem_ib_com_reg_mr_cache_entry_t *e;
static unsigned long long num_global_cache = 0ULL;
#if 1 /*def HAVE_LIBDCFA */
/* we can't change addr because ibv_post_send assumes mr->host_addr (output of this function)
* must have an exact mirror value of addr (input of this function) */
void *addr_aligned = addr;
long len_aligned = len;
#else
void *addr_aligned = (void *) ((unsigned long) addr & ~(MPID_NEM_IB_COM_REG_MR_SZPAGE - 1));
long len_aligned =
((((unsigned long) addr + len) - (unsigned long) addr_aligned +
MPID_NEM_IB_COM_REG_MR_SZPAGE - 1) & ~(MPID_NEM_IB_COM_REG_MR_SZPAGE - 1));
#endif
key = MPID_nem_ib_com_hash_func(addr);
dprintf("[MrCache] addr=%p, len=%ld\n", addr, len);
......@@ -208,24 +185,6 @@ void *MPID_nem_ib_com_reg_mr_fetch(void *addr, long len,
// miss
#if 0
// evict an entry and de-register its MR when the cache-set is full
if (way > MPID_NEM_IB_COM_REG_MR_NWAY) {
struct MPID_nem_ib_com_reg_mr_cache_entry_t *victim =
(struct MPID_nem_ib_com_reg_mr_cache_entry_t *) e->lru_prev;
MPID_nem_ib_com_reg_mr_unlink((struct MPID_nem_ib_com_reg_mr_listnode_t *) victim);
//dprintf("MPID_nem_ib_com_reg_mr,evict,entry addr=%p,len=%d,mr addr=%p,len=%ld\n", e->addr, e->len,
//e->mr->addr, e->mr->length);
ibcom_errno = MPID_nem_ib_com_dereg_mr(victim->mr);
if (ibcom_errno) {
printf("mrcache,MPID_nem_ib_com_dereg_mr\n");
goto fn_fail;
}
afree(victim, MPID_NEM_IB_COM_AALLOC_ID_MRCACHE);
}
#endif
e = aalloc(sizeof(struct MPID_nem_ib_com_reg_mr_cache_entry_t),
MPID_NEM_IB_COM_AALLOC_ID_MRCACHE);
/* reference counter is used when evicting entry */
......@@ -237,7 +196,6 @@ void *MPID_nem_ib_com_reg_mr_fetch(void *addr, long len,
if (ibcom_errno != 0) {
/* ib_com_reg_mr returns the errno of ibv_reg_mr */
if (ibcom_errno == ENOMEM) {
#if 1
/* deregister memory region unused and re-register new one */
struct MPID_nem_ib_com_reg_mr_listnode_t *ptr;
struct MPID_nem_ib_com_reg_mr_cache_entry_t *victim;
......@@ -287,40 +245,6 @@ void *MPID_nem_ib_com_reg_mr_fetch(void *addr, long len,
afree(e, MPID_NEM_IB_COM_AALLOC_ID_MRCACHE);
goto fn_fail;
}
#else
/* deregister memory region. The value of 'num_global_cache' means the number of global-cached.
* delete 5 percents of global-cached */
int i;
int del_num = (num_global_cache + 19) / 20;
struct MPID_nem_ib_com_reg_mr_cache_entry_t *victim;
dprintf("mrcache,MPID_nem_ib_com_reg_mr,ENOMEM,del_num(%d)\n", del_num);
for (i = 0; i < del_num; i++) {
/* get LRU data from MPID_nem_ib_com_reg_mr_global_cache */
victim = list_entry(MPID_nem_ib_com_reg_mr_global_cache.lru_prev, struct MPID_nem_ib_com_reg_mr_cache_entry_t, g_lru);
MPID_nem_ib_com_reg_mr_unlink((struct MPID_nem_ib_com_reg_mr_listnode_t *)victim);
MPID_nem_ib_com_reg_mr_unlink(&(victim->g_lru));
ibcom_errno = MPID_nem_ib_com_dereg_mr(victim->mr);
if (ibcom_errno) {
printf("mrcache,MPID_nem_ib_com_dereg_mr\n");
afree(e, MPID_NEM_IB_COM_AALLOC_ID_MRCACHE);
goto fn_fail;
}
afree(victim, MPID_NEM_IB_COM_AALLOC_ID_MRCACHE);
num_global_cache--;
}
/* re-registraion */
ibcom_errno = MPID_nem_ib_com_reg_mr(addr_aligned, len_aligned, &e->mr, additional_flags);
if (ibcom_errno != 0) {
fprintf(stderr, "mrcache,MPID_nem_ib_com_reg_mr,retry,errno=%d\n", ibcom_errno);
afree(e, MPID_NEM_IB_COM_AALLOC_ID_MRCACHE);
goto fn_fail;
}
#endif
}
else {
/* errno is not ENOMEM */
......@@ -352,7 +276,7 @@ void *MPID_nem_ib_com_reg_mr_fetch(void *addr, long len,
/* reference counter is used when evicting entry */
e->refc++;
#if 1
/* move to head of the list */
if (e !=
(struct MPID_nem_ib_com_reg_mr_cache_entry_t *) MPID_nem_ib_com_reg_mr_cache[key].lru_next)
......@@ -361,7 +285,7 @@ void *MPID_nem_ib_com_reg_mr_fetch(void *addr, long len,
MPID_nem_ib_com_reg_mr_insert(&MPID_nem_ib_com_reg_mr_cache[key],
(struct MPID_nem_ib_com_reg_mr_listnode_t *) e);
}
#endif
if (mode != MPID_NEM_IB_COM_REG_MR_STICKY) {
/* move to head of the list in global-cache */
MPID_nem_ib_com_reg_mr_unlink(&(e->g_lru));
......@@ -379,23 +303,8 @@ void *MPID_nem_ib_com_reg_mr_fetch(void *addr, long len,
return e;
fn_fail:
goto fn_exit;
#endif
}
#if 0
static void MPID_nem_ib_com_reg_mr_dereg(struct ibv_mr *mr)
{
struct MPID_nem_ib_com_reg_mr_cache_entry_t *e;
struct MPID_nem_ib_com_reg_mr_cache_entry_t *zero = 0;
unsigned long offset = (unsigned long) zero->mr;
e = (struct MPID_nem_ib_com_reg_mr_cache_entry_t *) ((unsigned long) mr - offset);
e->refc--;
//dprintf("MPID_nem_ib_com_reg_mr_dereg,entry=%p,mr=%p,addr=%p,refc=%d,offset=%lx\n", e, mr, e->mr->addr,
//e->refc, offset);
}
#endif
void MPID_nem_ib_com_reg_mr_release(struct MPID_nem_ib_com_reg_mr_cache_entry_t *entry)
{
entry->refc--;
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment