Commit b72457a4 authored by Philip Carns's avatar Philip Carns

Merge branch 'commit_f2' into 'master'

Commit function

The ROSS API has changed to add a commit function (see [d3bdc07](https://github.com/carothersc/ROSS/commit/d3bdc077a0c4d1481dc06c4b4d4f1eb8e4e6fbf6)).

This merge request updates the various CODES models to the latest ROSS API. All tests pass. It also updates the documentation for CODES v0.5.2 and ROSS commit d3bdc07.

See merge request !5
parents 78745f23 00475389
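
For context, the new ROSS API threads a commit-function pointer through every LP type definition, between the reverse-event and finalize handlers; the model diffs below all pass NULL for it. Below is a minimal sketch of the updated tw_lptype layout, mirroring the initializers in this merge — the example_* names and state struct are illustrative placeholders, only the slot order comes from the diffs:

    #include <ross.h>
    #include <codes/codes_mapping.h>

    /* placeholder state and handlers; real models supply their own */
    struct example_state { int dummy; };
    static void example_init(struct example_state *ns, tw_lp *lp)
        { (void)ns; (void)lp; }
    static void example_event(struct example_state *ns, tw_bf *b, void *m, tw_lp *lp)
        { (void)ns; (void)b; (void)m; (void)lp; }
    static void example_revent(struct example_state *ns, tw_bf *b, void *m, tw_lp *lp)
        { (void)ns; (void)b; (void)m; (void)lp; }
    static void example_final(struct example_state *ns, tw_lp *lp)
        { (void)ns; (void)lp; }

    tw_lptype example_lp = {
        (init_f) example_init,
        (pre_run_f) NULL,
        (event_f) example_event,
        (revent_f) example_revent,
        (commit_f) NULL,          /* new slot introduced by ROSS d3bdc07 */
        (final_f) example_final,
        (map_f) codes_mapping,    /* CODES-provided LP mapping function */
        sizeof(struct example_state),
    };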
......@@ -3,12 +3,12 @@ machines.
0 - Checkout, build, and install the trunk version of ROSS
(https://github.com/carothersc/ROSS). At the time of
release (0.5.2), ROSS's latest commit hash was d3bdc07, so this revision is
"safe" in the unlikely case incompatible changes come along in the future. If
working from the CODES master branches, use the ROSS master branch.
git clone http://github.com/carothersc/ROSS.git
# if using 0.5.2 release: git checkout d3bdc07
cd ROSS
mkdir build
cd build
......@@ -18,7 +18,7 @@ working from the CODES master branches, use the ROSS master branch.
make -j 3
make install
<the result should be that the latest version of ROSS is installed in the
ROSS/install/ directory>
For more details on installing ROSS, go to
......@@ -60,7 +60,7 @@ working from the CODES master branches, use the ROSS master branch.
of Darshan installed in the default system path, or else add
<DARSHAN_PREFIX>/lib/pkgconfig to your PKG_CONFIG_PATH environment
variable before calling configure.
4 - Build and install CODES
make && make install
......
0.5.2 (July 13, 2016)
Summer of CODES was another huge success!
This update adds the commit function to the CODES models, bringing them up to
the latest version of ROSS (d3bdc07).
0.5.1 (June 09, 2016)
network:
......@@ -34,7 +40,7 @@ networks:
addition of the SlimFly network topology, corresponding to the Wolfe et al.
paper "Modeling a Million-node Slim Fly Network using Parallel Discrete-event
Simulation", at SIGSIM-PADS'16. See README.slimfly.txt
Simulation", at SIGSIM-PADS'16. See README.slimfly.txt
(src/networks/model-net/doc).
modelnet now supports sampling at regular intervals. Dragonfly LPs can
......
......@@ -53,7 +53,7 @@ enum svr_event
LOCAL /* local event */
};
/* this struct serves as the ***persistent*** state of the LP representing the
* server in question. This struct is setup when the LP initialization function
* ptr is called */
struct svr_state
......@@ -107,7 +107,8 @@ tw_lptype svr_lp = {
(pre_run_f) NULL,
(event_f) svr_event,
(revent_f) svr_rev_event,
(commit_f) NULL,
(final_f) svr_finalize,
(map_f) codes_mapping,
sizeof(svr_state),
};
......@@ -193,9 +194,9 @@ int main(
/* ROSS initialization function calls */
tw_opt_add(app_opt); /* add user-defined args */
/* initialize ROSS and parse args. NOTE: tw_init calls MPI_Init */
tw_init(&argc, &argv);
if (!conf_file_name[0])
{
fprintf(stderr, "Expected \"codes-config\" option, please see --help.\n");
MPI_Finalize();
......@@ -204,9 +205,9 @@ int main(
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
/* loading the config file into the codes-mapping utility, giving us the
* parsed config object in return.
* "config" is a global var defined by codes-mapping */
if (configuration_load(conf_file_name, MPI_COMM_WORLD, &config)){
fprintf(stderr, "Error loading config file %s.\n", conf_file_name);
......@@ -222,7 +223,7 @@ int main(
/* Setup takes the global config object, the registered LPs, and
* generates/places the LPs as specified in the configuration file.
* This should only be called after ALL LP types have been registered in
* codes */
codes_mapping_setup();
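Condensed, the initialization order the comments in this main() describe looks like the following. This is a sketch using the calls from this example with error handling omitted; the model_net_register() call does not appear in this diff and is assumed from the surrounding example program:

    tw_init(&argc, &argv);             /* also calls MPI_Init */
    configuration_load(conf_file_name, MPI_COMM_WORLD, &config);
    model_net_register();              /* register model-net LP types (assumed) */
    svr_add_lp_type();                 /* register this model's "server" LP */
    codes_mapping_setup();             /* only after ALL LP types are registered */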
......@@ -233,7 +234,7 @@ int main(
assert(num_nets==1);
net_id = *net_ids;
free(net_ids);
/* in this example, we are using simplenet, which simulates point to point
* communication between any two entities (other networks are trickier to
* setup). Hence: */
if(net_id != SIMPLENET)
......@@ -242,7 +243,7 @@ int main(
MPI_Finalize();
return 0;
}
/* calculate the number of servers in this simulation,
* ignoring annotations */
num_servers = codes_mapping_get_lp_count(group_name, 0, "server", NULL, 1);
......@@ -253,7 +254,7 @@ int main(
configuration_get_value_int(&config, param_group_nm, num_reqs_key, NULL, &num_reqs);
configuration_get_value_int(&config, param_group_nm, payload_sz_key, NULL, &payload_sz);
/* begin simulation */
tw_run();
/* model-net has the capability of outputting network transmission stats */
......@@ -270,7 +271,7 @@ const tw_lptype* svr_get_lp_type()
static void svr_add_lp_type()
{
/* lp_type_register should be called exactly once per process per
* LP type */
lp_type_register("server", svr_get_lp_type());
}
......@@ -282,7 +283,7 @@ static void svr_init(
tw_event *e;
svr_msg *m;
tw_stime kickoff_time;
memset(ns, 0, sizeof(*ns));
/* each server sends a dummy event to itself that will kick off the real
......@@ -290,12 +291,12 @@ static void svr_init(
*/
/* skew each kickoff event slightly to help avoid event ties later on */
kickoff_time = g_tw_lookahead + tw_rand_unif(lp->rng);
/* first create the event (time arg is an offset, not absolute time) */
e = tw_event_new(lp->gid, kickoff_time, lp);
/* after event is created, grab the allocated message and set msg-specific
* data */
m = tw_event_data(e);
m->svr_event_type = KICKOFF;
/* event is ready to be processed, send it off */
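The tw_event_send call itself falls outside this hunk; condensed, the self-event idiom used here is (a sketch of the pattern above, using only calls from this file):

    tw_event *e = tw_event_new(lp->gid, kickoff_time, lp); /* time arg is an offset */
    svr_msg *m = tw_event_data(e);     /* message storage lives inside the event */
    m->svr_event_type = KICKOFF;
    tw_event_send(e);                  /* ownership passes to ROSS once sent */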
......@@ -324,7 +325,7 @@ static void svr_event(
handle_kickoff_event(ns, b, m, lp);
break;
case LOCAL:
handle_local_event(ns, b, m, lp);
break;
default:
printf("\n Invalid message type %d ", m->svr_event_type);
......@@ -353,7 +354,7 @@ static void svr_rev_event(
handle_kickoff_rev_event(ns, b, m, lp);
break;
case LOCAL:
handle_local_rev_event(ns, b, m, lp);
break;
default:
assert(0);
......@@ -368,7 +369,7 @@ static void svr_finalize(
svr_state * ns,
tw_lp * lp)
{
printf("server %llu recvd %d bytes in %lf seconds, %lf MiB/s sent_count %d recvd_count %d local_count %d \n",
printf("server %llu recvd %d bytes in %lf seconds, %lf MiB/s sent_count %d recvd_count %d local_count %d \n",
(unsigned long long)(lp->gid/2),
payload_sz*ns->msg_recvd_count,
ns_to_s(ns->end_ts-ns->start_ts),
......@@ -395,7 +396,7 @@ static tw_stime s_to_ns(tw_stime ns)
tw_lpid get_next_server(tw_lpid sender_id)
{
tw_lpid rtn_id;
/* first, get the caller's LP and group info from codes-mapping. Caching this
* info in the LP struct isn't a bad idea for preventing a huge number of
* lookups */
char grp_name[MAX_NAME_LENGTH], lp_type_name[MAX_NAME_LENGTH],
......@@ -404,8 +405,8 @@ tw_lpid get_next_server(tw_lpid sender_id)
int dest_rep_id;
codes_mapping_get_lp_info(sender_id, grp_name, &grp_id, lp_type_name,
&lp_type_id, annotation, &grp_rep_id, &off);
/* in this example, we assume that, for our group of servers, each
* "repetition" consists of a single server/NIC pair. Hence, we grab the
* server ID for the next repetition, looping around if necessary */
num_reps = codes_mapping_get_group_reps(grp_name);
dest_rep_id = (grp_rep_id+1) % num_reps;
......@@ -429,7 +430,7 @@ static void handle_kickoff_event(
/* normally, when using ROSS, events are allocated as a result of the event
* creation process. However, since we are now asking model-net to
* communicate with an entity on our behalf, we need to generate both the
* message to the recipient and an optional callback message
* - thankfully, memory need not persist past the model_net_event call - it
* copies the messages */
svr_msg m_local;
......@@ -443,9 +444,9 @@ static void handle_kickoff_event(
/* record when transfers started on this server */
ns->start_ts = tw_now(lp);
/* each server sends a request to the next highest server
* In this simulation, LP determination is simple: LPs are assigned
* round robin as in serv_1, net_1, serv_2, net_2, etc.
* However, that may not always be the case, so we also show a more
* complicated way to map through codes_mapping */
if (use_brute_force_map)
......@@ -457,13 +458,13 @@ static void handle_kickoff_event(
/* model-net needs to know about (1) higher-level destination LP which is a neighboring server in this case
* (2) struct and size of remote message and (3) struct and size of local message (a local message can be null) */
m->ret = model_net_event(net_id, "test", dest_id, payload_sz, 0.0, sizeof(svr_msg),
m->ret = model_net_event(net_id, "test", dest_id, payload_sz, 0.0, sizeof(svr_msg),
(const void*)&m_remote, sizeof(svr_msg), (const void*)&m_local, lp);
ns->msg_sent_count++;
}
/* at the moment, no need for local callbacks from model-net, so we maintain a
* count for debugging purposes */
static void handle_local_event(
svr_state * ns,
tw_bf * b,
......@@ -492,7 +493,7 @@ static void handle_ack_event(
* destination server */
/* safety check that this request got to the right server, both with our
* brute-force lp calculation and our more generic codes-mapping
* calculation */
assert(m->src == (lp->gid + offset)%(num_servers*2) &&
m->src == get_next_server(lp->gid));
......@@ -509,11 +510,11 @@ static void handle_ack_event(
m_remote.src = lp->gid;
/* send another request */
m->ret = model_net_event(net_id, "test", m->src, payload_sz, 0.0, sizeof(svr_msg),
m->ret = model_net_event(net_id, "test", m->src, payload_sz, 0.0, sizeof(svr_msg),
(const void*)&m_remote, sizeof(svr_msg), (const void*)&m_local, lp);
ns->msg_sent_count++;
m->incremented_flag = 1;
}
else
{
......@@ -541,7 +542,7 @@ static void handle_req_event(
m_remote.src = lp->gid;
/* safety check that this request got to the right server */
assert(lp->gid == (m->src + offset)%(num_servers*2) &&
lp->gid == get_next_server(m->src));
ns->msg_recvd_count++;
......@@ -550,8 +551,8 @@ static void handle_req_event(
/* simulated payload of 1 MiB */
/* also trigger a local event for completion of payload msg */
/* remote host will get an ack event */
m->ret = model_net_event(net_id, "test", m->src, payload_sz, 0.0, sizeof(svr_msg),
m->ret = model_net_event(net_id, "test", m->src, payload_sz, 0.0, sizeof(svr_msg),
(const void*)&m_remote, sizeof(svr_msg), (const void*)&m_local, lp);
return;
}
......@@ -582,7 +583,7 @@ static void handle_req_rev_event(
(void)b;
(void)m;
ns->msg_recvd_count--;
/* model-net has its own reverse computation support */
model_net_event_rc2(lp, &m->ret);
return;
......
......@@ -15,7 +15,7 @@
/**** BEGIN SIMULATION DATA STRUCTURES ****/
/* 'magic' numbers used as sanity check on events */
static int node_magic;
static int forwarder_magic;
/* counts of the various types of nodes in the example system */
......@@ -55,8 +55,8 @@ enum forwarder_event
};
typedef struct forwarder_state_s {
int id; // index w.r.t. forwarders in my group
int is_in_foo;
int fwd_node_count;
int fwd_forwarder_count;
} forwarder_state;
......@@ -98,7 +98,7 @@ void node_finalize(
// messages
int mult;
if (ns->is_in_foo){
mult = 1;
}
else{
mult = (num_foo_nodes / num_bar_nodes) +
......@@ -197,7 +197,7 @@ void node_event_handler(
tw_lp * lp){
(void)b;
assert(m->h.magic == node_magic);
switch (m->h.event_type){
case NODE_KICKOFF:
// nodes from foo ping to nodes in bar
......@@ -223,7 +223,8 @@ static tw_lptype node_lp = {
(pre_run_f) NULL,
(event_f) node_event_handler,
(revent_f) NULL,
(commit_f) NULL,
(final_f) node_finalize,
(map_f) codes_mapping,
sizeof(node_state),
};
......@@ -243,7 +244,7 @@ void node_register(){
void forwarder_lp_init(
forwarder_state * ns,
tw_lp * lp){
// like nodes, forwarders in this example are addressed logically
ns->id = codes_mapping_get_lp_relative_id(lp->gid, 1, 0);
int id_all = codes_mapping_get_lp_relative_id(lp->gid, 0, 0);
ns->is_in_foo = (id_all < num_foo_forwarders);
......@@ -261,7 +262,7 @@ void handle_forwarder_fwd(
forwarder_state * ns,
forwarder_msg * m,
tw_lp * lp){
// compute the forwarder lpid to forward to
int mod;
const char * dest_group;
char * category;
......@@ -295,13 +296,13 @@ void handle_forwarder_recv(
forwarder_state * ns,
forwarder_msg * m,
tw_lp * lp) {
// compute the node to relay the message to
const char * dest_group;
const char * annotation;
char * category;
int net_id;
if (ns->is_in_foo){
dest_group = "FOO_CLUSTER";
dest_group = "FOO_CLUSTER";
annotation = "foo";
category = "pong";
net_id = net_id_foo;
......@@ -354,7 +355,8 @@ static tw_lptype forwarder_lp = {
(pre_run_f) NULL,
(event_f) forwarder_event_handler,
(revent_f) NULL,
(commit_f) NULL,
(final_f) forwarder_finalize,
(map_f) codes_mapping,
sizeof(forwarder_state),
};
......@@ -395,16 +397,16 @@ int main(int argc, char *argv[])
/* ROSS initialization function calls */
tw_opt_add(app_opt); /* add user-defined args */
/* initialize ROSS and parse args. NOTE: tw_init calls MPI_Init */
tw_init(&argc, &argv);
if (!conf_file_name[0]) {
tw_error(TW_LOC,
"Expected \"codes-config\" option, please see --help.\n");
return 1;
}
/* loading the config file into the codes-mapping utility, giving us the
* parsed config object in return.
* "config" is a global var defined by codes-mapping */
if (configuration_load(conf_file_name, MPI_COMM_WORLD, &config)){
tw_error(TW_LOC, "Error loading config file %s.\n", conf_file_name);
......@@ -470,7 +472,7 @@ int main(int argc, char *argv[])
}
free(net_ids);
/* begin simulation */
tw_run();
tw_end();
......
......@@ -80,7 +80,7 @@ static double sampling_end_time = 3000000000;
static int enable_debug = 1;
/* MPI_OP_GET_NEXT is for getting next MPI operation when the previous operation completes.
* MPI_SEND_ARRIVED is issued when an MPI message arrives at its destination (the message is transported by model-net and an event is invoked when it arrives).
* MPI_SEND_POSTED is issued when a MPI message has left the source LP (message is transported via model-net). */
enum MPI_NW_EVENTS
{
......@@ -192,14 +192,14 @@ struct nw_state
/* data for handling reverse computation.
* saved_matched_req holds the request ID of matched receives/sends for wait operations.
* ptr_match_op holds the matched MPI operation, which is removed from the queues when a send is matched with the receive in the forward event handler.
* network event being sent. op is the MPI operation issued by the network workloads API. rv_data holds the data for reverse computation (TODO: Fill this data structure only when the simulation runs in optimistic mode). */
struct nw_message
{
// forward message handler
int msg_type;
int op_type;
struct
{
tw_lpid src_rank;
......@@ -210,7 +210,7 @@ struct nw_message
double sim_start_time;
// for callbacks - time message was received
double msg_send_time;
int16_t req_id;
int tag;
int app_id;
int found_match;
......@@ -306,7 +306,7 @@ static int clear_completed_reqs(nw_state * s,
struct qlist_head * ent = NULL;
qlist_for_each(ent, &s->completed_reqs)
{
struct completed_requests* current =
qlist_entry(ent, completed_requests, ql);
if(current->req_id == reqs[i])
{
......@@ -318,15 +318,15 @@ static int clear_completed_reqs(nw_state * s,
}
return matched;
}
static void add_completed_reqs(nw_state * s,
tw_lp * lp,
int count)
{
int i;
for( i = 0; i < count; i++)
{
struct completed_requests * req = rc_stack_pop(s->matched_reqs);
qlist_add(&req->ql, &s->completed_reqs);
}
}
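clear_completed_reqs and add_completed_reqs form a forward/reverse pair: the forward path unlinks matched requests and parks them on an rc_stack so they survive until rollback is no longer possible, and the reverse path restores them. A sketch of that pairing, condensed from the calls in this file (variable names shortened for illustration):

    /* forward: unlink a completed request but keep it recoverable */
    qlist_del(&req->ql);
    rc_stack_push(lp, req, free, s->matched_reqs); /* free() runs only at GC time */

    /* reverse: undo the unlink by restoring from the rc stack */
    struct completed_requests * req = rc_stack_pop(s->matched_reqs);
    qlist_add(&req->ql, &s->completed_reqs);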
......@@ -337,17 +337,17 @@ static tw_lpid rank_to_lpid(int rank)
}
static int notify_posted_wait(nw_state* s,
tw_bf * bf, nw_message * m, tw_lp * lp,
dumpi_req_id completed_req)
{
struct pending_waits* wait_elem = s->wait_op;
int wait_completed = 0;
m->fwd.wait_completed = 0;
if(!wait_elem)
return 0;
int op_type = wait_elem->op_type;
if(op_type == CODES_WK_WAIT &&
......@@ -355,8 +355,8 @@ static int notify_posted_wait(nw_state* s,
{
wait_completed = 1;
}
else if(op_type == CODES_WK_WAITALL
|| op_type == CODES_WK_WAITANY
|| op_type == CODES_WK_WAITSOME)
{
int i;
......@@ -377,7 +377,7 @@ static int notify_posted_wait(nw_state* s,
fprintf(workload_log, "\n(%lf) APP ID %d MPI WAITALL COMPLETED AT %ld ", s->app_id, tw_now(lp), s->nw_id);
wait_completed = 1;
}
m->fwd.wait_completed = 1;
}
}
......@@ -436,7 +436,7 @@ static void codes_exec_mpi_wait(nw_state* s, tw_lp* lp, struct codes_workload_op
}
static void codes_exec_mpi_wait_all_rc(
nw_state* s,
tw_bf * bf,
nw_message * m,
tw_lp* lp)
......@@ -466,15 +466,15 @@ static void codes_exec_mpi_wait_all_rc(
return;
}
static void codes_exec_mpi_wait_all(
nw_state* s,
tw_bf * bf,
nw_message * m,
tw_lp* lp,
struct codes_workload_op * mpi_op)
{
if(enable_debug)
fprintf(workload_log, "\n MPI WAITALL POSTED AT %ld ", s->nw_id);
if(enable_sampling)
{
bf->c1 = 1;
......@@ -512,7 +512,7 @@ static void codes_exec_mpi_wait_all(
print_waiting_reqs(mpi_op->u.waits.req_ids, count);
print_completed_queue(&s->completed_reqs);
}*/
/* check number of completed irecvs in the completion queue */
for(i = 0; i < count; i++)
{
dumpi_req_id req_id = mpi_op->u.waits.req_ids[i];
......@@ -555,13 +555,13 @@ static void codes_exec_mpi_wait_all(
return;
}
/* search for a matching mpi operation and remove it from the list.
* Record the index in the list from where the element got deleted.
* Index is used for inserting the element once again in the queue for reverse computation. */
static int rm_matching_rcv(nw_state * ns,
tw_bf * bf,
nw_message * m,
tw_lp * lp,
mpi_msgs_queue * qitem)
{
int matched = 0;
......@@ -580,24 +580,24 @@ static int rm_matching_rcv(nw_state * ns,
}
++index;
}
if(matched)
{
m->rc.saved_recv_time = ns->recv_time;
ns->recv_time += (tw_now(lp) - qi->req_init_time);
if(qi->op_type == CODES_WK_IRECV)
update_completed_queue(ns, bf, m, lp, qi->req_id);
qlist_del(&qi->ql);
rc_stack_push(lp, qi, free, ns->processed_ops);
return index;
}
return -1;
}
static int rm_matching_send(nw_state * ns,
tw_bf * bf,
nw_message * m,
tw_lp * lp, mpi_msgs_queue * qitem)
......@@ -609,7 +609,7 @@ static int rm_matching_send(nw_state * ns,
int index = 0;
qlist_for_each(ent, &ns->arrival_queue){
qi = qlist_entry(ent, mpi_msgs_queue, ql);
if((qi->num_bytes == qitem->num_bytes)
&& (qi->tag == qitem->tag || qitem->tag == -1)
&& ((qi->source_rank == qitem->source_rank) || qitem->source_rank == -1))
{
......@@ -635,7 +635,7 @@ static int rm_matching_send(nw_state * ns,
}
static void codes_issue_next_event_rc(tw_lp * lp)
{
tw_rand_reverse_unif(lp->rng);
}
/* Trigger getting next event at LP */
......@@ -667,19 +667,19 @@ static void codes_exec_comp_delay(
ts = s_to_ns(mpi_op->u.delay.seconds);
ts += g_tw_lookahead + 0.1 + tw_rand_exponential(lp->rng, noise);
e = tw_event_new( lp->gid, ts , lp );
msg = tw_event_data(e);
msg->msg_type = MPI_OP_GET_NEXT;
tw_event_send(e);
}
/* reverse computation operation for MPI irecv */
static void codes_exec_mpi_recv_rc(
nw_state* ns,
tw_bf * bf,
nw_message* m,
tw_lp* lp)
{
num_bytes_recvd -= m->rc.saved_num_bytes;
......@@ -687,10 +687,10 @@ static void codes_exec_mpi_recv_rc(
if(m->fwd.found_match >= 0)
{
ns->recv_time = m->rc.saved_recv_time;
int queue_count = qlist_count(&ns->arrival_queue);
mpi_msgs_queue * qi = rc_stack_pop(ns->processed_ops);
if(!m->fwd.found_match)
{
qlist_add(&qi->ql, &ns->arrival_queue);
......@@ -699,7 +699,7 @@ static void codes_exec_mpi_recv_rc(
{
qlist_add_tail(&qi->ql, &ns->arrival_queue);
}
else if(m->fwd.found_match > 0 && m->fwd.found_match < queue_count)
{
int index = 1;
struct qlist_head * ent = NULL;
......@@ -710,7 +710,7 @@ static void codes_exec_mpi_recv_rc(
qlist_add(&qi->ql, ent);
break;
}
index++;
}
}
if(qi->op_type == CODES_WK_IRECV)
......@@ -721,21 +721,21 @@ static void codes_exec_mpi_recv_rc(
}
else if(m->fwd.found_match < 0)
{
struct qlist_head * ent = qlist_pop_back(&ns->pending_recvs_queue);
mpi_msgs_queue * qi = qlist_entry(ent, mpi_msgs_queue, ql);
free(qi);
if(m->op_type == CODES_WK_IRECV)
codes_issue_next_event_rc(lp);
}
}
/* Execute MPI Irecv operation (non-blocking receive) */
/* Execute MPI Irecv operation (non-blocking receive) */
static void codes_exec_mpi_recv(
nw_state* s,
tw_bf * bf,
nw_message * m,
tw_lp* lp,
struct codes_workload_op * mpi_op)
{
/* Once an irecv is posted, list of completed sends is checked to find a matching isend.
......@@ -767,18 +767,18 @@ static void codes_exec_mpi_recv(
{
m->fwd.found_match = -1;
qlist_add_tail(&recv_op->ql, &s->pending_recvs_queue);
/* for mpi irecvs, this is a non-blocking receive so just post it and move on with the trace read. */
if(mpi_op->op_type == CODES_WK_IRECV)
{
codes_issue_next_event(lp);
return;
}
}
else
{
m->fwd.found_match = found_matching_sends;
codes_issue_next_event(lp);
rc_stack_push(lp, recv_op, free, s->processed_ops);
}
}
......@@ -792,15 +792,15 @@ int get_global_id_of_job_rank(tw_lpid job_rank, int app_id)
return global_rank;
}
/* executes MPI send and isend operations */
static void codes_exec_mpi_send(nw_state* s,
tw_bf * bf,
nw_message * m,
tw_lp* lp,
struct codes_workload_op * mpi_op)
{
/* model-net event */
int global_dest_rank = mpi_op->u.send.dest_rank;
if(alloc_spec)
{
global_dest_rank = get_global_id_of_job_rank(mpi_op->u.send.dest_rank, s->app_id);
......@@ -809,7 +809,7 @@ static void codes_exec_mpi_send(nw_state* s,
m->rc.saved_num_bytes = mpi_op->u.send.num_bytes;
/* model-net event */
tw_lpid dest_rank;
codes_mapping_get_lp_info(lp->gid, lp_group_name, &mapping_grp_id,
lp_type_name, &mapping_type_id, annotation