Commit 4ede6937 authored by Misbah Mubarak

Merge branch 'opt-debug-br' into 'master'

Opt debug br

See merge request !37
parents 7039c648 2f3b89ad
Contributors to date (in chronological order, with current affiliations)
- Ning Liu, IBM
- Jason Cope, DDN
- Philip Carns, Argonne National Labs
- Misbah Mubarak, Argonne National Labs
- Shane Snyder, Argonne National Labs
- Jonathan P. Jenkins
- Noah Wolfe, RPI
- Nikhil Jain, Lawrence Livermore Labs
- Matthieu Dorier, Argonne National Labs
- Caitlin Ross, RPI
- Xu Yang, Amazon
- Jens Domke, Tokyo Institute of Tech.
- Xin Wang, IIT
Contributions of external (non-Argonne) collaborators:
Nikhil Jain, Abhinav Bhatele (LLNL)
...
@@ -194,7 +194,7 @@ struct codes_workload_op
 /* TODO: not sure why source rank is here */
 int source_rank;/* source rank of MPI send message */
 int dest_rank; /* dest rank of MPI send message */
-uint64_t num_bytes; /* number of bytes to be transferred over the network */
+int64_t num_bytes; /* number of bytes to be transferred over the network */
 int16_t data_type; /* MPI data type to be matched with the recv */
 int count; /* number of elements to be received */
 int tag; /* tag of the message */
@@ -204,7 +204,7 @@ struct codes_workload_op
 /* TODO: not sure why source rank is here */
 int source_rank;/* source rank of MPI recv message */
 int dest_rank;/* dest rank of MPI recv message */
-uint64_t num_bytes; /* number of bytes to be transferred over the network */
+int64_t num_bytes; /* number of bytes to be transferred over the network */
 int16_t data_type; /* MPI data type to be matched with the send */
 int count; /* number of elements to be sent */
 int tag; /* tag of the message */
...
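The two hunks above retype num_bytes from uint64_t to int64_t, and the later hunks in this merge print such fields through the <inttypes.h> format macros (PRId64/PRIu64) instead of hard-coded %llu/%lld. A minimal, stand-alone sketch of that printing idiom (standard C99, not code from this merge request):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t  num_bytes = 8192;        /* signed 64-bit byte count, as in the retyped struct fields */
        uint64_t total     = 123456789u;  /* unsigned 64-bit counter */

        /* PRId64/PRIu64 expand to the correct length modifier on every platform,
         * so the format string always matches a 64-bit argument (no -Wformat noise). */
        printf("bytes %" PRId64 " total %" PRIu64 "\n", num_bytes, total);
        return 0;
    }

A signed type also leaves room for negative values as error or "unset" sentinels, which an unsigned field cannot represent.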
@@ -50,7 +50,7 @@ char offset_file[8192];
 static int wrkld_id;
 static int num_net_traces = 0;
 static int num_dumpi_traces = 0;
-static uint64_t EAGER_THRESHOLD = 8192;
+static int64_t EAGER_THRESHOLD = 8192;
 static int alloc_spec = 0;
 static tw_stime self_overhead = 10.0;
@@ -155,7 +155,7 @@ struct mpi_msgs_queue
 int tag;
 int source_rank;
 int dest_rank;
-uint64_t num_bytes;
+int64_t num_bytes;
 tw_stime req_init_time;
 dumpi_req_id req_id;
 struct qlist_head ql;
@@ -287,7 +287,7 @@ struct nw_message
 {
 tw_lpid src_rank;
 int dest_rank;
-uint64_t num_bytes;
+int64_t num_bytes;
 int num_matched;
 int data_type;
 double sim_start_time;
@@ -305,7 +305,7 @@ struct nw_message
 double saved_recv_time;
 double saved_wait_time;
 double saved_delay;
-int32_t saved_num_bytes;
+int64_t saved_num_bytes;
 } rc;
 };
@@ -558,7 +558,7 @@ void finish_bckgnd_traffic(
 (void)b;
 (void)msg;
 ns->is_finished = 1;
-lprintf("\n LP %llu completed sending data %lu completed at time %lf ", lp->gid, ns->gen_data, tw_now(lp));
+lprintf("\n LP %llu completed sending data %lu completed at time %lf ", LLU(lp->gid), ns->gen_data, tw_now(lp));
 return;
 }
@@ -671,13 +671,13 @@ void arrive_syn_tr(nw_state * s, tw_bf * bf, nw_message * m, tw_lp * lp)
 num_syn_bytes_recvd += data;
 }
 /* Debugging functions, may generate unused function warning */
-static void print_waiting_reqs(uint32_t * reqs, int count)
+/*static void print_waiting_reqs(uint32_t * reqs, int count)
 {
 lprintf("\n Waiting reqs: %d count", count);
 int i;
 for(i = 0; i < count; i++ )
 lprintf(" %d ", reqs[i]);
-}
+}*/
 static void print_msgs_queue(struct qlist_head * head, int is_send)
 {
 if(is_send)
@@ -690,10 +690,10 @@ static void print_msgs_queue(struct qlist_head * head, int is_send)
 qlist_for_each(ent, head)
 {
 current = qlist_entry(ent, mpi_msgs_queue, ql);
-printf(" \n Source %d Dest %d bytes %llu tag %d ", current->source_rank, current->dest_rank, current->num_bytes, current->tag);
+printf(" \n Source %d Dest %d bytes %"PRId64" tag %d ", current->source_rank, current->dest_rank, current->num_bytes, current->tag);
 }
 }
-static void print_completed_queue(tw_lp * lp, struct qlist_head * head)
+/*static void print_completed_queue(tw_lp * lp, struct qlist_head * head)
 {
 // printf("\n Completed queue: ");
 struct qlist_head * ent = NULL;
@@ -704,7 +704,7 @@ static void print_completed_queue(tw_lp * lp, struct qlist_head * head)
 current = qlist_entry(ent, completed_requests, ql);
 tw_output(lp, " %llu ", current->req_id);
 }
-}
+}*/
 static int clear_completed_reqs(nw_state * s,
 tw_lp * lp,
 int * reqs, int count)
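The hunks above silence unused-function warnings by commenting out the debug helpers print_waiting_reqs() and print_completed_queue(). Purely as an illustrative alternative (not what this commit does), debug-only helpers can instead be compiled conditionally behind a guard macro; the guard name ENABLE_MPI_REPLAY_DEBUG below is hypothetical:

    #include <stdio.h>

    /* Simplified stand-in for a debug-only helper, not the CODES function itself. */
    #ifdef ENABLE_MPI_REPLAY_DEBUG
    static void print_waiting_reqs(const unsigned int *reqs, int count)
    {
        printf("\n Waiting reqs: %d count", count);
        for (int i = 0; i < count; i++)
            printf(" %u ", reqs[i]);
    }
    #endif

    int main(void)
    {
    #ifdef ENABLE_MPI_REPLAY_DEBUG
        unsigned int reqs[3] = {4, 8, 15};
        print_waiting_reqs(reqs, 3);
    #endif
        return 0;
    }

Either approach keeps -Wall/-Wextra builds clean; the comment-out chosen here has the advantage of requiring no build-system changes.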
@@ -826,14 +826,14 @@ static int notify_posted_wait(nw_state* s,
 printf("\n Num completed %d count %d LP %llu ",
 wait_elem->num_completed,
 wait_elem->count,
-lp->gid);
+LLU(lp->gid));
 // if(wait_elem->num_completed > wait_elem->count)
 // tw_lp_suspend(lp, 1, 0);
 if(wait_elem->num_completed >= wait_elem->count)
 {
 if(enable_debug)
-fprintf(workload_log, "\n(%lf) APP ID %d MPI WAITALL COMPLETED AT %llu ", tw_now(lp), s->app_id, s->nw_id);
+fprintf(workload_log, "\n(%lf) APP ID %d MPI WAITALL COMPLETED AT %llu ", tw_now(lp), s->app_id, LLU(s->nw_id));
 wait_completed = 1;
 }
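Several hunks wrap lp->gid and s->nw_id in LLU() before handing them to a %llu conversion. The sketch below shows the general cast-to-unsigned-long-long idiom this relies on; the LLU() definition shown is an assumption for illustration only, the real macro lives in the CODES headers:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed shape of the helper: an explicit widening cast for printf. */
    #define LLU(x) ((unsigned long long)(x))

    int main(void)
    {
        uint64_t lp_gid = 42;   /* stands in for a tw_lpid such as lp->gid */

        /* %llu requires unsigned long long; uint64_t may instead be unsigned long
         * on LP64 systems, so the cast keeps format and argument in agreement. */
        printf("\n LP %llu completed \n", LLU(lp_gid));
        return 0;
    }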
@@ -963,7 +963,7 @@ static void codes_exec_mpi_wait_all(
 struct codes_workload_op * mpi_op)
 {
 if(enable_debug)
-fprintf(workload_log, "\n MPI WAITALL POSTED AT %llu ", s->nw_id);
+fprintf(workload_log, "\n MPI WAITALL POSTED AT %llu ", LLU(s->nw_id));
 if(enable_sampling)
 {
@@ -1372,7 +1372,7 @@ static void codes_exec_mpi_send(nw_state* s,
 }
 if(lp->gid == TRACK_LP)
-printf("\n Sender rank %llu global dest rank %d dest-rank %d bytes %lld Tag %d", s->nw_id, global_dest_rank, mpi_op->u.send.dest_rank, mpi_op->u.send.num_bytes, mpi_op->u.send.tag);
+printf("\n Sender rank %llu global dest rank %d dest-rank %d bytes %"PRIu64" Tag %d", LLU(s->nw_id), global_dest_rank, mpi_op->u.send.dest_rank, mpi_op->u.send.num_bytes, mpi_op->u.send.tag);
 m->rc.saved_num_bytes = mpi_op->u.send.num_bytes;
 /* model-net event */
 tw_lpid dest_rank = codes_mapping_get_lpid_from_relative(global_dest_rank, NULL, "nw-lp", NULL, 0);
@@ -1471,12 +1471,12 @@ static void codes_exec_mpi_send(nw_state* s,
 {
 if(mpi_op->op_type == CODES_WK_ISEND)
 {
-fprintf(workload_log, "\n (%lf) APP %d MPI ISEND SOURCE %llu DEST %d TAG %d BYTES %llu ",
-tw_now(lp), s->app_id, s->nw_id, global_dest_rank, mpi_op->u.send.tag, mpi_op->u.send.num_bytes);
+fprintf(workload_log, "\n (%lf) APP %d MPI ISEND SOURCE %llu DEST %d TAG %d BYTES %"PRId64,
+tw_now(lp), s->app_id, LLU(s->nw_id), global_dest_rank, mpi_op->u.send.tag, mpi_op->u.send.num_bytes);
 }
 else
-fprintf(workload_log, "\n (%lf) APP ID %d MPI SEND SOURCE %llu DEST %d TAG %d BYTES %llu ",
-tw_now(lp), s->app_id, s->nw_id, global_dest_rank, mpi_op->u.send.tag, mpi_op->u.send.num_bytes);
+fprintf(workload_log, "\n (%lf) APP ID %d MPI SEND SOURCE %llu DEST %d TAG %d BYTES %"PRId64,
+tw_now(lp), s->app_id, LLU(s->nw_id), global_dest_rank, mpi_op->u.send.tag, mpi_op->u.send.num_bytes);
 }
 /* isend executed, now get next MPI operation from the queue */
 if(mpi_op->op_type == CODES_WK_ISEND && !is_rend)
@@ -1651,7 +1651,7 @@ static void update_arrival_queue(nw_state* s, tw_bf * bf, nw_message * m, tw_lp
 {
 if(s->app_id != m->fwd.app_id)
 printf("\n Received message for app %d my id %d my rank %llu ",
-m->fwd.app_id, s->app_id, s->nw_id);
+m->fwd.app_id, s->app_id, LLU(s->nw_id));
 assert(s->app_id == m->fwd.app_id);
 //if(s->local_rank != m->fwd.dest_rank)
@@ -2241,14 +2241,14 @@ void nw_test_finalize(nw_state* s, tw_lp* lp)
 qlist_for_each(ent, &s->msg_sz_list)
 {
 tmp_msg = qlist_entry(ent, struct msg_size_info, ql);
-printf("\n Rank %d Msg size %lld num_msgs %d agg_latency %f avg_latency %f",
+printf("\n Rank %d Msg size %"PRId64" num_msgs %d agg_latency %f avg_latency %f",
 s->local_rank, tmp_msg->msg_size, tmp_msg->num_msgs, tmp_msg->agg_latency, tmp_msg->avg_latency);
 //fprintf(msg_size_log, "\n Rank %d Msg size %d num_msgs %d agg_latency %f avg_latency %f",
 // s->local_rank, tmp_msg->msg_size, tmp_msg->num_msgs, tmp_msg->agg_latency, tmp_msg->avg_latency);
 if(s->local_rank == 0)
 {
-fprintf(msg_size_log, "\n %llu %lld %d %f",
-s->nw_id, tmp_msg->msg_size, tmp_msg->num_msgs, tmp_msg->avg_latency);
+fprintf(msg_size_log, "\n %llu %"PRId64" %d %f",
+LLU(s->nw_id), tmp_msg->msg_size, tmp_msg->num_msgs, tmp_msg->avg_latency);
 }
 }
 }
@@ -2259,11 +2259,11 @@ void nw_test_finalize(nw_state* s, tw_lp* lp)
 {
 unmatched = 1;
 printf("\n LP %llu unmatched irecvs %d unmatched sends %d Total sends %ld receives %ld collectives %ld delays %ld wait alls %ld waits %ld send time %lf wait %lf",
-lp->gid, count_irecv, count_isend, s->num_sends, s->num_recvs, s->num_cols, s->num_delays, s->num_waitall, s->num_wait, s->send_time, s->wait_time);
+LLU(lp->gid), count_irecv, count_isend, s->num_sends, s->num_recvs, s->num_cols, s->num_delays, s->num_waitall, s->num_wait, s->send_time, s->wait_time);
 }
-written += sprintf(s->output_buf + written, "\n %llu %llu %ld %ld %ld %ld %lf %lf %lf %d", lp->gid, s->nw_id, s->num_sends, s->num_recvs, s->num_bytes_sent,
+written += sprintf(s->output_buf + written, "\n %llu %llu %ld %ld %ld %ld %lf %lf %lf %d", LLU(lp->gid), LLU(s->nw_id), s->num_sends, s->num_recvs, s->num_bytes_sent,
 s->num_bytes_recvd, s->send_time, s->elapsed_time - s->compute_time, s->compute_time, s->app_id);
-lp_io_write(lp->gid, "mpi-replay-stats", written, s->output_buf);
+lp_io_write(lp->gid, (char*)"mpi-replay-stats", written, s->output_buf);
 if(s->elapsed_time - s->compute_time > max_comm_time)
 max_comm_time = s->elapsed_time - s->compute_time;
@@ -2294,9 +2294,9 @@ void nw_test_finalize(nw_state* s, tw_lp* lp)
 written = 0;
 if(debug_cols)
-written += sprintf(s->col_stats + written, "%lld \t %lf \n", s->nw_id, ns_to_s(s->all_reduce_time / s->num_all_reduce));
-lp_io_write(lp->gid, "avg-all-reduce-time", written, s->col_stats);
+written += sprintf(s->col_stats + written, "%llu \t %lf \n", LLU(s->nw_id), ns_to_s(s->all_reduce_time / s->num_all_reduce));
+lp_io_write(lp->gid, (char*)"avg-all-reduce-time", written, s->col_stats);
 avg_time += s->elapsed_time;
 avg_comm_time += (s->elapsed_time - s->compute_time);
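The (char*) casts added before the lp_io_write() name arguments address the mismatch between a string literal (effectively read-only) and a plain char * parameter, which trips warnings under -Wwrite-strings or in C++ builds. A small stand-in example of the pattern, using a hypothetical write_stats() in place of the real lp_io_write() prototype:

    #include <stdio.h>

    /* Hypothetical stand-in with a non-const name parameter, mirroring the kind
     * of prototype that provokes the warning. */
    static int write_stats(unsigned long long gid, char *name, int len, const char *buf)
    {
        return printf("%llu %s %.*s\n", gid, name, len, buf);
    }

    int main(void)
    {
        char buf[64];
        int written = snprintf(buf, sizeof(buf), "\n %llu 10 20", 7ULL);

        /* The cast documents that the callee treats the literal as read-only
         * and silences the const-qualification warning. */
        write_stats(7ULL, (char *)"mpi-replay-stats", written, buf);
        return 0;
    }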
@@ -2659,7 +2659,7 @@ int modelnet_mpi_replay(MPI_Comm comm, int* argc, char*** argv )
 assert(num_net_traces);
 if(!g_tw_mynode)
-printf("\n Total bytes sent %llu recvd %llu \n max runtime %lf ns avg runtime %lf \n max comm time %lf avg comm time %lf \n max send time %lf avg send time %lf \n max recv time %lf avg recv time %lf \n max wait time %lf avg wait time %lf \n",
+printf("\n Total bytes sent %lld recvd %lld \n max runtime %lf ns avg runtime %lf \n max comm time %lf avg comm time %lf \n max send time %lf avg send time %lf \n max recv time %lf avg recv time %lf \n max wait time %lf avg wait time %lf \n",
 total_bytes_sent,
 total_bytes_recvd,
 max_run_time, avg_run_time/num_net_traces,
...
@@ -2431,7 +2431,7 @@ dragonfly_terminal_final( terminal_state * s,
 if(!s->terminal_id)
 written = sprintf(s->output_buf, "# Format <LP id> <Terminal ID> <Total Data Size> <Aggregate packet latency> <# Flits/Packets finished> <Avg hops> <Busy Time>");
-written += sprintf(s->output_buf + written, "\n %llu %u %lld %lf %ld %lf %lf",
+written += sprintf(s->output_buf + written, "\n %llu %u %"PRId64" %lf %ld %lf %lf",
 LLU(lp->gid), s->terminal_id, s->total_msg_size, (double)s->total_time/s->finished_packets,
 s->finished_packets, (double)s->total_hops/s->finished_chunks,
 s->busy_time);
...
@@ -1027,7 +1027,7 @@ static void dimension_order_routing( nodes_state * s,
 int * dir )
 {
 int dest[s->params->n_dims];
-int dest_id;
+int dest_id = -1;
 /* dummys - check later */
 *dim = -1;
...
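Initializing dest_id to -1 gives the routing code a defined sentinel instead of an indeterminate value, which removes the compiler's may-be-uninitialized warning and makes a missed assignment detectable. A toy illustration of the pattern (made-up coordinates, not the CODES torus routing code):

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        int n_dims    = 3;
        int dest[3]   = {1, 0, 2};   /* destination coordinates (made up) */
        int my_pos[3] = {1, 0, 0};   /* this node's coordinates (made up) */
        int dest_id   = -1;          /* sentinel: "no dimension chosen yet" */

        /* Pick the first dimension in which the coordinates differ. */
        for (int d = 0; d < n_dims; d++) {
            if (dest[d] != my_pos[d]) {
                dest_id = d;
                break;
            }
        }

        assert(dest_id != -1);       /* sentinel makes a missed assignment explicit */
        printf("route along dimension %d\n", dest_id);
        return 0;
    }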
@@ -327,7 +327,7 @@ void codes_workload_print_op(
 break;
 case CODES_WK_SEND:
 fprintf(f, "op: app:%d rank:%d type:send "
-"src:%d dst:%d bytes:%llu type:%d count:%d tag:%d "
+"src:%d dst:%d bytes:%"PRIu64" type:%d count:%d tag:%d "
 "start:%.5e end:%.5e\n",
 app_id, rank,
 op->u.send.source_rank, op->u.send.dest_rank,
@@ -337,7 +337,7 @@ void codes_workload_print_op(
 break;
 case CODES_WK_RECV:
 fprintf(f, "op: app:%d rank:%d type:recv "
-"src:%d dst:%d bytes:%llu type:%d count:%d tag:%d "
+"src:%d dst:%d bytes:%"PRIu64" type:%d count:%d tag:%d "
 "start:%.5e end:%.5e\n",
 app_id, rank,
 op->u.recv.source_rank, op->u.recv.dest_rank,
@@ -347,7 +347,7 @@ void codes_workload_print_op(
 break;
 case CODES_WK_ISEND:
 fprintf(f, "op: app:%d rank:%d type:isend "
-"src:%d dst:%d bytes:%llu type:%d count:%d tag:%d "
+"src:%d dst:%d bytes:%"PRIu64" type:%d count:%d tag:%d "
 "start:%.5e end:%.5e\n",
 app_id, rank,
 op->u.send.source_rank, op->u.send.dest_rank,
@@ -357,7 +357,7 @@ void codes_workload_print_op(
 break;
 case CODES_WK_IRECV:
 fprintf(f, "op: app:%d rank:%d type:irecv "
-"src:%d dst:%d bytes:%llu type:%d count:%d tag:%d "
+"src:%d dst:%d bytes:%"PRIu64" type:%d count:%d tag:%d "
 "start:%.5e end:%.5e\n",
 app_id, rank,
 op->u.recv.source_rank, op->u.recv.dest_rank,
...
@@ -109,12 +109,12 @@ static inline double time_to_us_lf(dumpi_clock t){
 static inline double time_to_ns_lf(dumpi_clock t){
 return (double) t.sec * 1e9 + (double) t.nsec;
 }
-static int32_t get_unique_req_id(int32_t request_id)
+/*static int32_t get_unique_req_id(int32_t request_id)
 {
 uint32_t pc = 0, pb = 0;
 bj_hashlittle2(&request_id, sizeof(int32_t), &pc, &pb);
 return pc;
-}
+}*/
 /*static inline double time_to_s_lf(dumpi_clock t){
 return (double) t.sec + (double) t.nsec / 1e9;
 }*/
...