/*
 * Copyright (C) 2014 University of Chicago.
 * See COPYRIGHT notice in top-level directory.
 *
 */

#include <stddef.h>
#include <assert.h>
#include "codes/model-net.h"
#include "codes/model-net-method.h"
#include "codes/model-net-lp.h"
#include "codes/model-net-sched.h"
#include "codes/codes_mapping.h"
#include "codes/jenkins-hash.h"

#define MN_NAME "model_net_base"

/**** BEGIN SIMULATION DATA STRUCTURES ****/

int model_net_base_magic;
21
int mn_sample_enabled = 0;
22 23 24 25 26

// message-type specific offsets - don't want to get bitten later by alignment
// issues...
static int msg_offsets[MAX_NETS];

27 28
typedef struct model_net_base_params_s {
    model_net_sched_cfg_params sched_params;
29
    uint64_t packet_size;
30
    int use_recv_queue;
31 32
} model_net_base_params;

33
/* annotation-specific parameters (unannotated entry occurs at the
34 35 36 37 38
 * last index) */
static int                       num_params = 0;
static const char              * annos[CONFIGURATION_MAX_ANNOS];
static model_net_base_params     all_params[CONFIGURATION_MAX_ANNOS];

39
static tw_stime mn_sample_interval = 0.0;
40
static tw_stime mn_sample_end = 0.0;
41

42 43
typedef struct model_net_base_state {
    int net_id;
44
    // whether scheduler loop is running
45
    int in_sched_send_loop, in_sched_recv_loop;
46 47 48
    // unique message id counter. This doesn't get decremented on RC to prevent
    // optimistic orderings using "stale" ids
    uint64_t msg_id;
49 50
    // model-net schedulers
    model_net_sched *sched_send, *sched_recv;
51 52
    // parameters
    const model_net_base_params * params;
53 54 55
    // lp type and state of underlying model net method - cache here so we
    // don't have to constantly look up
    const tw_lptype *sub_type;
56
    const st_model_types *sub_model_type;
57 58 59
    void *sub_state;
} model_net_base_state;

/**** END SIMULATION DATA STRUCTURES ****/

/**** BEGIN LP, EVENT PROCESSING FUNCTION DECLS ****/

/* ROSS LP processing functions */
/* initialize base LP state and the wrapped method's state */
static void model_net_base_lp_init(
        model_net_base_state * ns,
        tw_lp * lp);
/* forward event handler: dispatch on message header event type */
static void model_net_base_event(
        model_net_base_state * ns,
        tw_bf * b,
        model_net_wrap_msg * m,
        tw_lp * lp);
/* reverse event handler (optimistic rollback) */
static void model_net_base_event_rc(
        model_net_base_state * ns,
        tw_bf * b,
        model_net_wrap_msg * m,
        tw_lp * lp);
/* end-of-simulation cleanup / stats for the wrapped method */
static void model_net_base_finalize(
        model_net_base_state * ns,
        tw_lp * lp);

/* event type handlers */
static void handle_new_msg(
        model_net_base_state * ns,
        tw_bf *b,
        model_net_wrap_msg * m,
        tw_lp * lp);
static void handle_sched_next(
        model_net_base_state * ns,
        tw_bf *b,
        model_net_wrap_msg * m,
        tw_lp * lp);
static void handle_new_msg_rc(
        model_net_base_state * ns,
        tw_bf *b,
        model_net_wrap_msg * m,
        tw_lp * lp);
static void handle_sched_next_rc(
        model_net_base_state * ns,
        tw_bf *b,
        model_net_wrap_msg * m,
        tw_lp * lp);

/* ROSS function pointer table for this LP */
tw_lptype model_net_base_lp = {
107 108 109 110
    (init_f) model_net_base_lp_init,
    (pre_run_f) NULL,
    (event_f) model_net_base_event,
    (revent_f) model_net_base_event_rc,
111 112
    (commit_f) NULL,
    (final_f)  model_net_base_finalize,
113 114
    (map_f) codes_mapping,
    sizeof(model_net_base_state),
115 116
};

/* setup for the ROSS event tracing
 * can have a different function for rbev_trace_f and ev_trace_f
 * but right now it is set to the same function for both
 */
/* ROSS event-tracing collection callback: writes an integer tag identifying
 * the base-LP event type into buffer, or delegates to the wrapped model's
 * trace function for pass-through events */
void mn_event_collect(model_net_wrap_msg *m, tw_lp *lp, char *buffer, int *collect_flag)
{
    // assigning large numbers to message types to make it easier to
    // determine which messages are model net base LP msgs
    int type;
    void * sub_msg;
    switch (m->h.event_type){
        case MN_BASE_NEW_MSG:
            type = 9000;
            memcpy(buffer, &type, sizeof(type));
            break;
        case MN_BASE_SCHED_NEXT:
            type = 9001;
            memcpy(buffer, &type, sizeof(type));
            break;
        case MN_BASE_SAMPLE:
            type = 9002;
            memcpy(buffer, &type, sizeof(type));
            break;
        case MN_BASE_PASS:
            // hand off to the wrapped model's own trace collection
            sub_msg = ((char*)m)+msg_offsets[((model_net_base_state*)lp->cur_state)->net_id];
            if (g_st_ev_trace == RB_TRACE || g_st_ev_trace == COMMIT_TRACE)
                (((model_net_base_state*)lp->cur_state)->sub_model_type->rbev_trace)(sub_msg, lp, buffer, collect_flag);
            else if (g_st_ev_trace == FULL_TRACE)
                (((model_net_base_state*)lp->cur_state)->sub_model_type->ev_trace)(sub_msg, lp, buffer, collect_flag);
            break;
        default:  // this shouldn't happen, but can help detect an issue
            type = 9004;
            // BUG FIX: the sentinel was previously never copied into the
            // trace buffer, leaving it uninitialized for unknown events
            memcpy(buffer, &type, sizeof(type));
            break;
    }
}
/* ROSS model-level stats callback: delegate to the wrapped model's
 * stat-collection hook */
void mn_model_stat_collect(model_net_base_state *s, tw_lp *lp, char *buffer)
{
    model_stat_f stat_fn = s->sub_model_type->model_stat_fn;
    stat_fn(s->sub_state, lp, buffer);
}

/* per-network trace/stat type tables, each seeded from mn_model_base_type
 * in model_net_base_register() */
st_model_types mn_model_types[MAX_NETS];

st_model_types mn_model_base_type = {
    (rbev_trace_f) mn_event_collect,
    sizeof(int),
    (ev_trace_f) mn_event_collect,
    sizeof(int),
    (model_stat_f) mn_model_stat_collect,
    0
};

/**** END LP, EVENT PROCESSING FUNCTION DECLS ****/

/**** BEGIN IMPLEMENTATIONS ****/

/* enable periodic model sampling: a sample event fires every `interval`
 * time units until `end` (see issue_sample_event) */
void model_net_enable_sampling(tw_stime interval, tw_stime end)
{
    mn_sample_interval = interval;
    mn_sample_end = end;
    mn_sample_enabled = 1;
}

/* returns nonzero iff model_net_enable_sampling() has been called */
int model_net_sampling_enabled(void)
{
    return mn_sample_enabled;
}

// schedule sample event - want to be precise, so no noise here
static void issue_sample_event(tw_lp *lp)
{
    // small epsilon so the final sample at exactly mn_sample_end still fires
    if (tw_now(lp) + mn_sample_interval < mn_sample_end + 0.0001) {
        tw_event *e = tw_event_new(lp->gid, mn_sample_interval, lp);
        model_net_wrap_msg *m = tw_event_data(e);
        msg_set_header(model_net_base_magic, MN_BASE_SAMPLE, lp->gid, &m->h);
        tw_event_send(e);
    }
}

198 199 200 201
void model_net_base_register(int *do_config_nets){
    // here, we initialize ALL lp types to use the base type
    for (int i = 0; i < MAX_NETS; i++){
        if (do_config_nets[i]){
202 203 204 205 206 207 208
            // some model-net lps need custom registration hooks (dragonfly).
            // Those that don't NULL out the reg. function
            if (method_array[i]->mn_register == NULL)
                lp_type_register(model_net_lp_config_names[i],
                        &model_net_base_lp);
            else
                method_array[i]->mn_register(&model_net_base_lp);
209
            if (g_st_ev_trace || g_st_model_stats) // for ROSS event tracing
210
            {
211 212 213 214
                memcpy(&mn_model_types[i], &mn_model_base_type, sizeof(st_model_types));

                if (method_array[i]->mn_model_stat_register == NULL)
                    st_model_type_register(model_net_lp_config_names[i], &mn_model_types[i]);
215
                else
216
                    method_array[i]->mn_model_stat_register(&mn_model_types[i]);
217
            }
218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237
        }
    }
}

/* read scheduler + packet-size configuration for one annotation into *p.
 * anno may be NULL (the unannotated entry). */
static void base_read_config(const char * anno, model_net_base_params *p){
    char sched[MAX_NAME_LENGTH];
    long int packet_size_l = 0;
    uint64_t packet_size;
    int ret;

    ret = configuration_get_value(&config, "PARAMS", "modelnet_scheduler",
            anno, sched, MAX_NAME_LENGTH);
    configuration_get_value_longint(&config, "PARAMS", "packet_size", anno,
            &packet_size_l);
    packet_size = packet_size_l;

    if (ret > 0){
        // map the configured scheduler name to its enum value
        int i;
        for (i = 0; i < MAX_SCHEDS; i++){
            if (strcmp(sched_names[i], sched) == 0){
                p->sched_params.type = i;
                break;
            }
        }
        if (i == MAX_SCHEDS){
            // BUG FIX: message previously said "modelnet-scheduler", but the
            // actual configuration key is "modelnet_scheduler"
            tw_error(TW_LOC,"Unknown value for PARAMS:modelnet_scheduler : "
                    "%s", sched);
        }
    }
    else{
        // default: FCFS
        p->sched_params.type = MN_SCHED_FCFS;
    }

    // get scheduler-specific parameters
    if (p->sched_params.type == MN_SCHED_PRIO){
        // prio scheduler uses default parameters
        int             * num_prios = &p->sched_params.u.prio.num_prios;
        enum sched_type * sub_stype = &p->sched_params.u.prio.sub_stype;
        // number of priorities to allocate
        ret = configuration_get_value_int(&config, "PARAMS",
                "prio-sched-num-prios", anno, num_prios);
        if (ret != 0)
            *num_prios = 10;

        ret = configuration_get_value(&config, "PARAMS",
                "prio-sched-sub-sched", anno, sched, MAX_NAME_LENGTH);
        if (ret == 0)
            *sub_stype = MN_SCHED_FCFS;
        else{
            int i;
            for (i = 0; i < MAX_SCHEDS; i++){
                if (strcmp(sched_names[i], sched) == 0){
                    *sub_stype = i;
                    break;
                }
            }
            if (i == MAX_SCHEDS){
                tw_error(TW_LOC, "Unknown value for "
                        "PARAMS:prio-sched-sub-sched %s", sched);
            }
            else if (i == MN_SCHED_PRIO){
                tw_error(TW_LOC, "priority scheduler cannot be used as a "
                        "priority scheduler's sub sched "
                        "(PARAMS:prio-sched-sub-sched)");
            }
        }
    }

    if (p->sched_params.type == MN_SCHED_FCFS_FULL ||
            (p->sched_params.type == MN_SCHED_PRIO &&
             p->sched_params.u.prio.sub_stype == MN_SCHED_FCFS_FULL)){
        // override packet size to something huge (leave a bit in the unlikely
        // case that an op using packet size causes overflow)
        packet_size = 1ull << 62;
    }
    else if (!packet_size &&
            (p->sched_params.type != MN_SCHED_FCFS_FULL ||
             (p->sched_params.type == MN_SCHED_PRIO &&
              p->sched_params.u.prio.sub_stype != MN_SCHED_FCFS_FULL))){
        packet_size = 512;
        fprintf(stderr, "WARNING, no packet size specified, setting packet "
                "size to %llu\n", LLU(packet_size));
    }

    p->packet_size = packet_size;
}

void model_net_base_configure(){
308 309 310 311 312
    uint32_t h1=0, h2=0;

    bj_hashlittle2(MN_NAME, strlen(MN_NAME), &h1, &h2);
    model_net_base_magic = h1+h2;

313 314 315
    // set up offsets - doesn't matter if they are actually used or not
    msg_offsets[SIMPLENET] =
        offsetof(model_net_wrap_msg, msg.m_snet);
Jonathan Jenkins's avatar
Jonathan Jenkins committed
316 317
    msg_offsets[SIMPLEP2P] =
        offsetof(model_net_wrap_msg, msg.m_sp2p);
318 319 320
    msg_offsets[TORUS] =
        offsetof(model_net_wrap_msg, msg.m_torus);
    msg_offsets[DRAGONFLY] =
321
        offsetof(model_net_wrap_msg, msg.m_dfly);
322 323
    // note: dragonfly router uses the same event struct
    msg_offsets[DRAGONFLY_ROUTER] =
324
        offsetof(model_net_wrap_msg, msg.m_dfly);
325
    msg_offsets[DRAGONFLY_CUSTOM] =
326
        offsetof(model_net_wrap_msg, msg.m_custom_dfly);
327
    msg_offsets[DRAGONFLY_CUSTOM_ROUTER] =
328
        offsetof(model_net_wrap_msg, msg.m_custom_dfly);
329 330
    msg_offsets[SLIMFLY] =
        offsetof(model_net_wrap_msg, msg.m_slim);
331 332
    msg_offsets[FATTREE] =
	offsetof(model_net_wrap_msg, msg.m_fat);
333 334
    msg_offsets[LOGGP] =
        offsetof(model_net_wrap_msg, msg.m_loggp);
Nikhil's avatar
Nikhil committed
335 336 337 338
    msg_offsets[EXPRESS_MESH] =
        offsetof(model_net_wrap_msg, msg.m_em);
    msg_offsets[EXPRESS_MESH_ROUTER] =
        offsetof(model_net_wrap_msg, msg.m_em);
339

340 341 342 343 344 345 346 347 348 349
    // perform the configuration(s)
    // This part is tricky, as we basically have to look up all annotations that
    // have LP names of the form modelnet_*. For each of those, we need to read
    // the base parameters
    // - the init is a little easier as we can use the LP-id to look up the
    // annotation

    // first grab all of the annotations and store locally
    for (int c = 0; c < lpconf.lpannos_count; c++){
        const config_anno_map_t *amap = &lpconf.lpannos[c];
350
        if (strncmp("modelnet_", amap->lp_name.ptr, 9) == 0){
351 352 353
            for (int n = 0; n < amap->num_annos; n++){
                int a;
                for (a = 0; a < num_params; a++){
354 355
                    if (annos[a] != NULL && amap->annotations[n].ptr != NULL &&
                            strcmp(amap->annotations[n].ptr, annos[a]) == 0){
356 357 358 359 360
                        break;
                    }
                }
                if (a == num_params){
                    // found a new annotation
361
                    annos[num_params++] = amap->annotations[n].ptr;
362 363 364 365 366 367 368 369 370 371 372 373 374
                }
            }
            if (amap->has_unanno_lp){
                int a;
                for (a = 0; a < num_params; a++){
                    if (annos[a] == NULL)
                        break;
                }
                if (a == num_params){
                    // found a new (empty) annotation
                    annos[num_params++] = NULL;
                }
            }
375 376
        }
    }
377 378 379 380 381 382

    // now that we have all of the annos for all of the networks, loop through
    // and read the configs
    for (int i = 0; i < num_params; i++){
        base_read_config(annos[i], &all_params[i]);
    }
383 384 385 386 387 388
}

/* LP init: bind annotation parameters, locate the wrapped network method,
 * build send/recv schedulers, allocate and init the sub-model state, and
 * kick off sampling if enabled */
void model_net_base_lp_init(
        model_net_base_state * ns,
        tw_lp * lp){
    // obtain the underlying lp type through codes-mapping
    char lp_type_name[MAX_NAME_LENGTH], anno[MAX_NAME_LENGTH];
    int dummy;

    codes_mapping_get_lp_info(lp->gid, NULL, &dummy,
            lp_type_name, &dummy, anno, &dummy, &dummy);

    ns->msg_id = 0;

    // get annotation-specific parameters
    for (int i = 0; i < num_params; i++){
        // BUG FIX: guard the NULL (unannotated) entry — strcmp(anno, NULL)
        // is undefined behavior
        if ((anno[0]=='\0' && annos[i] == NULL) ||
                (annos[i] != NULL && strcmp(anno, annos[i]) == 0)){
            ns->params = &all_params[i];
            break;
        }
    }

    // find the corresponding method name / index
    for (int i = 0; i < MAX_NETS; i++){
        if (strcmp(model_net_lp_config_names[i], lp_type_name) == 0){
            ns->net_id = i;
            break;
        }
    }

    ns->sched_send = malloc(sizeof(model_net_sched));
    ns->sched_recv = malloc(sizeof(model_net_sched));
    // init both the sender queue and the 'receiver' queue
    model_net_sched_init(&ns->params->sched_params, 0, method_array[ns->net_id],
            ns->sched_send);
    model_net_sched_init(&ns->params->sched_params, 1, method_array[ns->net_id],
            ns->sched_recv);

    ns->sub_type = model_net_get_lp_type(ns->net_id);

    /* some ROSS instrumentation setup */
    if (g_st_ev_trace || g_st_model_stats)
    {
        ns->sub_model_type = model_net_get_model_stat_type(ns->net_id);
        mn_model_types[ns->net_id].mstat_sz = ns->sub_model_type->mstat_sz;
    }

    // NOTE: some models actually expect LP state to be 0 initialized...
    // *cough anything that uses mn_stats_array cough*
    ns->sub_state = calloc(1, ns->sub_type->state_sz);

    // initialize the model-net method
    ns->sub_type->init(ns->sub_state, lp);

    // check validity of sampling function
    event_f  sample  = method_array[ns->net_id]->mn_sample_fn;
    revent_f rsample = method_array[ns->net_id]->mn_sample_rc_fn;
    if (model_net_sampling_enabled()) {
        if (sample == NULL) {
            /* MM: Commented out temporarily--- */
            //tw_error(TW_LOC,
            //        "Sampling requested for a model that doesn't provide it\n");
        }
        else if (rsample == NULL &&
                (g_tw_synchronization_protocol == OPTIMISTIC ||
                 g_tw_synchronization_protocol == OPTIMISTIC_DEBUG)) {
            /* MM: Commented out temporarily--- */
            //tw_error(TW_LOC,
            //        "Sampling requested for a model that doesn't provide it\n");
        }
        else {
            init_f sinit = method_array[ns->net_id]->mn_sample_init_fn;
            if (sinit != NULL)
                sinit(ns->sub_state, lp);
            issue_sample_event(lp);
        }
    }
}

/* forward event handler: validate the header magic and dispatch on the
 * base-LP event type */
void model_net_base_event(
        model_net_base_state * ns,
        tw_bf * b,
        model_net_wrap_msg * m,
        tw_lp * lp){
    if(m->h.magic != model_net_base_magic)
        // BUG FIX: use the file's LLU() cast so the %llu format matches the
        // tw_lpid argument on all platforms
        printf("\n LP ID mismatched %llu ", LLU(lp->gid));

    assert(m->h.magic == model_net_base_magic);

    void * sub_msg;
    switch (m->h.event_type){
        case MN_BASE_NEW_MSG:
            handle_new_msg(ns, b, m, lp);
            break;
        case MN_BASE_SCHED_NEXT:
            handle_sched_next(ns, b, m, lp);
            break;
        case MN_BASE_SAMPLE: ;
            event_f sample = method_array[ns->net_id]->mn_sample_fn;
            assert(model_net_sampling_enabled() && sample != NULL);
            sub_msg = ((char*)m)+msg_offsets[ns->net_id];
            sample(ns->sub_state, b, sub_msg, lp);
            issue_sample_event(lp);
            break;
        case MN_BASE_PASS: ;
            // pass-through: forward to the wrapped model's event handler
            sub_msg = ((char*)m)+msg_offsets[ns->net_id];
            ns->sub_type->event(ns->sub_state, b, sub_msg, lp);
            break;
        /* ... */
        default:
            assert(!"model_net_base event type not known");
            break;
    }
}

/* reverse event handler: mirror of model_net_base_event for optimistic
 * rollback */
void model_net_base_event_rc(
        model_net_base_state * ns,
        tw_bf * b,
        model_net_wrap_msg * m,
        tw_lp * lp){
    assert(m->h.magic == model_net_base_magic);

    void * sub_msg;
    switch (m->h.event_type){
        case MN_BASE_NEW_MSG:
            handle_new_msg_rc(ns, b, m, lp);
            break;
        case MN_BASE_SCHED_NEXT:
            handle_sched_next_rc(ns, b, m, lp);
            break;
        case MN_BASE_SAMPLE: ;
            revent_f sample_rc = method_array[ns->net_id]->mn_sample_rc_fn;
            assert(model_net_sampling_enabled() && sample_rc != NULL);
            sub_msg = ((char*)m)+msg_offsets[ns->net_id];
            sample_rc(ns->sub_state, b, sub_msg, lp);
            break;
        case MN_BASE_PASS: ;
            // pass-through: forward to the wrapped model's reverse handler
            sub_msg = ((char*)m)+msg_offsets[ns->net_id];
            ns->sub_type->revent(ns->sub_state, b, sub_msg, lp);
            break;
        /* ... */
        default:
            assert(!"model_net_base event type not known");
            break;
    }
}

/* LP finalize: flush sampling, finalize the wrapped model, and release
 * state allocated in model_net_base_lp_init */
void model_net_base_finalize(
        model_net_base_state * ns,
        tw_lp * lp){
    final_f sfini = method_array[ns->net_id]->mn_sample_fini_fn;
    if (sfini != NULL)
        sfini(ns->sub_state, lp);
    ns->sub_type->final(ns->sub_state, lp);
    free(ns->sub_state);
    // LEAK FIX: the scheduler containers are malloc'd in lp_init but were
    // never released
    free(ns->sched_send);
    free(ns->sched_recv);
}

542
/// bitfields used:
543
/// c31 - we initiated a sched_next event
544 545 546 547 548
void handle_new_msg(
        model_net_base_state * ns,
        tw_bf *b,
        model_net_wrap_msg * m,
        tw_lp * lp){
549
    // simply pass down to the scheduler
550
    model_net_request *r = &m->msg.m_base.req;
551 552
    // don't forget to set packet size, now that we're responsible for it!
    r->packet_size = ns->params->packet_size;
553
    r->msg_id = ns->msg_id++;
554 555
    void * m_data = m+1;
    void *remote = NULL, *local = NULL;
556
    if (r->remote_event_size > 0){
557 558
        remote = m_data;
        m_data = (char*)m_data + r->remote_event_size;
559 560
    }
    if (r->self_event_size > 0){
561
        local = m_data;
562
    }
563

564
    // set message-specific params
565 566
    int is_from_remote = m->msg.m_base.is_from_remote;
    model_net_sched *ss = is_from_remote ? ns->sched_recv : ns->sched_send;
567
    int *in_sched_loop = is_from_remote  ?
568 569 570
        &ns->in_sched_recv_loop : &ns->in_sched_send_loop;
    model_net_sched_add(r, &m->msg.m_base.sched_params, r->remote_event_size,
            remote, r->self_event_size, local, ss, &m->msg.m_base.rc, lp);
571

572
    if (*in_sched_loop == 0){
573 574
        b->c31 = 1;
        /* No need to issue an extra sched-next event if we're currently idle */
575
        *in_sched_loop = 1;
576 577 578 579 580
        /* NOTE: we can do this because the sched rc struct in the event is
         * *very* lightly used (there's harmless overlap in usage for the
         * priority scheduler) */
        handle_sched_next(ns, b, m, lp);
        assert(*in_sched_loop); // we shouldn't have fallen out of the loop
581 582
    }
}
583 584 585

void handle_new_msg_rc(
        model_net_base_state *ns,
586
        tw_bf *b,
587 588
        model_net_wrap_msg *m,
        tw_lp *lp){
589 590
    int is_from_remote = m->msg.m_base.is_from_remote;
    model_net_sched *ss = is_from_remote ? ns->sched_recv : ns->sched_send;
591
    int *in_sched_loop = is_from_remote  ?
592 593
        &ns->in_sched_recv_loop : &ns->in_sched_send_loop;

594 595
    if (b->c31) {
        handle_sched_next_rc(ns, b, m, lp);
596
        *in_sched_loop = 0;
597
    }
598
    model_net_sched_add_rc(ss, &m->msg.m_base.rc, lp);
599
}
600 601 602 603

/// bitfields used
/// c0 - scheduler loop is finished
void handle_sched_next(
604 605 606 607
        model_net_base_state * ns,
        tw_bf *b,
        model_net_wrap_msg * m,
        tw_lp * lp){
608
    tw_stime poffset;
609 610 611 612 613
    int is_from_remote = m->msg.m_base.is_from_remote;
    model_net_sched * ss = is_from_remote ? ns->sched_recv : ns->sched_send;
    int *in_sched_loop = is_from_remote ?
        &ns->in_sched_recv_loop : &ns->in_sched_send_loop;
    int ret = model_net_sched_next(&poffset, ss, m+1, &m->msg.m_base.rc, lp);
614 615 616 617
    // we only need to know whether scheduling is finished or not - if not,
    // go to the 'next iteration' of the loop
    if (ret == -1){
        b->c0 = 1;
618
        *in_sched_loop = 0;
619
    }
620 621 622
    // Currently, only a subset of the network implementations use the
    // callback-based scheduling loop (model_net_method_idle_event).
    // For all others, we need to schedule the next packet
623
    // immediately
Jonathan Jenkins's avatar
Jonathan Jenkins committed
624
    else if (ns->net_id == SIMPLEP2P || ns->net_id == TORUS){
625
        tw_event *e = tw_event_new(lp->gid,
626
                poffset+codes_local_latency(lp), lp);
627
        model_net_wrap_msg *m_wrap = tw_event_data(e);
628
        msg_set_header(model_net_base_magic, MN_BASE_SCHED_NEXT, lp->gid,
629 630
                &m_wrap->h);
        m_wrap->msg.m_base.is_from_remote = is_from_remote;
631 632
        // no need to set m_base here
        tw_event_send(e);
633 634
    }
}
635

636 637 638 639 640
void handle_sched_next_rc(
        model_net_base_state * ns,
        tw_bf *b,
        model_net_wrap_msg * m,
        tw_lp * lp){
641 642 643 644
    int is_from_remote = m->msg.m_base.is_from_remote;
    model_net_sched * ss = is_from_remote ? ns->sched_recv : ns->sched_send;
    int *in_sched_loop = is_from_remote ?
        &ns->in_sched_recv_loop : &ns->in_sched_send_loop;
645

646
    model_net_sched_next_rc(ss, m+1, &m->msg.m_base.rc, lp);
647
    if (b->c0){
648
        *in_sched_loop = 1;
649
    }
Jonathan Jenkins's avatar
Jonathan Jenkins committed
650
    else if (ns->net_id == SIMPLEP2P || ns->net_id == TORUS){
651 652
        codes_local_latency_reverse(lp);
    }
653 654 655 656 657 658 659 660 661 662 663 664 665
}

/**** END IMPLEMENTATIONS ****/

/* create a MN_BASE_PASS event destined for dest_gid; *msg_data points at the
 * wrapped model's message region, *extra_data (optional) at the trailing
 * payload area. Caller is responsible for tw_event_send(). */
tw_event * model_net_method_event_new(
        tw_lpid dest_gid,
        tw_stime offset_ts,
        tw_lp *sender,
        int net_id,
        void **msg_data,
        void **extra_data){
    tw_event *e = tw_event_new(dest_gid, offset_ts, sender);
    model_net_wrap_msg *m_wrap = tw_event_data(e);
    msg_set_header(model_net_base_magic, MN_BASE_PASS, sender->gid,
            &m_wrap->h);
    *msg_data = ((char*)m_wrap)+msg_offsets[net_id];
    // extra_data is optional
    if (extra_data != NULL){
        *extra_data = m_wrap + 1;
    }
    return e;
}

676 677 678 679 680 681 682 683 684 685 686 687
void model_net_method_send_msg_recv_event(
        tw_lpid final_dest_lp,
        tw_lpid dest_mn_lp,
        tw_lpid src_lp, // the "actual" source (as opposed to the model net lp)
        uint64_t msg_size,
        int is_pull,
        uint64_t pull_size,
        int remote_event_size,
        const mn_sched_params *sched_params,
        const char * category,
        int net_id,
        void * msg,
688
        tw_stime offset,
689
        tw_lp *sender){
690
    tw_event *e =
691
        tw_event_new(dest_mn_lp, offset+codes_local_latency(sender), sender);
692 693 694 695 696 697 698 699
    model_net_wrap_msg *m = tw_event_data(e);
    msg_set_header(model_net_base_magic, MN_BASE_NEW_MSG, sender->gid, &m->h);

    if (sched_params != NULL)
        m->msg.m_base.sched_params = *sched_params;
    else
        model_net_sched_set_default_params(&m->msg.m_base.sched_params);

700 701 702
    model_net_request *r = &m->msg.m_base.req;
    r->final_dest_lp = final_dest_lp;
    r->src_lp = src_lp;
703
    // for "recv" events, set the "dest" to this LP in the case of a pull event
704 705 706 707 708 709 710 711 712
    r->dest_mn_lp = sender->gid;
    r->pull_size = pull_size;
    r->msg_size = msg_size;
    // TODO: document why we're setting packet_size this way
    r->packet_size = msg_size;
    r->net_id = net_id;
    r->is_pull = is_pull;
    r->remote_event_size = remote_event_size;
    r->self_event_size = 0;
713 714
    m->msg.m_base.is_from_remote = 1;

715 716
    strncpy(r->category, category, CATEGORY_NAME_MAX-1);
    r->category[CATEGORY_NAME_MAX-1] = '\0';
717 718 719 720 721 722 723 724 725

    if (remote_event_size > 0){
        void * m_dat = model_net_method_get_edata(net_id, msg);
        memcpy(m+1, m_dat, remote_event_size);
    }

    tw_event_send(e);
}

Jonathan Jenkins's avatar
Jonathan Jenkins committed
726 727 728 729
void model_net_method_send_msg_recv_event_rc(tw_lp *sender){
    codes_local_latency_reverse(sender);
}

730 731 732

void model_net_method_idle_event(tw_stime offset_ts, int is_recv_queue,
        tw_lp * lp){
733 734
    tw_event *e = tw_event_new(lp->gid, offset_ts, lp);
    model_net_wrap_msg *m_wrap = tw_event_data(e);
735 736
    msg_set_header(model_net_base_magic, MN_BASE_SCHED_NEXT, lp->gid,
            &m_wrap->h);
737
    m_wrap->msg.m_base.is_from_remote = is_recv_queue;
738 739 740
    tw_event_send(e);
}

741 742 743 744 745 746 747 748 749 750 751 752
void * model_net_method_get_edata(int net_id, void *msg){
    return (char*)msg + sizeof(model_net_wrap_msg) - msg_offsets[net_id];
}

/*
 * Local variables:
 *  c-indent-level: 4
 *  c-basic-offset: 4
 * End:
 *
 * vim: ft=c ts=8 sts=4 sw=4 expandtab
 */