/*
 * Copyright (C) 2014 University of Chicago.
 * See COPYRIGHT notice in top-level directory.
 *
*/

#include <codes/codes-callback.h>
#include <codes/resource-lp.h>
#include <codes/resource.h>
#include <codes/codes_mapping.h>
#include <codes/configuration.h>
#include <codes/jenkins-hash.h>
#include <codes/quicklist.h>
#include <codes/lp-io.h>
#include <ross.h>
#include <assert.h>
#include <stdio.h>
#include <string.h>


/**** BEGIN SIMULATION DATA STRUCTURES ****/

static int resource_magic; /* use this as sanity check on events */

/* configuration globals (will be consumed by LP when they init) */
static uint64_t avail_unanno;
static uint64_t *avail_per_anno;
static const config_anno_map_t *anno_map;

typedef struct resource_state resource_state;
typedef struct resource_msg resource_msg;
typedef struct pending_op pending_op;

#define TOKEN_DUMMY ((resource_token_t)-1)
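/* placeholder token value returned in get responses (callers only receive a
 * real token from a reserve call) */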

/* event types */
enum resource_event
{
    RESOURCE_GET = 100,
    RESOURCE_FREE,
    RESOURCE_DEQ,
    RESOURCE_RESERVE,
};

struct resource_state {
    resource r;
    /* pending operations - if the resource is exhausted and the caller asked
     * for the 'blocking' method, then we need to stash the request parameters.
     * Index 0 is the general pool; indexes 1.. are the reservation-specific
     * pools. We take advantage of resource_token_t being a simple array
     * index to do the proper indexing */
    struct qlist_head pending[MAX_RESERVE+1];
};

/* the following struct exists because we essentially need to cache a message
 * within a message for rc (ewww) */
struct resource_msg_internal{
    msg_header h;
    /* request data */
    uint64_t req;
    resource_token_t tok; /* only for reserved calls */
    /* behavior when sending response to caller
     * 0 - send the callback immediately if the resource is unavailable.
     * 1 - send the callback only once the resource becomes available
     *     (danger - deadlock possible) */
    int block_on_unavail;
    /* callback data */
    struct codes_cb_params cb;
};

struct resource_msg {
    struct resource_msg_internal i, i_rc;
    // for RC (aside from the message itself): the previous minimum resource
    // value
    uint64_t min_avail_rc;
};

struct pending_op {
    struct resource_msg_internal m;
    struct qlist_head ql;
};

/**** END SIMULATION DATA STRUCTURES ****/

/**** BEGIN LP, EVENT PROCESSING FUNCTION DECLS ****/

/* ROSS LP processing functions */  
static void resource_lp_ind_init(
        resource_state * ns,
        tw_lp * lp);
static void resource_event_handler(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp);
static void resource_rev_handler(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp);
static void resource_finalize(
        resource_state * ns,
        tw_lp * lp);

/* ROSS function pointer table for this LP */
static tw_lptype resource_lp = {
    (init_f) resource_lp_ind_init,
    (pre_run_f) NULL,
    (event_f) resource_event_handler,
    (revent_f) resource_rev_handler,
    (final_f)  resource_finalize, 
    (map_f) codes_mapping,
    sizeof(resource_state),
};

/**** END LP, EVENT PROCESSING FUNCTION DECLS ****/

/**** BEGIN IMPLEMENTATIONS ****/

void resource_lp_ind_init(
        resource_state * ns,
        tw_lp * lp){
    // get my annotation
    const char * anno = codes_mapping_get_annotation_by_lpid(lp->gid);
    if (anno == NULL){
        resource_init(avail_unanno, &ns->r);
    }
    else{
        int idx = configuration_get_annotation_index(anno, anno_map);
        if (idx < 0){
            tw_error(TW_LOC, "resource LP %lu: unable to find annotation "
                    "%s in configuration\n", lp->gid, anno);
        }
        else{
            resource_init(avail_per_anno[idx], &ns->r);
        }
    }
    int i;
    for (i = 0; i < MAX_RESERVE+1; i++){
        INIT_QLIST_HEAD(&ns->pending[i]);
    }
}

static void resource_response(
        struct codes_cb_params const * p,
        tw_lp *lp,
        int ret,
        resource_token_t tok)
{
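    /* deliver the return code and (possibly dummy) token back to the
     * requesting LP via the stored callback parameters */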
    SANITY_CHECK_CB(&p->info, resource_return);

    tw_event *e = tw_event_new(p->h.src, codes_local_latency(lp), lp);
    void * m = tw_event_data(e);

    GET_INIT_CB_PTRS(p, m, lp->gid, h, tag, rc, resource_return);

    rc->ret = ret;
    rc->tok = tok;

    tw_event_send(e);
}

static void resource_response_rc(tw_lp *lp){
    codes_local_latency_reverse(lp);
}

/* bitfield usage:
 * c0 - enqueued a message 
 * c1 - sent an ack 
 * c2 - successfully got the resource */
static void handle_resource_get(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
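    /* try to satisfy the request immediately; if others are already queued or
     * the resource is unavailable, either queue the request (blocking mode)
     * or ack the failure right away */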
    int ret = 1;
    int send_ack = 1;
    // save the previous minimum for RC
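    // (note: resource_* calls wrapped in assert() are needed for their side
    // effects, so this code assumes asserts are enabled, i.e. no NDEBUG)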
    assert(!resource_get_min_avail(m->i.tok, &m->min_avail_rc, &ns->r));
    if (!qlist_empty(&ns->pending[m->i.tok]) ||
            (ret = resource_get(m->i.req, m->i.tok, &ns->r))){
        /* can't satisfy the request right now */
        if (ret == 2)
            tw_error(TW_LOC,
                    "resource LP %lu: invalid token %d passed in "
                    "(%d tokens created)\n",
                    lp->gid, m->i.tok, ns->r.num_tokens);
        else if (ret == -1)
            tw_error(TW_LOC,
                    "resource LP %lu: unsatisfiable request: "
                    "token %d, size %lu\n",
                    lp->gid, m->i.tok, m->i.req);

        if (m->i.block_on_unavail){
            /* queue up operation, save til later */
            b->c0 = 1;
            pending_op *op = (pending_op*)malloc(sizeof(pending_op));
            op->m = m->i; /* no need to set rc msg here */
            qlist_add_tail(&op->ql, &ns->pending[m->i.tok]);
            send_ack = 0;
        }
    }
    if (send_ack){
        b->c1 = 1;
        resource_response(&m->i.cb, lp, ret, TOKEN_DUMMY);
    }

    b->c2 = !ret;
}

/* bitfield usage:
 * c0 - enqueued a message 
 * c1 - sent an ack 
 * c2 - successfully got the resource */
static void handle_resource_get_rc(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    if (b->c0){
        assert(!qlist_empty(&ns->pending[m->i.tok]));
        struct qlist_head *ql = qlist_pop_back(&ns->pending[m->i.tok]);
        free(qlist_entry(ql, pending_op, ql));
    }
    else if (b->c1){
        resource_response_rc(lp);
    }

    if (b->c2){
        assert(!resource_restore_min_avail(m->i.tok, m->min_avail_rc, &ns->r));
        assert(!resource_free(m->i.req, m->i.tok, &ns->r));
    }
}

static void handle_resource_free(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
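    /* release the resource, then schedule a RESOURCE_DEQ to ourselves so any
     * blocked requests can be serviced */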
    assert(!resource_free(m->i.req, m->i.tok, &ns->r));
    /* create an event to pop the next queue item */
    tw_event *e = codes_event_new(lp->gid, codes_local_latency(lp), lp);
    resource_msg *m_deq = (resource_msg*)tw_event_data(e);
    msg_set_header(resource_magic, RESOURCE_DEQ, lp->gid, &m_deq->i.h);
    m_deq->i.tok = m->i.tok; /* only tok is needed, all others grabbed from q */
    tw_event_send(e);
}
static void handle_resource_free_rc(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    assert(!resource_get(m->i.req, m->i.tok, &ns->r));
    codes_local_latency_reverse(lp);
}

/* bitfield usage:
 * c0 - queue was empty to begin with
 * c1 - assuming !c0, alloc succeeded */
static void handle_resource_deq(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
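    /* service the pending queue: if the request at the head can now be
     * satisfied, respond to that caller and schedule another dequeue */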
    if (qlist_empty(&ns->pending[m->i.tok])){
        /* nothing to do */
        b->c0 = 1;
        return;
    }

    struct qlist_head *front = ns->pending[m->i.tok].next;
    pending_op *p = qlist_entry(front, pending_op, ql);
    assert(!resource_get_min_avail(m->i.tok, &m->min_avail_rc, &ns->r));
    int ret = resource_get(p->m.req, p->m.tok, &ns->r);
    assert(ret != 2 && ret != -1);
    if (!ret){
        b->c1 = 1;
        /* success, dequeue (saving as rc) and send to client */
        qlist_del(front);
        m->i_rc = p->m;
        resource_response(&p->m.cb, lp, ret, TOKEN_DUMMY);
        free(p);
        /* additionally attempt to dequeue next one down */
        tw_event *e = codes_event_new(lp->gid, codes_local_latency(lp), lp);
        resource_msg *m_deq = (resource_msg*)tw_event_data(e);
        msg_set_header(resource_magic, RESOURCE_DEQ, lp->gid, &m_deq->i.h);
        /* only tok is needed, all others grabbed from q */
        m_deq->i.tok = m->i.tok; 
        tw_event_send(e);
    }
    /* else do nothing */
}

/* bitfield usage:
 * c0 - dequeue+alloc success */ 
static void handle_resource_deq_rc(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    if (b->c0){
        return;
    }

    if (b->c1){
        /* add operation back to the front of the queue */
        pending_op *op = (pending_op*)malloc(sizeof(pending_op));
        op->m = m->i_rc;
        qlist_add(&op->ql, &ns->pending[m->i.tok]);
        resource_response_rc(lp);
        assert(!resource_restore_min_avail(m->i.tok, m->min_avail_rc, &ns->r)); 
        assert(!resource_free(op->m.req, op->m.tok, &ns->r));
        /* reverse "deq next" op */
        codes_local_latency_reverse(lp);
    }
}

static void handle_resource_reserve(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
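    /* carve out a dedicated pool of the requested size and hand the
     * resulting token back to the caller */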
    resource_token_t tok;
    int ret = resource_reserve(m->i.req, &tok, &ns->r);
    assert(!ret);
    resource_response(&m->i.cb, lp, ret, tok);
}
static void handle_resource_reserve_rc(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    /* this reversal method is essentially a hack that relies on each
     * sequential reserve appending to the end of the list 
     * - we expect reserves to happen strictly at the beginning of the
     *   simulation */
    ns->r.num_tokens--;
    resource_response_rc(lp);
}

void resource_event_handler(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    assert(m->i.h.magic == resource_magic);
    switch(m->i.h.event_type){
        case RESOURCE_GET:
            handle_resource_get(ns,b,m,lp);
            break;
        case RESOURCE_FREE:
            handle_resource_free(ns,b,m,lp);
            break;
        case RESOURCE_DEQ:
            handle_resource_deq(ns,b,m,lp);
            break;
        case RESOURCE_RESERVE:
            handle_resource_reserve(ns,b,m,lp);
            break;
        default:
            assert(0);
    }
}
void resource_rev_handler(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    assert(m->i.h.magic == resource_magic);
    switch(m->i.h.event_type){
        case RESOURCE_GET:
            handle_resource_get_rc(ns,b,m,lp);
            break;
        case RESOURCE_FREE:
            handle_resource_free_rc(ns,b,m,lp);
            break;
        case RESOURCE_DEQ:
            handle_resource_deq_rc(ns,b,m,lp);
            break;
        case RESOURCE_RESERVE:
            handle_resource_reserve_rc(ns,b,m,lp);
            break;
        default:
            assert(0);
    }
}

void resource_finalize(
        resource_state * ns,
        tw_lp * lp){
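    /* warn about requests still pending at the end of the simulation, then
     * dump the peak usage of each pool via lp-io */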
    struct qlist_head *ent;
    for (int i = 0; i < MAX_RESERVE+1; i++){
        qlist_for_each(ent, &ns->pending[i]){
            fprintf(stderr, "WARNING: resource LP %lu has a pending allocation\n",
                    lp->gid);
        }
    }

    char *out_buf = (char*)malloc(1<<12);
    int written;
    // see if I'm the "first" resource (currently doing it globally)
    if (codes_mapping_get_lp_relative_id(lp->gid, 0, 0) == 0){
        written = sprintf(out_buf,
                "# format: <LP> <max used general> <max used token...>\n");
        lp_io_write(lp->gid, RESOURCE_LP_NM, written, out_buf);
    }
    written = sprintf(out_buf, "%lu", lp->gid);

    // compute peak resource usage
    // TODO: wrap this up in the resource interface
    for (int i = 0; i < ns->r.num_tokens+1; i++){
        written += sprintf(out_buf+written, " %lu", ns->r.max[i]-ns->r.min_avail[i]);
    }
    written += sprintf(out_buf+written, "\n");
    lp_io_write(lp->gid, RESOURCE_LP_NM, written, out_buf);
}

/**** END IMPLEMENTATIONS ****/

/**** BEGIN USER-FACING FUNCTIONS ****/
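/* register the resource LP type with codes/ROSS and derive the magic number
 * used to sanity-check incoming events */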
void resource_lp_init(){
    uint32_t h1=0, h2=0;

    bj_hashlittle2(RESOURCE_LP_NM, strlen(RESOURCE_LP_NM), &h1, &h2);
    resource_magic = h1+h2;

    lp_type_register(RESOURCE_LP_NM, &resource_lp);
}

void resource_lp_configure(){
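    /* read the "available" capacity from the resource section of the
     * configuration - once for unannotated LPs and once per annotation */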

    anno_map = codes_mapping_get_lp_anno_map(RESOURCE_LP_NM);
    avail_per_anno = (anno_map->num_annos > 0) ?
        (uint64_t*)malloc(anno_map->num_annos * sizeof(*avail_per_anno)) :
            NULL;
    // get the unannotated version
    long int avail;
    int ret;
    if (anno_map->has_unanno_lp > 0){
        ret = configuration_get_value_longint(&config, RESOURCE_LP_NM,
            "available", NULL, &avail);
        if (ret){
            fprintf(stderr,
                    "Could not find section:resource value:available for "
                    "resource LP\n");
            exit(1);
        }
        assert(avail > 0);
        avail_unanno = (uint64_t)avail;
    }
    for (uint64_t i = 0; i < anno_map->num_annos; i++){
        ret = configuration_get_value_longint(&config, RESOURCE_LP_NM,
            "available", anno_map->annotations[i].ptr, &avail);
        if (ret){
            fprintf(stderr,
                    "Could not find section:resource value:available@%s for "
                    "resource LP\n", anno_map->annotations[i].ptr);
            exit(1);
        }
        assert(avail > 0);
        avail_per_anno[i] = (uint64_t)avail;
    }
}

static void resource_lp_issue_event_base(
        enum resource_event type,
        uint64_t req,
        resource_token_t tok, /* only used in reserve_get/free */
        int block_on_unavail,
        tw_lp *sender,
        struct codes_mctx const * map_ctx,
        int return_tag,
        msg_header const *return_header,
        struct codes_cb_info const *cb)
{
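    /* common helper for the user-facing calls: resolve the resource LP via
     * the mapping context, fill in the request message, and send it after a
     * codes_local_latency delay */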
    if (cb)
        SANITY_CHECK_CB(cb, resource_return);

    tw_lpid resource_lpid =
        codes_mctx_to_lpid(map_ctx, RESOURCE_LP_NM, sender->gid);

    tw_event *e = tw_event_new(resource_lpid, codes_local_latency(sender),
            sender);

    resource_msg *m = tw_event_data(e);

    msg_set_header(resource_magic, type, sender->gid, &m->i.h);
    m->i.req = req;
    m->i.tok = tok;
    m->i.block_on_unavail = block_on_unavail;
    if (map_ctx != NULL && cb != NULL && return_header != NULL) {
        m->i.cb.info = *cb;
        m->i.cb.h = *return_header;
        m->i.cb.tag = return_tag;
    }

    tw_event_send(e);
}

void resource_lp_get(
        uint64_t req,
        int block_on_unavail,
        tw_lp *sender,
        struct codes_mctx const * map_ctx,
        int return_tag,
        msg_header const *return_header,
        struct codes_cb_info const *cb)
{
    resource_lp_issue_event_base(RESOURCE_GET, req, 0, block_on_unavail,
            sender, map_ctx, return_tag, return_header, cb);
}

/* no callback for frees thus far */
void resource_lp_free(
        uint64_t req,
        tw_lp *sender,
        struct codes_mctx const * map_ctx)
{
    resource_lp_issue_event_base(RESOURCE_FREE, req, 0, -1, sender, map_ctx,
            0, NULL, NULL);
}
void resource_lp_reserve(
        uint64_t req,
        int block_on_unavail,
        tw_lp *sender,
        struct codes_mctx const * map_ctx,
        int return_tag,
        msg_header const *return_header,
        struct codes_cb_info const *cb)
{
    resource_lp_issue_event_base(RESOURCE_RESERVE, req, 0, block_on_unavail,
            sender, map_ctx, return_tag, return_header, cb);
}
void resource_lp_get_reserved(
        uint64_t req,
        resource_token_t tok,
        int block_on_unavail,
        tw_lp *sender,
        struct codes_mctx const * map_ctx,
        int return_tag,
        msg_header const *return_header,
        struct codes_cb_info const *cb)
{
    resource_lp_issue_event_base(RESOURCE_GET, req, tok, block_on_unavail,
            sender, map_ctx, return_tag, return_header, cb);
}
void resource_lp_free_reserved(
        uint64_t req,
        resource_token_t tok,
        tw_lp *sender,
        struct codes_mctx const * map_ctx)
{
    resource_lp_issue_event_base(RESOURCE_FREE, req, tok, -1,
            sender, map_ctx, 0, NULL, NULL);
}

/* rc functions - thankfully, they only use codes-local-latency, so no need 
 * to pass in any arguments */

static void resource_lp_issue_event_base_rc(tw_lp *sender){
    codes_local_latency_reverse(sender);
}

void resource_lp_get_rc(tw_lp *sender){
    resource_lp_issue_event_base_rc(sender);
}
void resource_lp_free_rc(tw_lp *sender){
    resource_lp_issue_event_base_rc(sender);
}
void resource_lp_reserve_rc(tw_lp *sender){
    resource_lp_issue_event_base_rc(sender);
}
void resource_lp_get_reserved_rc(tw_lp *sender){
    resource_lp_issue_event_base_rc(sender);
}
void resource_lp_free_reserved_rc(tw_lp *sender){
    resource_lp_issue_event_base_rc(sender);
}

/*
 * Local variables:
 *  c-indent-level: 4
 *  c-basic-offset: 4
 * End:
 *
 * vim: ts=8 sts=4 sw=4 expandtab
 */