/*
 * Copyright (C) 2014 University of Chicago.
 * See COPYRIGHT notice in top-level directory.
 *
*/

#include <codes/codes-callback.h>
#include <codes/resource-lp.h>
#include <codes/resource.h>
#include <codes/codes_mapping.h>
#include <codes/configuration.h>
#include <codes/jenkins-hash.h>
#include <codes/quicklist.h>
#include <codes/lp-io.h>
#include <ross.h>

#include <assert.h>
#include <stdio.h>
#include <string.h>


/**** BEGIN SIMULATION DATA STRUCTURES ****/

static int resource_magic; /* use this as a sanity check on events */

/* configuration globals (consumed by the LPs when they init) */
static uint64_t avail_unanno;
static uint64_t *avail_per_anno;
static const config_anno_map_t *anno_map;

typedef struct resource_state resource_state;
typedef struct resource_msg resource_msg;
typedef struct pending_op pending_op;

#define TOKEN_DUMMY ((resource_token_t)-1)

/* event types */
enum resource_event
{
    RESOURCE_GET = 100,
    RESOURCE_FREE,
    RESOURCE_DEQ,
    RESOURCE_RESERVE,
};

struct resource_state {
    resource r;
    /* pending operations - if we run out of the resource and the caller is
     * using the 'blocking' method, then we need to stash the request
     * parameters here.
     * Index 0 is the general pool; indices 1.. are the reservation-specific
     * pools. We take advantage of resource_token_t's status as a simple
     * array index to do the proper indexing. */
    struct qlist_head pending[MAX_RESERVE+1];
};

/* the following struct exists because we essentially cache a message within
 * a message for reverse computation (ewww) */
struct resource_msg_internal{
    msg_header h;
    /* request data */
    uint64_t req;
    resource_token_t tok; /* only for reserved calls */
    /* behavior when sending the response to the caller:
     * 0 - send the callback immediately if the resource is unavailable.
     * 1 - send the callback when the resource becomes available (danger -
     *     deadlock possible) */
    int block_on_unavail;
    /* callback data */
    struct codes_cb_params cb;
};

struct resource_msg {
    struct resource_msg_internal i, i_rc;
    // for RC (aside from the message itself): the previous minimum resource
    // value
    uint64_t min_avail_rc;
};

struct pending_op {
    struct resource_msg_internal m;
    struct qlist_head ql;
};

/**** END SIMULATION DATA STRUCTURES ****/

/**** BEGIN LP, EVENT PROCESSING FUNCTION DECLS ****/

/* ROSS LP processing functions */
static void resource_lp_ind_init(
        resource_state * ns,
        tw_lp * lp);
static void resource_event_handler(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp);
static void resource_rev_handler(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp);
static void resource_finalize(
        resource_state * ns,
        tw_lp * lp);

/* ROSS function pointer table for this LP */
static tw_lptype resource_lp = {
    (init_f) resource_lp_ind_init,
    (pre_run_f) NULL,
    (event_f) resource_event_handler,
    (revent_f) resource_rev_handler,
    (commit_f) NULL,
    (final_f)  resource_finalize,
    (map_f) codes_mapping,
    sizeof(resource_state),
};

/**** END LP, EVENT PROCESSING FUNCTION DECLS ****/

/**** BEGIN IMPLEMENTATIONS ****/

void resource_lp_ind_init(
        resource_state * ns,
        tw_lp * lp){
    // get my annotation
    const char * anno = codes_mapping_get_annotation_by_lpid(lp->gid);
    if (anno == NULL){
        resource_init(avail_unanno, &ns->r);
    }
    else{
        int idx = configuration_get_annotation_index(anno, anno_map);
        if (idx < 0){
            tw_error(TW_LOC, "resource LP %lu: unable to find annotation "
                    "%s in configuration\n", lp->gid, anno);
        }
        else{
            resource_init(avail_per_anno[idx], &ns->r);
        }
    }
    int i;
    for (i = 0; i < MAX_RESERVE+1; i++){
        INIT_QLIST_HEAD(&ns->pending[i]);
    }
}

static void resource_response(
        struct codes_cb_params const * p,
        tw_lp *lp,
        int ret,
        resource_token_t tok)
{
    SANITY_CHECK_CB(&p->info, resource_return);

    tw_event *e = tw_event_new(p->h.src, codes_local_latency(lp), lp);
    void * m = tw_event_data(e);

    GET_INIT_CB_PTRS(p, m, lp->gid, h, tag, rc, resource_return);

    rc->ret = ret;
    rc->tok = tok;

    tw_event_send(e);
}

static void resource_response_rc(tw_lp *lp){
    codes_local_latency_reverse(lp);
}

/* bitfield usage:
 * c0 - enqueued a message
 * c1 - sent an ack
 * c2 - successfully got the resource */
static void handle_resource_get(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    int ret = 1;
    int send_ack = 1;
    // save the previous minimum for RC
    assert(!resource_get_min_avail(m->i.tok, &m->min_avail_rc, &ns->r));
    if (!qlist_empty(&ns->pending[m->i.tok]) ||
            (ret = resource_get(m->i.req, m->i.tok, &ns->r))){
        /* failed to acquire the resource */
        if (ret == 2)
            tw_error(TW_LOC,
                    "resource LP %lu: invalid token %d passed in "
                    "(%d tokens created)\n",
                    lp->gid, m->i.tok, ns->r.num_tokens);
        else if (ret == -1)
            tw_error(TW_LOC,
                    "resource LP %lu: unsatisfiable request: "
                    "token %d, size %lu\n",
                    lp->gid, m->i.tok, m->i.req);

        if (m->i.block_on_unavail){
            /* queue up the operation to retry later */
            b->c0 = 1;
            pending_op *op = (pending_op*)malloc(sizeof(pending_op));
            op->m = m->i; /* no need to set the rc msg here */
            qlist_add_tail(&op->ql, &ns->pending[m->i.tok]);
            send_ack = 0;
        }
    }
    if (send_ack){
        b->c1 = 1;
        resource_response(&m->i.cb, lp, ret, TOKEN_DUMMY);
    }

    b->c2 = !ret;
}

/* bitfield usage:
 * c0 - enqueued a message
 * c1 - sent an ack
 * c2 - successfully got the resource */
static void handle_resource_get_rc(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    if (b->c0){
        assert(!qlist_empty(&ns->pending[m->i.tok]));
        struct qlist_head *ql = qlist_pop_back(&ns->pending[m->i.tok]);
        free(qlist_entry(ql, pending_op, ql));
    }
    else if (b->c1){
        resource_response_rc(lp);
    }

    if (b->c2){
        assert(!resource_restore_min_avail(m->i.tok, m->min_avail_rc, &ns->r));
        assert(!resource_free(m->i.req, m->i.tok, &ns->r));
    }
}

static void handle_resource_free(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    (void)b;
    assert(!resource_free(m->i.req, m->i.tok, &ns->r));
    /* create an event to pop the next queue item */
    tw_event *e = tw_event_new(lp->gid, codes_local_latency(lp), lp);
    resource_msg *m_deq = (resource_msg*)tw_event_data(e);
    msg_set_header(resource_magic, RESOURCE_DEQ, lp->gid, &m_deq->i.h);
    m_deq->i.tok = m->i.tok; /* only tok is needed, all others grabbed from q */
    tw_event_send(e);
}
static void handle_resource_free_rc(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    (void)b;
    assert(!resource_get(m->i.req, m->i.tok, &ns->r));
    codes_local_latency_reverse(lp);
}

/* bitfield usage:
 * c0 - queue was empty to begin with
 * c1 - assuming !c0, alloc succeeded */
static void handle_resource_deq(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    if (qlist_empty(&ns->pending[m->i.tok])){
        /* nothing to do */
        b->c0 = 1;
        return;
    }

    struct qlist_head *front = ns->pending[m->i.tok].next;
    pending_op *p = qlist_entry(front, pending_op, ql);
    assert(!resource_get_min_avail(m->i.tok, &m->min_avail_rc, &ns->r));
    int ret = resource_get(p->m.req, p->m.tok, &ns->r);
    assert(ret != 2 && ret != -1);
    if (!ret){
        b->c1 = 1;
        /* success: dequeue (saving the op for rc) and respond to the client */
        qlist_del(front);
        m->i_rc = p->m;
        resource_response(&p->m.cb, lp, ret, TOKEN_DUMMY);
        free(p);
        /* additionally attempt to dequeue the next one down */
        tw_event *e = tw_event_new(lp->gid, codes_local_latency(lp), lp);
        resource_msg *m_deq = (resource_msg*)tw_event_data(e);
        msg_set_header(resource_magic, RESOURCE_DEQ, lp->gid, &m_deq->i.h);
        /* only tok is needed, all others grabbed from q */
        m_deq->i.tok = m->i.tok;
        tw_event_send(e);
    }
    /* else do nothing */
}

/* bitfield usage (mirrors handle_resource_deq):
 * c0 - queue was empty, nothing to undo
 * c1 - dequeue + alloc succeeded */
static void handle_resource_deq_rc(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    if (b->c0){
        return;
    }

    if (b->c1){
        /* add the operation back to the front of the queue */
        pending_op *op = (pending_op*)malloc(sizeof(pending_op));
        op->m = m->i_rc;
        qlist_add(&op->ql, &ns->pending[m->i.tok]);
        resource_response_rc(lp);
        assert(!resource_restore_min_avail(m->i.tok, m->min_avail_rc, &ns->r));
        assert(!resource_free(op->m.req, op->m.tok, &ns->r));
        /* reverse the "deq next" op */
        codes_local_latency_reverse(lp);
    }
}

static void handle_resource_reserve(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    (void)b;
    resource_token_t tok;
    int ret = resource_reserve(m->i.req, &tok, &ns->r);
    assert(!ret);
    resource_response(&m->i.cb, lp, ret, tok);
}
static void handle_resource_reserve_rc(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    (void)b;
    /* this reversal method is essentially a hack that relies on each
     * sequential reserve appending to the end of the list
     * - we expect reserves to happen strictly at the beginning of the
     *   simulation */
    /* NOTE: this logic will change if the resource_reserve logic changes */
    ns->r.num_tokens--;
    ns->r.max[0] += m->i.req;
    ns->r.avail[0] += m->i.req;

    resource_response_rc(lp);
}

void resource_event_handler(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    assert(m->i.h.magic == resource_magic);
    switch(m->i.h.event_type){
        case RESOURCE_GET:
            handle_resource_get(ns,b,m,lp);
            break;
        case RESOURCE_FREE:
            handle_resource_free(ns,b,m,lp);
            break;
        case RESOURCE_DEQ:
            handle_resource_deq(ns,b,m,lp);
            break;
        case RESOURCE_RESERVE:
            handle_resource_reserve(ns,b,m,lp);
            break;
        default:
            assert(0);
    }
}
void resource_rev_handler(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    assert(m->i.h.magic == resource_magic);
    switch(m->i.h.event_type){
        case RESOURCE_GET:
            handle_resource_get_rc(ns,b,m,lp);
            break;
        case RESOURCE_FREE:
            handle_resource_free_rc(ns,b,m,lp);
            break;
        case RESOURCE_DEQ:
            handle_resource_deq_rc(ns,b,m,lp);
            break;
        case RESOURCE_RESERVE:
            handle_resource_reserve_rc(ns,b,m,lp);
            break;
        default:
            assert(0);
    }
}

void resource_finalize(
        resource_state * ns,
        tw_lp * lp){
    struct qlist_head *ent;
    for (int i = 0; i < MAX_RESERVE+1; i++){
        qlist_for_each(ent, &ns->pending[i]){
            fprintf(stderr, "WARNING: resource LP %llu has a pending allocation\n",
                    LLU(lp->gid));
        }
    }

    char *out_buf = (char*)malloc(1<<12);
    int written;
    // see if I'm the "first" resource LP (currently determined globally)
    if (codes_mapping_get_lp_relative_id(lp->gid, 0, 0) == 0){
        written = sprintf(out_buf,
                "# format: <LP> <max used general> <max used token...>\n");
        lp_io_write(lp->gid, RESOURCE_LP_NM, written, out_buf);
    }
    written = sprintf(out_buf, "%llu", LLU(lp->gid));

    // compute peak resource usage
    // TODO: wrap this up in the resource interface
    for (unsigned i = 0; i < ns->r.num_tokens+1; i++){
        written += sprintf(out_buf+written, " %llu", LLU(ns->r.max[i]-ns->r.min_avail[i]));
    }
    written += sprintf(out_buf+written, "\n");
    lp_io_write(lp->gid, RESOURCE_LP_NM, written, out_buf);
}
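
/* Shape of the lp-io output produced above: the header line is written once
 * (by the "first" resource LP), then each resource LP emits one line with the
 * peak usage of the general pool followed by each reserved pool. The numbers
 * below are purely illustrative, not real output:
 *
 *   # format: <LP> <max used general> <max used token...>
 *   42 1048576 65536
 */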

/**** END IMPLEMENTATIONS ****/

/**** BEGIN USER-FACING FUNCTIONS ****/
void resource_lp_init(){
    uint32_t h1=0, h2=0;

    bj_hashlittle2(RESOURCE_LP_NM, strlen(RESOURCE_LP_NM), &h1, &h2);
    resource_magic = h1+h2;

    lp_type_register(RESOURCE_LP_NM, &resource_lp);
}
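
/* A minimal setup-time sketch (an assumption about a typical CODES model
 * main(); the conf_file name is illustrative). resource_lp_init() must run
 * before codes_mapping_setup() so the LP type is registered, and
 * resource_lp_configure() must run after it so the annotation map exists:
 *
 *   configuration_load(conf_file, MPI_COMM_WORLD, &config);
 *   resource_lp_init();       // register the resource LP type with ROSS
 *   codes_mapping_setup();    // instantiate LPs from the configuration
 *   resource_lp_configure();  // read per-annotation "available" capacities
 */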

void resource_lp_configure(){

    anno_map = codes_mapping_get_lp_anno_map(RESOURCE_LP_NM);
    avail_per_anno = (anno_map->num_annos > 0) ?
        (uint64_t*)malloc(anno_map->num_annos * sizeof(*avail_per_anno)) :
        NULL;
    // get the unannotated version
    long int avail;
    int ret;
    if (anno_map->has_unanno_lp > 0){
        ret = configuration_get_value_longint(&config, RESOURCE_LP_NM,
            "available", NULL, &avail);
        if (ret){
            fprintf(stderr,
                    "Could not find section:resource value:available for "
                    "resource LP\n");
            exit(1);
        }
        assert(avail > 0);
        avail_unanno = (uint64_t)avail;
    }
    for (int i = 0; i < anno_map->num_annos; i++){
        ret = configuration_get_value_longint(&config, RESOURCE_LP_NM,
            "available", anno_map->annotations[i].ptr, &avail);
        if (ret){
            fprintf(stderr,
                    "Could not find section:resource value:available@%s for "
                    "resource LP\n", anno_map->annotations[i].ptr);
            exit(1);
        }
        assert(avail > 0);
        avail_per_anno[i] = (uint64_t)avail;
    }
}
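
/* A configuration stanza consumed by resource_lp_configure() might look like
 * the sketch below, assuming RESOURCE_LP_NM expands to "resource" (see
 * resource-lp.h) and that annotated keys follow the usual "key@anno"
 * convention. The plain "available" key sets the capacity (an integer count
 * of resource units) for unannotated resource LPs; "available@foo" sets it
 * for LPs carrying annotation "foo" (values here are illustrative):
 *
 *   resource
 *   {
 *       available = "8589934592";
 *       available@foo = "1073741824";
 *   }
 */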

static void resource_lp_issue_event_base(
        enum resource_event type,
        uint64_t req,
        resource_token_t tok, /* only used in reserve_get/free */
        int block_on_unavail,
        tw_lp *sender,
        struct codes_mctx const * map_ctx,
        int return_tag,
        msg_header const *return_header,
        struct codes_cb_info const *cb)
{
    if (cb)
        SANITY_CHECK_CB(cb, resource_return);

    tw_lpid resource_lpid =
        codes_mctx_to_lpid(map_ctx, RESOURCE_LP_NM, sender->gid);

    tw_event *e = tw_event_new(resource_lpid, codes_local_latency(sender),
            sender);

    resource_msg *m = tw_event_data(e);

    msg_set_header(resource_magic, type, sender->gid, &m->i.h);
    m->i.req = req;
    m->i.tok = tok;
    m->i.block_on_unavail = block_on_unavail;
    if (map_ctx != NULL && cb != NULL && return_header != NULL) {
        m->i.cb.info = *cb;
        m->i.cb.h = *return_header;
        m->i.cb.tag = return_tag;
    }

    tw_event_send(e);
}

void resource_lp_get(
        uint64_t req,
        int block_on_unavail,
        tw_lp *sender,
        struct codes_mctx const * map_ctx,
        int return_tag,
        msg_header const *return_header,
        struct codes_cb_info const *cb)
{
    resource_lp_issue_event_base(RESOURCE_GET, req, 0, block_on_unavail,
            sender, map_ctx, return_tag, return_header, cb);
}
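
/* Caller-side sketch for resource_lp_get(). All names here are hypothetical
 * caller-side identifiers (my_magic, MY_RESOURCE_ACK, MY_TAG_RESOURCE,
 * my_cb_info, map_ctx); my_cb_info is assumed to be a codes_cb_info already
 * pointing at the header/tag/resource_return fields of the caller's own
 * message type (see codes-callback.h):
 *
 *   msg_header h;
 *   msg_set_header(my_magic, MY_RESOURCE_ACK, lp->gid, &h);
 *   resource_lp_get(
 *           1024,             // units requested
 *           1,                // 1: queue the request until satisfiable
 *           lp, map_ctx,
 *           MY_TAG_RESOURCE,  // tag echoed back in the ack
 *           &h,               // header template for the ack event
 *           &my_cb_info);
 *
 * The ack is delivered as an event to the LP named in h.src; its
 * resource_return carries ret (0 on success) and, for reserve calls, the
 * token identifying the reserved pool.
 */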

/* no callback for frees thus far */
void resource_lp_free(
        uint64_t req,
        tw_lp *sender,
        struct codes_mctx const * map_ctx)
{
    resource_lp_issue_event_base(RESOURCE_FREE, req, 0, -1, sender, map_ctx,
            0, NULL, NULL);
}
void resource_lp_reserve(
        uint64_t req,
        int block_on_unavail,
        tw_lp *sender,
        struct codes_mctx const * map_ctx,
        int return_tag,
        msg_header const *return_header,
        struct codes_cb_info const *cb)
{
    resource_lp_issue_event_base(RESOURCE_RESERVE, req, 0, block_on_unavail,
            sender, map_ctx, return_tag, return_header, cb);
}
void resource_lp_get_reserved(
        uint64_t req,
        resource_token_t tok,
        int block_on_unavail,
        tw_lp *sender,
        struct codes_mctx const * map_ctx,
        int return_tag,
        msg_header const *return_header,
        struct codes_cb_info const *cb)
{
    resource_lp_issue_event_base(RESOURCE_GET, req, tok, block_on_unavail,
            sender, map_ctx, return_tag, return_header, cb);
}
void resource_lp_free_reserved(
        uint64_t req,
        resource_token_t tok,
        tw_lp *sender,
        struct codes_mctx const * map_ctx)
{
    resource_lp_issue_event_base(RESOURCE_FREE, req, tok, -1,
            sender, map_ctx, 0, NULL, NULL);
}
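
/* Reservation flow sketch (hypothetical caller code; tags, headers, and the
 * tok field of the caller's state are illustrative). resource_lp_reserve()
 * carves a dedicated pool out of the general one; the token naming that pool
 * comes back in the resource_return of the ack, and the caller then passes it
 * to the *_reserved calls:
 *
 *   resource_lp_reserve(1<<20, 0, lp, map_ctx, MY_TAG_RESERVE, &h, &cb);
 *   // ...later, in the ack handler, stash the returned token:
 *   //   my_state->tok = <tok field of the delivered resource_return>;
 *   resource_lp_get_reserved(4096, my_state->tok, 1, lp, map_ctx,
 *           MY_TAG_GET, &h, &cb);
 *   // ...
 *   resource_lp_free_reserved(4096, my_state->tok, lp, map_ctx);
 */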

/* rc functions - thankfully, they only use codes-local-latency, so no need
 * to pass in any arguments */

static void resource_lp_issue_event_base_rc(tw_lp *sender){
    codes_local_latency_reverse(sender);
}

void resource_lp_get_rc(tw_lp *sender){
    resource_lp_issue_event_base_rc(sender);
}
void resource_lp_free_rc(tw_lp *sender){
    resource_lp_issue_event_base_rc(sender);
}
void resource_lp_reserve_rc(tw_lp *sender){
    resource_lp_issue_event_base_rc(sender);
}
void resource_lp_get_reserved_rc(tw_lp *sender){
    resource_lp_issue_event_base_rc(sender);
}
void resource_lp_free_reserved_rc(tw_lp *sender){
    resource_lp_issue_event_base_rc(sender);
}

/*
 * Local variables:
 *  c-indent-level: 4
 *  c-basic-offset: 4
 * End:
 *
 * vim: ts=8 sts=4 sw=4 expandtab
 */