/*
 * Copyright (C) 2014 University of Chicago.
 * See COPYRIGHT notice in top-level directory.
 *
 */

#include <codes/codes-callback.h>
#include <codes/resource-lp.h>
#include <codes/resource.h>
#include <codes/codes_mapping.h>
#include <codes/configuration.h>
#include <codes/jenkins-hash.h>
#include <codes/quicklist.h>
#include <codes/lp-io.h>
#include <ross.h>
#include <assert.h>
#include <stdio.h>
#include <string.h>


/**** BEGIN SIMULATION DATA STRUCTURES ****/

static int resource_magic; /* use this as sanity check on events */

/* configuration globals (will be consumed by LP when they init) */
static uint64_t avail_unanno;
static uint64_t *avail_per_anno;
static const config_anno_map_t *anno_map;

typedef struct resource_state resource_state;
typedef struct resource_msg resource_msg;
typedef struct pending_op pending_op;

#define TOKEN_DUMMY ((resource_token_t)-1)

/* event types */
enum resource_event
{
    RESOURCE_GET = 100,
    RESOURCE_FREE,
    RESOURCE_DEQ,
    RESOURCE_RESERVE,
};

struct resource_state {
    resource r;
    /* pending operations - if we run out of the resource and the caller used
     * the 'blocking' method, the request parameters are stashed here until a
     * later free makes space.
     * Index 0 is the general pool, index 1.. are the reservation-specific
     * pools. We take advantage of resource_token_t's status as a simple
     * array index to do the proper indexing */
    struct qlist_head pending[MAX_RESERVE+1];
};

/* following struct exists because we want to basically cache a message within
 * a message for rc (ewww) */
struct resource_msg_internal{
    msg_header h;
    /* request data */
    uint64_t req;
    resource_token_t tok; /* only for reserved calls */
    /* behavior when sending response to caller
     * 0 - send the callback immediately if resource unavailable.
     * 1 - send the callback when memory is available (danger - deadlock
     * possible) */
    int block_on_unavail;
    /* callback data */
    struct codes_cb_params cb;
};
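
/* Caller-side sketch of the two block_on_unavail modes (illustrative only;
 * 'lp', 'h', 'cb', 'TAG', and 'nbytes' are placeholders for a model LP's own
 * state, and CODES_MCTX_DEFAULT is assumed as the mapping context):
 *
 *   // non-blocking: callback fires immediately with a nonzero ret if no space
 *   resource_lp_get(nbytes, 0, lp, CODES_MCTX_DEFAULT, TAG, &h, &cb);
 *
 *   // blocking: callback is deferred until a later free makes space
 *   // (deadlock possible if nothing ever frees)
 *   resource_lp_get(nbytes, 1, lp, CODES_MCTX_DEFAULT, TAG, &h, &cb);
 */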

struct resource_msg {
    struct resource_msg_internal i, i_rc;
    // for RC (aside from the message itself): the previous minimum resource
    // value
    uint64_t min_avail_rc;
};

struct pending_op {
    struct resource_msg_internal m;
    struct qlist_head ql;
};

/**** END SIMULATION DATA STRUCTURES ****/

/**** BEGIN LP, EVENT PROCESSING FUNCTION DECLS ****/

/* ROSS LP processing functions */  
static void resource_lp_ind_init(
        resource_state * ns,
        tw_lp * lp);
static void resource_event_handler(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp);
static void resource_rev_handler(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp);
static void resource_finalize(
        resource_state * ns,
        tw_lp * lp);

/* ROSS function pointer table for this LP */
static tw_lptype resource_lp = {
    (init_f) resource_lp_ind_init,
    (pre_run_f) NULL,
    (event_f) resource_event_handler,
    (revent_f) resource_rev_handler,
    (final_f)  resource_finalize,
    (map_f) codes_mapping,
    sizeof(resource_state),
};

/**** END LP, EVENT PROCESSING FUNCTION DECLS ****/

/**** BEGIN IMPLEMENTATIONS ****/

void resource_lp_ind_init(
        resource_state * ns,
        tw_lp * lp){
    // get my annotation
    const char * anno = codes_mapping_get_annotation_by_lpid(lp->gid);
    if (anno == NULL){
        resource_init(avail_unanno, &ns->r);
    }
    else{
        int idx = configuration_get_annotation_index(anno, anno_map);
        if (idx < 0){
            tw_error(TW_LOC, "resource LP %lu: unable to find annotation "
                    "%s in configuration\n", lp->gid, anno);
        }
        else{
            resource_init(avail_per_anno[idx], &ns->r);
        }
    }
    int i;
    for (i = 0; i < MAX_RESERVE+1; i++){
        INIT_QLIST_HEAD(&ns->pending[i]);
    }
}

static void resource_response(
        struct codes_cb_params const * p,
        tw_lp *lp,
        int ret,
        resource_token_t tok)
{
    SANITY_CHECK_CB(&p->info, resource_return);

    tw_event *e = tw_event_new(p->h.src, codes_local_latency(lp), lp);
    void * m = tw_event_data(e);

    GET_INIT_CB_PTRS(p, m, lp->gid, h, tag, rc, resource_return);

    rc->ret = ret;
    rc->tok = tok;

    tw_event_send(e);
}
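
/* Caller-side sketch of consuming the resource_return callback sent above
 * (illustrative only; 'foo_msg', 'foo_state', 'TAG_RESOURCE_CB', and the
 * helper calls are hypothetical -- the real layout is whatever offsets the
 * caller registered in its codes_cb_info):
 *
 *   void foo_event(foo_state *ns, tw_bf *b, foo_msg *m, tw_lp *lp) {
 *       if (m->tag == TAG_RESOURCE_CB) {
 *           if (m->resource_cb.ret == 0)
 *               use_allocation(ns, m->resource_cb.tok);  // request granted
 *           else
 *               handle_denied(ns);  // non-blocking request, no space left
 *       }
 *   }
 */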

static void resource_response_rc(tw_lp *lp){
    codes_local_latency_reverse(lp);
}

/* bitfield usage:
 * c0 - enqueued a message 
 * c1 - sent an ack 
 * c2 - successfully got the resource */
static void handle_resource_get(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    int ret = 1;
    int send_ack = 1;
    // save the previous minimum for RC
    assert(!resource_get_min_avail(m->i.tok, &m->min_avail_rc, &ns->r));
    if (!qlist_empty(&ns->pending[m->i.tok]) || 
            (ret = resource_get(m->i.req, m->i.tok, &ns->r))){
        /* failed to get the resource */
        assert(ret != 2);
        if (m->i.block_on_unavail){
            /* queue up operation, save til later */
            b->c0 = 1;
            pending_op *op = (pending_op*)malloc(sizeof(pending_op));
            op->m = m->i; /* no need to set rc msg here */
            qlist_add_tail(&op->ql, &ns->pending[m->i.tok]);
            send_ack = 0;
        }
    }
    if (send_ack){
        b->c1 = 1;
        resource_response(&m->i.cb, lp, ret, TOKEN_DUMMY);
    }

    b->c2 = !ret;
}

/* bitfield usage:
 * c0 - enqueued a message 
 * c1 - sent an ack 
 * c2 - successfully got the resource */
static void handle_resource_get_rc(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    if (b->c0){
        assert(!qlist_empty(&ns->pending[m->i.tok]));
        struct qlist_head *ql = qlist_pop_back(&ns->pending[m->i.tok]);
        free(qlist_entry(ql, pending_op, ql));
    }
    else if (b->c1){
        resource_response_rc(lp);
    }

    if (b->c2){
        assert(!resource_restore_min_avail(m->i.tok, m->min_avail_rc, &ns->r));
        assert(!resource_free(m->i.req, m->i.tok, &ns->r));
    }
}

static void handle_resource_free(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    assert(!resource_free(m->i.req, m->i.tok, &ns->r));
    /* create an event to pop the next queue item */
    tw_event *e = codes_event_new(lp->gid, codes_local_latency(lp), lp);
    resource_msg *m_deq = (resource_msg*)tw_event_data(e);
    msg_set_header(resource_magic, RESOURCE_DEQ, lp->gid, &m_deq->i.h);
    m_deq->i.tok = m->i.tok; /* only tok is needed, all others grabbed from q */
    tw_event_send(e);
}
static void handle_resource_free_rc(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    assert(!resource_get(m->i.req, m->i.tok, &ns->r));
    codes_local_latency_reverse(lp);
}

/* bitfield usage:
 * c0 - queue was empty to begin with
 * c1 - assuming !c0, alloc succeeded */ 
static void handle_resource_deq(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    if (qlist_empty(&ns->pending[m->i.tok])){
        /* nothing to do */
        b->c0 = 1;
        return;
    }

    struct qlist_head *front = ns->pending[m->i.tok].next;
    pending_op *p = qlist_entry(front, pending_op, ql);
    assert(!resource_get_min_avail(m->i.tok, &m->min_avail_rc, &ns->r));
    int ret = resource_get(p->m.req, p->m.tok, &ns->r);
    assert(ret != 2);
    if (!ret){
        b->c1 = 1;
        /* success, dequeue (saving as rc) and send to client */
        qlist_del(front);
        m->i_rc = p->m;
        resource_response(&p->m.cb, lp, ret, TOKEN_DUMMY);
        free(p);
        /* additionally attempt to dequeue next one down */
        tw_event *e = codes_event_new(lp->gid, codes_local_latency(lp), lp);
        resource_msg *m_deq = (resource_msg*)tw_event_data(e);
        msg_set_header(resource_magic, RESOURCE_DEQ, lp->gid, &m_deq->i.h);
        /* only tok is needed, all others grabbed from q */
        m_deq->i.tok = m->i.tok; 
        tw_event_send(e);
    }
    /* else do nothing */
}

/* bitfield usage:
 * c0 - dequeue+alloc success */ 
static void handle_resource_deq_rc(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    if (b->c0){
        return;
    }

    if (b->c1){
        /* add operation back to the front of the queue */
        pending_op *op = (pending_op*)malloc(sizeof(pending_op));
        op->m = m->i_rc;
        qlist_add(&op->ql, &ns->pending[m->i.tok]);
        resource_response_rc(lp);
        assert(!resource_restore_min_avail(m->i.tok, m->min_avail_rc, &ns->r));
        assert(!resource_free(op->m.req, op->m.tok, &ns->r));
        /* reverse "deq next" op */
        codes_local_latency_reverse(lp);
    }
}

static void handle_resource_reserve(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    resource_token_t tok;
    int ret = resource_reserve(m->i.req, &tok, &ns->r);
    assert(!ret);
    resource_response(&m->i.cb, lp, ret, tok);
}
static void handle_resource_reserve_rc(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    /* this reversal method is essentially a hack that relies on each
     * sequential reserve appending to the end of the list 
     * - we expect reserves to happen strictly at the beginning of the
     *   simulation */
    ns->r.num_tokens--;
    resource_response_rc(lp);
}

void resource_event_handler(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    assert(m->i.h.magic == resource_magic);
    switch(m->i.h.event_type){
        case RESOURCE_GET:
            handle_resource_get(ns,b,m,lp);
            break;
        case RESOURCE_FREE:
            handle_resource_free(ns,b,m,lp);
            break;
        case RESOURCE_DEQ:
            handle_resource_deq(ns,b,m,lp);
            break;
        case RESOURCE_RESERVE:
            handle_resource_reserve(ns,b,m,lp);
            break;
        default:
            assert(0);
    }
}
void resource_rev_handler(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    assert(m->i.h.magic == resource_magic);
    switch(m->i.h.event_type){
        case RESOURCE_GET:
            handle_resource_get_rc(ns,b,m,lp);
            break;
        case RESOURCE_FREE:
            handle_resource_free_rc(ns,b,m,lp);
            break;
        case RESOURCE_DEQ:
            handle_resource_deq_rc(ns,b,m,lp);
            break;
        case RESOURCE_RESERVE:
            handle_resource_reserve_rc(ns,b,m,lp);
            break;
        default:
            assert(0);
    }
}

void resource_finalize(
        resource_state * ns,
        tw_lp * lp){
    struct qlist_head *ent;
    for (int i = 0; i < MAX_RESERVE+1; i++){
        qlist_for_each(ent, &ns->pending[i]){
            fprintf(stderr, "WARNING: resource LP %lu has a pending allocation\n",
                    lp->gid);
        }
    }

    char *out_buf = (char*)malloc(1<<12);
    int written;
    // see if I'm the "first" resource (currently doing it globally)
    if (codes_mapping_get_lp_relative_id(lp->gid, 0, 0) == 0){
        written = sprintf(out_buf,
                "# format: <LP> <max used general> <max used token...>\n");
        lp_io_write(lp->gid, RESOURCE_LP_NM, written, out_buf);
    }
    written = sprintf(out_buf, "%lu", lp->gid);

    // compute peak resource usage
    // TODO: wrap this up in the resource interface
    for (int i = 0; i < ns->r.num_tokens+1; i++){
        written += sprintf(out_buf+written, " %lu", ns->r.max[i]-ns->r.min_avail[i]);
    }
    written += sprintf(out_buf+written, "\n");
    lp_io_write(lp->gid, RESOURCE_LP_NM, written, out_buf);
}
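
/* For reference, each record written above is one line per resource LP:
 * the LP id, the peak usage of the general pool, then one peak-usage column
 * per reservation token. A line might look like (hypothetical values, one
 * reservation):
 *   42 1048576 65536
 */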

/**** END IMPLEMENTATIONS ****/

/**** BEGIN USER-FACING FUNCTIONS ****/
void resource_lp_init(){
    uint32_t h1=0, h2=0;

    bj_hashlittle2(RESOURCE_LP_NM, strlen(RESOURCE_LP_NM), &h1, &h2);
    resource_magic = h1+h2;

    lp_type_register(RESOURCE_LP_NM, &resource_lp);
}

void resource_lp_configure(){

    anno_map = codes_mapping_get_lp_anno_map(RESOURCE_LP_NM);
    avail_per_anno = (anno_map->num_annos > 0) ?
        (uint64_t*)malloc(anno_map->num_annos * sizeof(*avail_per_anno)) :
        NULL;
    // get the unannotated version
    long int avail;
    int ret;
    if (anno_map->has_unanno_lp > 0){
        ret = configuration_get_value_longint(&config, RESOURCE_LP_NM,
            "available", NULL, &avail);
        if (ret){
            fprintf(stderr,
                    "Could not find section:resource value:available for "
                    "resource LP\n");
            exit(1);
        }
        assert(avail > 0);
        avail_unanno = (uint64_t)avail;
    }
    for (uint64_t i = 0; i < anno_map->num_annos; i++){
        ret = configuration_get_value_longint(&config, RESOURCE_LP_NM,
            "available", anno_map->annotations[i], &avail);
        if (ret){
            fprintf(stderr,
                    "Could not find section:resource value:available@%s for "
                    "resource LP\n", anno_map->annotations[i]);
            exit(1);
        }
        assert(avail > 0);
        avail_per_anno[i] = (uint64_t)avail;
    }
}
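
/* Configuration sketch matching the lookups above (illustrative; the section
 * name is assumed to be RESOURCE_LP_NM's value, the "@foo" annotation and the
 * byte counts are placeholders):
 *
 *   resource
 *   {
 *       available = "1073741824";
 *       available@foo = "536870912";
 *   }
 */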

static void resource_lp_issue_event_base(
        enum resource_event type,
        uint64_t req,
        resource_token_t tok, /* only used in reserve_get/free */
        int block_on_unavail,
        tw_lp *sender,
        struct codes_mctx const * map_ctx,
        int return_tag,
        msg_header const *return_header,
        struct codes_cb_info const *cb)
{
    if (cb)
        SANITY_CHECK_CB(cb, resource_return);

    tw_lpid resource_lpid =
        codes_mctx_to_lpid(map_ctx, RESOURCE_LP_NM, sender->gid);

    tw_event *e = tw_event_new(resource_lpid, codes_local_latency(sender),
            sender);

    resource_msg *m = tw_event_data(e);

    msg_set_header(resource_magic, type, sender->gid, &m->i.h);
    m->i.req = req;
    m->i.tok = tok;
    m->i.block_on_unavail = block_on_unavail;
    if (map_ctx != NULL && cb != NULL && return_header != NULL) {
        m->i.cb.info = *cb;
        m->i.cb.h = *return_header;
        m->i.cb.tag = return_tag;
    }

    tw_event_send(e);
}

void resource_lp_get(
        uint64_t req,
        int block_on_unavail,
        tw_lp *sender,
        struct codes_mctx const * map_ctx,
        int return_tag,
        msg_header const *return_header,
        struct codes_cb_info const *cb)
{
    resource_lp_issue_event_base(RESOURCE_GET, req, 0, block_on_unavail,
            sender, map_ctx, return_tag, return_header, cb);
}

/* no callback for frees thus far */
void resource_lp_free(
        uint64_t req,
        tw_lp *sender,
        struct codes_mctx const * map_ctx)
{
    resource_lp_issue_event_base(RESOURCE_FREE, req, 0, -1, sender, map_ctx,
            0, NULL, NULL);
}
void resource_lp_reserve(
        uint64_t req,
        int block_on_unavail,
        tw_lp *sender,
        struct codes_mctx const * map_ctx,
        int return_tag,
        msg_header const *return_header,
        struct codes_cb_info const *cb)
{
    resource_lp_issue_event_base(RESOURCE_RESERVE, req, 0, block_on_unavail,
            sender, map_ctx, return_tag, return_header, cb);
}
void resource_lp_get_reserved(
        uint64_t req,
        resource_token_t tok,
        int block_on_unavail,
        tw_lp *sender,
        struct codes_mctx const * map_ctx,
        int return_tag,
        msg_header const *return_header,
        struct codes_cb_info const *cb)
{
    resource_lp_issue_event_base(RESOURCE_GET, req, tok, block_on_unavail,
            sender, map_ctx, return_tag, return_header, cb);
}
void resource_lp_free_reserved(
        uint64_t req,
        resource_token_t tok,
        tw_lp *sender,
        struct codes_mctx const * map_ctx)
{
    resource_lp_issue_event_base(RESOURCE_FREE, req, tok, -1,
            sender, map_ctx, 0, NULL, NULL);
}
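
/* Reservation-flow sketch (illustrative; 'lp', 'h', 'cb', 'TAG', 'pool_bytes',
 * 'nbytes', and 'tok' are placeholders, and CODES_MCTX_DEFAULT is assumed as
 * the mapping context; reserves are expected at simulation start):
 *
 *   // 1. carve out a private pool; the token arrives in the callback's
 *   //    resource_return.tok
 *   resource_lp_reserve(pool_bytes, 0, lp, CODES_MCTX_DEFAULT, TAG, &h, &cb);
 *
 *   // 2. later, allocate and release against that pool using the token
 *   resource_lp_get_reserved(nbytes, tok, 1, lp, CODES_MCTX_DEFAULT,
 *           TAG, &h, &cb);
 *   resource_lp_free_reserved(nbytes, tok, lp, CODES_MCTX_DEFAULT);
 */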

/* rc functions - thankfully, they only use codes-local-latency, so no need 
 * to pass in any arguments */

static void resource_lp_issue_event_base_rc(tw_lp *sender){
    codes_local_latency_reverse(sender);
}

void resource_lp_get_rc(tw_lp *sender){
    resource_lp_issue_event_base_rc(sender);
}
void resource_lp_free_rc(tw_lp *sender){
    resource_lp_issue_event_base_rc(sender);
}
void resource_lp_reserve_rc(tw_lp *sender){
    resource_lp_issue_event_base_rc(sender);
}
void resource_lp_get_reserved_rc(tw_lp *sender){
    resource_lp_issue_event_base_rc(sender);
}
void resource_lp_free_reserved_rc(tw_lp *sender){
    resource_lp_issue_event_base_rc(sender);
}

/*
 * Local variables:
 *  c-indent-level: 4
 *  c-basic-offset: 4
 * End:
 *
 * vim: ts=8 sts=4 sw=4 expandtab
 */