/*
 * Copyright (C) 2014 University of Chicago.
 * See COPYRIGHT notice in top-level directory.
 *
*/

#include "codes/resource-lp.h"
#include "codes/resource.h"
#include "codes/codes_mapping.h"
#include "codes/configuration.h"
#include "codes/jenkins-hash.h"
#include "codes/quicklist.h"
#include "codes/lp-io.h"
#include "ross.h"
#include <assert.h>
#include <stdio.h>
#include <string.h>


/**** BEGIN SIMULATION DATA STRUCTURES ****/

static int resource_magic; /* use this as sanity check on events */

/* configuration globals (will be consumed by LP when they init) */
static uint64_t avail_unanno;
static uint64_t *avail_per_anno;
static const config_anno_map_t *anno_map;

typedef struct resource_state resource_state;
typedef struct resource_msg resource_msg;
typedef struct pending_op pending_op;

#define TOKEN_DUMMY ((resource_token_t)-1)

/* event types */
enum resource_event
{
    RESOURCE_GET = 100,
    RESOURCE_FREE,
    RESOURCE_DEQ,
    RESOURCE_RESERVE,
};
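
/* Rough event flow (derived from the handlers below): RESOURCE_GET either
 * grabs units immediately or, when block_on_unavail is set, parks the request
 * on a pending queue; RESOURCE_FREE returns units and self-sends a
 * RESOURCE_DEQ so parked requests are retried in FIFO order;
 * RESOURCE_RESERVE carves out a dedicated pool identified by a
 * resource_token_t. */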

struct resource_state {
    resource r;
    /* pending operations - if the resource is exhausted and we are using the
     * 'blocking' method, we need to stash the request parameters.
     * Index 0 is the general pool, index 1.. are the reservation-specific
     * pools. We take advantage of resource_token_t's status as a simple 
     * array index to do the proper indexing */
    struct qlist_head pending[MAX_RESERVE+1];
};

/* the following struct exists because we essentially need to cache a message
 * within a message for reverse computation (ewww) */
struct resource_msg_internal{
    msg_header h;
    /* request data */
    uint64_t req;
    resource_token_t tok; /* only for reserved calls */
    /* behavior when sending response to caller
     * 0 - send the callback immediately if the resource is unavailable.
     * 1 - send the callback when the resource becomes available (danger -
     *     deadlock possible) */
    int block_on_unavail; 
    /* callback data */
    msg_header h_callback;
    int msg_size;
    int msg_header_offset;
    int msg_callback_offset;
    /* user-provided data */
    int msg_callback_misc_size;
    int msg_callback_misc_offset;
    char msg_callback_misc[RESOURCE_MAX_CALLBACK_PAYLOAD];
}; 
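
/* Layout sketch (hypothetical caller-side struct, not part of this file):
 * the offsets above are interpreted relative to the start of the caller's own
 * event message, e.g.
 *
 *     struct caller_msg {
 *         msg_header h;          // msg_header_offset   = offsetof(struct caller_msg, h)
 *         resource_callback cb;  // msg_callback_offset = offsetof(struct caller_msg, cb)
 *         char tag[8];           // msg_callback_misc_offset = offsetof(struct caller_msg, tag)
 *     };
 *
 * resource_response() below memcpy's the header, the resource_callback, and
 * any misc payload into the caller's event at exactly these offsets. */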

struct resource_msg {
    struct resource_msg_internal i, i_rc;
    // for RC (aside from the message itself): the previous minimum resource
    // value
    uint64_t min_avail_rc;
};

struct pending_op {
    struct resource_msg_internal m;
    struct qlist_head ql;
};

/**** END SIMULATION DATA STRUCTURES ****/

/**** BEGIN LP, EVENT PROCESSING FUNCTION DECLS ****/

/* ROSS LP processing functions */  
static void resource_lp_ind_init(
        resource_state * ns,
        tw_lp * lp);
static void resource_event_handler(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp);
static void resource_rev_handler(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp);
static void resource_finalize(
        resource_state * ns,
        tw_lp * lp);

/* ROSS function pointer table for this LP */
static tw_lptype resource_lp = {
     (init_f) resource_lp_ind_init,
     (event_f) resource_event_handler,
     (revent_f) resource_rev_handler,
     (final_f)  resource_finalize, 
     (map_f) codes_mapping,
     sizeof(resource_state),
};

/**** END LP, EVENT PROCESSING FUNCTION DECLS ****/

/**** BEGIN IMPLEMENTATIONS ****/

void resource_lp_ind_init(
        resource_state * ns,
        tw_lp * lp){
    // get my annotation
    const char * anno = codes_mapping_get_annotation_by_lpid(lp->gid);
    if (anno == NULL){
        resource_init(avail_unanno, &ns->r);
    }
    else{
        int idx = configuration_get_annotation_index(anno, anno_map);
        if (idx < 0){
            tw_error("resource LP %lu: unable to find annotation "
                    "%s in configuration\n", lp->gid, anno);
        }
        else{
            resource_init(avail_per_anno[idx], &ns->r);
        }
    }
    int i;
    for (i = 0; i < MAX_RESERVE+1; i++){
        INIT_QLIST_HEAD(&ns->pending[i]);
    }
}

static void resource_response(
        struct resource_msg_internal *m,
        tw_lp *lp,
        int ret,
        resource_token_t tok){
    /* send return message */
    msg_header h;
    msg_set_header(m->h_callback.magic, m->h_callback.event_type, 
            lp->gid, &h);

    resource_callback c;
    c.ret = ret;
    c.tok = tok;

    /* before we send the message, sanity check the sizes */
    if (m->msg_size >= m->msg_header_offset+sizeof(h) &&
            m->msg_size >= m->msg_callback_offset+sizeof(c) &&
            m->msg_size >= m->msg_callback_offset+m->msg_callback_misc_size){
        tw_event *e = codes_event_new(m->h_callback.src, 
                codes_local_latency(lp), lp);
        void *msg = tw_event_data(e);
        memcpy(((char*)msg)+m->msg_header_offset, &h, sizeof(h));
        memcpy(((char*)msg)+m->msg_callback_offset, &c, sizeof(c));
        if (m->msg_callback_misc_size > 0){
            memcpy(((char*)msg)+m->msg_callback_misc_offset, 
                        m->msg_callback_misc, m->msg_callback_misc_size);
        }
        tw_event_send(e);
    }
    else{
        tw_error(TW_LOC, 
                "message size not large enough to hold header/callback/misc"
                " structures\n"
                "msg size: %3d, header   off/size:  %d, %d\n"
                "               callback off/size:  %d, %d\n"
                "               callback misc size: %d",
                m->msg_size, m->msg_header_offset, (int)sizeof(h),
                m->msg_callback_offset, (int)sizeof(c),
                m->msg_callback_misc_size);
    }
}
static void resource_response_rc(tw_lp *lp){
    codes_local_latency_reverse(lp);
}

/* bitfield usage:
 * c0 - enqueued a message 
 * c1 - sent an ack 
 * c2 - successfully got the resource */
static void handle_resource_get(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    int ret = 1;
    int send_ack = 1;
    // save the previous minimum for RC
    assert(!resource_get_min_avail(m->i.tok, &m->min_avail_rc, &ns->r));
    if (!qlist_empty(&ns->pending[m->i.tok]) || 
            (ret = resource_get(m->i.req, m->i.tok, &ns->r))){
        /* failed to acquire the resource */
        assert(ret != 2);
        if (m->i.block_on_unavail){
            /* queue up operation, save til later */
            b->c0 = 1;
            pending_op *op = malloc(sizeof(pending_op));
            op->m = m->i; /* no need to set rc msg here */
            qlist_add_tail(&op->ql, &ns->pending[m->i.tok]);
            send_ack = 0;
        }
    }
    if (send_ack){
        b->c1 = 1;
        resource_response(&m->i, lp, ret, TOKEN_DUMMY);
    }

    b->c2 = !ret;
}

/* bitfield usage:
 * c0 - enqueued a message 
 * c1 - sent an ack 
 * c2 - successfully got the resource */
static void handle_resource_get_rc(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    if (b->c0){
        assert(!qlist_empty(&ns->pending[m->i.tok]));
        struct qlist_head *ql = qlist_pop_back(&ns->pending[m->i.tok]);
        free(qlist_entry(ql, pending_op, ql));
    }
    else if (b->c1){
        resource_response_rc(lp);
    }

    if (b->c2){
        assert(!resource_restore_min_avail(m->i.tok, m->min_avail_rc, &ns->r));
        assert(!resource_free(m->i.req, m->i.tok, &ns->r));
    }
}

static void handle_resource_free(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    assert(!resource_free(m->i.req, m->i.tok, &ns->r));
    /* create an event to pop the next queue item */
    tw_event *e = codes_event_new(lp->gid, codes_local_latency(lp), lp);
    resource_msg *m_deq = tw_event_data(e);
    msg_set_header(resource_magic, RESOURCE_DEQ, lp->gid, &m_deq->i.h);
    m_deq->i.tok = m->i.tok; /* only tok is needed, all others grabbed from q */
    tw_event_send(e);
}
static void handle_resource_free_rc(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    assert(!resource_get(m->i.req, m->i.tok, &ns->r));
    codes_local_latency_reverse(lp);
}

/* bitfield usage:
 * c0 - queue was empty to begin with
 * c1 - assuming !c0, alloc succeeded */
static void handle_resource_deq(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    if (qlist_empty(&ns->pending[m->i.tok])){
        /* nothing to do */
        b->c0 = 1;
        return;
    }

    struct qlist_head *front = ns->pending[m->i.tok].next;
    pending_op *p = qlist_entry(front, pending_op, ql);
    assert(!resource_get_min_avail(m->i.tok, &m->min_avail_rc, &ns->r));
    int ret = resource_get(p->m.req, p->m.tok, &ns->r);
    assert(ret != 2);
    if (!ret){
        b->c1 = 1;
        /* success, dequeue (saving as rc) and send to client */
        qlist_del(front);
        m->i_rc = p->m;
        resource_response(&p->m, lp, ret, TOKEN_DUMMY);
        free(p);
        /* additionally attempt to dequeue next one down */
        tw_event *e = codes_event_new(lp->gid, codes_local_latency(lp), lp);
        resource_msg *m_deq = tw_event_data(e);
        msg_set_header(resource_magic, RESOURCE_DEQ, lp->gid, &m_deq->i.h);
        /* only tok is needed, all others grabbed from q */
        m_deq->i.tok = m->i.tok; 
        tw_event_send(e);
    }
    /* else do nothing */
}

/* bitfield usage:
 * c0 - dequeue+alloc success */ 
static void handle_resource_deq_rc(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    if (b->c0){
        return;
    }

    if (b->c1){
        /* add operation back to the front of the queue */
        pending_op *op = malloc(sizeof(pending_op));
        op->m = m->i_rc;
        qlist_add(&op->ql, &ns->pending[m->i.tok]);
        resource_response_rc(lp);
        assert(!resource_restore_min_avail(m->i.tok, m->min_avail_rc, &ns->r)); 
        assert(!resource_free(op->m.req, op->m.tok, &ns->r));
        /* reverse "deq next" op */
        codes_local_latency_reverse(lp);
    }
}

static void handle_resource_reserve(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    resource_token_t tok;
    int ret = resource_reserve(m->i.req, &tok, &ns->r);
    assert(!ret);
    resource_response(&m->i, lp, ret, tok);
}
static void handle_resource_reserve_rc(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    /* this reversal method is essentially a hack that relies on each
     * sequential reserve appending to the end of the list 
     * - we expect reserves to happen strictly at the beginning of the
     *   simulation */
    ns->r.num_tokens--;
    resource_response_rc(lp);
}

void resource_event_handler(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    assert(m->i.h.magic == resource_magic);
    switch(m->i.h.event_type){
        case RESOURCE_GET:
            handle_resource_get(ns,b,m,lp);
            break;
        case RESOURCE_FREE:
            handle_resource_free(ns,b,m,lp);
            break;
        case RESOURCE_DEQ:
            handle_resource_deq(ns,b,m,lp);
            break;
        case RESOURCE_RESERVE:
            handle_resource_reserve(ns,b,m,lp);
            break;
        default:
            assert(0);
    }
}
void resource_rev_handler(
        resource_state * ns,
        tw_bf * b,
        resource_msg * m,
        tw_lp * lp){
    assert(m->i.h.magic == resource_magic);
    switch(m->i.h.event_type){
        case RESOURCE_GET:
            handle_resource_get_rc(ns,b,m,lp);
            break;
        case RESOURCE_FREE:
            handle_resource_free_rc(ns,b,m,lp);
            break;
        case RESOURCE_DEQ:
            handle_resource_deq_rc(ns,b,m,lp);
            break;
        case RESOURCE_RESERVE:
            handle_resource_reserve_rc(ns,b,m,lp);
            break;
        default:
            assert(0);
    }
}

void resource_finalize(
        resource_state * ns,
        tw_lp * lp){
    struct qlist_head *ent;
    for (int i = 0; i < MAX_RESERVE+1; i++){
        qlist_for_each(ent, &ns->pending[i]){
            fprintf(stderr, "WARNING: resource LP %lu has a pending allocation\n",
                    lp->gid);
        }
    }

    char *out_buf = malloc(1<<12);
    int written;
    // see if I'm the "first" resource (currently doing it globally)
    if (codes_mapping_get_lp_relative_id(lp->gid, 0, 0) == 0){
        written = sprintf(out_buf, 
                "# format: <LP> <max used general> <max used token...>\n");
        lp_io_write(lp->gid, RESOURCE_LP_NM, written, out_buf);
    }
    written = sprintf(out_buf, "%lu", lp->gid);

    // compute peak resource usage
    // TODO: wrap this up in the resource interface
    for (int i = 0; i < ns->r.num_tokens+1; i++){
        written += sprintf(out_buf+written, " %lu", ns->r.max[i]-ns->r.min_avail[i]);
    }
    written += sprintf(out_buf+written, "\n");
    lp_io_write(lp->gid, RESOURCE_LP_NM, written, out_buf);
}

/**** END IMPLEMENTATIONS ****/

/**** BEGIN USER-FACING FUNCTIONS ****/
void resource_lp_init(){
    uint32_t h1=0, h2=0;

    bj_hashlittle2(RESOURCE_LP_NM, strlen(RESOURCE_LP_NM), &h1, &h2);
    resource_magic = h1+h2;

    lp_type_register(RESOURCE_LP_NM, &resource_lp);
}

void resource_lp_configure(){

    anno_map = codes_mapping_get_lp_anno_map(RESOURCE_LP_NM);
    avail_per_anno = (anno_map->num_annos > 0) ?
            malloc(anno_map->num_annos * sizeof(*avail_per_anno)) :
            NULL;
    // get the unannotated version
    long int avail;
    int ret;
    if (anno_map->has_unanno_lp > 0){
        ret = configuration_get_value_longint(&config, RESOURCE_LP_NM,
            "available", NULL, &avail);
        if (ret){
            fprintf(stderr,
                    "Could not find section:resource value:available for "
                    "resource LP\n");
            exit(1);
        }
        assert(avail > 0);
        avail_unanno = (uint64_t)avail;
    }
    for (uint64_t i = 0; i < anno_map->num_annos; i++){
        ret = configuration_get_value_longint(&config, RESOURCE_LP_NM,
            "available", anno_map->annotations[i], &avail);
        if (ret){
            fprintf(stderr,
                    "Could not find section:resource value:available@%s for "
                    "resource LP\n", anno_map->annotations[i]);
            exit(1);
        }
        assert(avail > 0);
        avail_per_anno[i] = (uint64_t)avail;
    }
}
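
/* For reference, a hypothetical configuration stanza that this routine would
 * consume (the key names match the lookups above; the values and the "foo"
 * annotation are made up, and the exact syntax assumes the standard CODES
 * configuration format):
 *
 *     resource
 *     {
 *         available = "1048576";
 *         available@foo = "524288";
 *     }
 */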

static void resource_lp_issue_event(
        msg_header *header,
        uint64_t req,
        resource_token_t tok, /* only used in reserve_get/free */
        int block_on_unavail,
        int msg_size,
        int msg_header_offset,
        int msg_callback_offset,
        int msg_callback_misc_size,
        int msg_callback_misc_offset,
        void *msg_callback_misc_data,
        enum resource_event type,
        tw_lp *sender){

    tw_lpid resource_lpid;

    /* map out the lpid of the resource */
    int mapping_rep_id, mapping_offset, dummy;
    char lp_group_name[MAX_NAME_LENGTH];
    // TODO: currently ignoring annotations... perhaps give annotation as a
    // parameter?
    codes_mapping_get_lp_info(sender->gid, lp_group_name, &dummy,
            NULL, &dummy, NULL,
            &mapping_rep_id, &mapping_offset);
    codes_mapping_get_lp_id(lp_group_name, RESOURCE_LP_NM, NULL, 1,
            mapping_rep_id, mapping_offset, &resource_lpid); 

    tw_event *e = codes_event_new(resource_lpid, codes_local_latency(sender),
            sender);

    /* set message info */
    resource_msg *m = tw_event_data(e);
    msg_set_header(resource_magic, type, sender->gid, &m->i.h);
    m->i.req = req;
    m->i.tok = tok;
    m->i.block_on_unavail = block_on_unavail;

    /* set callback info */
    if (header != NULL){
        m->i.h_callback = *header;
    }
    m->i.msg_size = msg_size;
    m->i.msg_header_offset = msg_header_offset;
    m->i.msg_callback_offset = msg_callback_offset;

    if (msg_callback_misc_size > 0){
        assert(msg_callback_misc_size <= RESOURCE_MAX_CALLBACK_PAYLOAD);
        m->i.msg_callback_misc_size = msg_callback_misc_size;
        m->i.msg_callback_misc_offset = msg_callback_misc_offset;
        memcpy(m->i.msg_callback_misc, msg_callback_misc_data,
                msg_callback_misc_size);
    }
    else{
        m->i.msg_callback_misc_size = 0;
        m->i.msg_callback_misc_offset = 0;
    }

    tw_event_send(e);
}

void resource_lp_get(
        msg_header *header,
        uint64_t req, 
        int block_on_unavail,
        int msg_size,
        int msg_header_offset,
        int msg_callback_offset,
        int msg_callback_misc_size,
        int msg_callback_misc_offset,
        void *msg_callback_misc_data,
        tw_lp *sender){
    resource_lp_issue_event(header, req, 0, block_on_unavail,
            msg_size, msg_header_offset, msg_callback_offset,
            msg_callback_misc_size, msg_callback_misc_offset,
            msg_callback_misc_data, RESOURCE_GET, sender);
}
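
/* Hypothetical caller-side sketch (names are illustrative, not part of this
 * API), reusing the struct caller_msg layout sketched near the top of this
 * file. An LP that wants 100 units and can tolerate blocking might do:
 *
 *     msg_header h;
 *     msg_set_header(caller_magic, CALLER_RESOURCE_ACK, lp->gid, &h);
 *     resource_lp_get(&h, 100, 1,
 *             sizeof(struct caller_msg),
 *             offsetof(struct caller_msg, h),
 *             offsetof(struct caller_msg, cb),
 *             0, 0, NULL, lp);
 *
 * The caller later reads the resource_callback out of the CALLER_RESOURCE_ACK
 * event it receives (cb.ret == 0 on success). On rollback of the event that
 * issued the request, call resource_lp_get_rc(lp). */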

/* no callback for frees thus far */
void resource_lp_free(uint64_t req, tw_lp *sender){
    resource_lp_issue_event(NULL, req, 0, -1, -1,-1,-1, 0, 0, NULL,
            RESOURCE_FREE, sender);
}
void resource_lp_reserve(
        msg_header *header, 
        uint64_t req,
        int block_on_unavail,
        int msg_size,
        int msg_header_offset,
        int msg_callback_offset,
        int msg_callback_misc_size,
        int msg_callback_misc_offset,
        void *msg_callback_misc_data,
        tw_lp *sender){
    resource_lp_issue_event(header, req, 0, block_on_unavail, msg_size,
            msg_header_offset, msg_callback_offset, msg_callback_misc_size,
            msg_callback_misc_offset, msg_callback_misc_data, RESOURCE_RESERVE,
            sender);
}
void resource_lp_get_reserved(
        msg_header *header,
        uint64_t req,
        resource_token_t tok,
        int block_on_unavail,
        int msg_size,
        int msg_header_offset,
        int msg_callback_offset,
        int msg_callback_misc_size,
        int msg_callback_misc_offset,
        void *msg_callback_misc_data,
        tw_lp *sender){
    resource_lp_issue_event(header, req, tok, block_on_unavail, msg_size,
            msg_header_offset, msg_callback_offset, msg_callback_misc_size,
            msg_callback_misc_offset, msg_callback_misc_data, RESOURCE_GET,
            sender);
}
void resource_lp_free_reserved(
        uint64_t req, 
        resource_token_t tok,
        tw_lp *sender){
    resource_lp_issue_event(NULL, req, tok, -1,-1,-1,-1, 0,0,NULL,
            RESOURCE_FREE, sender);
}
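
/* Hypothetical reservation flow (illustrative only): at startup an LP calls
 * resource_lp_reserve(...) and stashes the resource_token_t delivered in the
 * callback; afterwards it draws from and returns to that private pool, e.g.
 *
 *     resource_lp_get_reserved(&h, req, tok, 1,
 *             sizeof(struct caller_msg),
 *             offsetof(struct caller_msg, h),
 *             offsetof(struct caller_msg, cb),
 *             0, 0, NULL, lp);
 *     resource_lp_free_reserved(req, tok, lp);
 *
 * As noted in handle_resource_reserve_rc above, reserves are expected to
 * happen strictly at the beginning of the simulation. */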

/* rc functions - thankfully, they only use codes-local-latency, so no need 
 * to pass in any arguments */

static void resource_lp_issue_event_rc(tw_lp *sender){
    codes_local_latency_reverse(sender);
}

void resource_lp_get_rc(tw_lp *sender){
    resource_lp_issue_event_rc(sender);
}
void resource_lp_free_rc(tw_lp *sender){
    resource_lp_issue_event_rc(sender);
}
void resource_lp_reserve_rc(tw_lp *sender){
    resource_lp_issue_event_rc(sender);
}
void resource_lp_get_reserved_rc(tw_lp *sender){
    resource_lp_issue_event_rc(sender);
}
void resource_lp_free_reserved_rc(tw_lp *sender){
    resource_lp_issue_event_rc(sender);
}

/*
 * Local variables:
 *  c-indent-level: 4
 *  c-basic-offset: 4
 * End:
 *
 * vim: ts=8 sts=4 sw=4 expandtab
 */