
/*
 * (C) 2015 The University of Chicago
 * 
 * See COPYRIGHT in top-level directory.
 */

#include <assert.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <abt.h>
#include <abt-snoozer.h>
#include <time.h>
#include <math.h>

#include "margo.h"
#include "margo-timer.h"
#include "utlist.h"

#define MERCURY_PROGRESS_TIMEOUT_UB 100 /* 100 milliseconds */

struct margo_instance
{
    /* provided by caller */
    hg_context_t *hg_context;
    hg_class_t *hg_class;
    ABT_pool handler_pool;
    ABT_pool progress_pool;

    /* internal to margo for this particular instance */
    ABT_thread hg_progress_tid;
    int hg_progress_shutdown_flag;
    ABT_xstream progress_xstream;
    int owns_progress_pool;
    ABT_xstream *rpc_xstreams;
    int num_handler_pool_threads;

    /* control logic for callers waiting on margo to be finalized */
    int finalize_flag;
    int waiters_in_progress_pool;
    int refcount;
    ABT_mutex finalize_mutex;
    ABT_cond finalize_cond;

    int table_index;
};

struct margo_handler_mapping
{
    hg_class_t *class;
    margo_instance_id mid;
};

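/* argument bundle passed to the HG completion callbacks below; in_pool
 * records whether the caller was executing in the progress pool so the
 * waiter count can be decremented when the callback fires
 */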
struct margo_cb_arg
{
    ABT_eventual *eventual;
    margo_instance_id mid;
    char in_pool;
};

#define MAX_HANDLER_MAPPING 8
static int handler_mapping_table_size = 0;
static struct margo_handler_mapping handler_mapping_table[MAX_HANDLER_MAPPING] = {0};

static void hg_progress_fn(void* foo);
static int margo_xstream_is_in_progress_pool(margo_instance_id mid);

struct handler_entry
{
    void* fn;
    hg_handle_t handle;
    struct handler_entry *next; 
};

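/* set up Argobots pools and execution streams according to the caller's
 * use_progress_thread and rpc_thread_count preferences, then delegate the
 * rest of initialization to margo_init_pool()
 */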
margo_instance_id margo_init(int use_progress_thread, int rpc_thread_count,
    hg_context_t *hg_context)
{
    struct margo_instance *mid = MARGO_INSTANCE_NULL;
    ABT_xstream progress_xstream = ABT_XSTREAM_NULL;
    ABT_pool progress_pool = ABT_POOL_NULL;
    ABT_xstream *rpc_xstreams = NULL;
    ABT_xstream rpc_xstream = ABT_XSTREAM_NULL;
    ABT_pool rpc_pool = ABT_POOL_NULL;
    int ret;
    int i;

    if (use_progress_thread)
    {
        ret = ABT_snoozer_xstream_create(1, &progress_pool, &progress_xstream);
        if (ret != ABT_SUCCESS) goto err;
    }
    else
    {
        ret = ABT_xstream_self(&progress_xstream);
        if (ret != ABT_SUCCESS) goto err;
        ret = ABT_xstream_get_main_pools(progress_xstream, 1, &progress_pool);
        if (ret != ABT_SUCCESS) goto err;
    }

    if (rpc_thread_count > 0)
    {
        rpc_xstreams = malloc(rpc_thread_count * sizeof(*rpc_xstreams));
        if (rpc_xstreams == NULL) goto err;
        ret = ABT_snoozer_xstream_create(rpc_thread_count, &rpc_pool,
                rpc_xstreams);
        if (ret != ABT_SUCCESS) goto err;
    }
    else if (rpc_thread_count == 0)
    {
        ret = ABT_xstream_self(&rpc_xstream);
        if (ret != ABT_SUCCESS) goto err;
        ret = ABT_xstream_get_main_pools(rpc_xstream, 1, &rpc_pool);
        if (ret != ABT_SUCCESS) goto err;
    }
    else
    {
        rpc_pool = progress_pool;
    }

    mid = margo_init_pool(progress_pool, rpc_pool, hg_context);
    if (mid == MARGO_INSTANCE_NULL) goto err;

    mid->owns_progress_pool = use_progress_thread;
    mid->progress_xstream = progress_xstream;
    mid->num_handler_pool_threads = rpc_thread_count < 0 ? 0 : rpc_thread_count;
    mid->rpc_xstreams = rpc_xstreams;
    return mid;

err:
    if (use_progress_thread && progress_xstream != ABT_XSTREAM_NULL)
    {
        ABT_xstream_join(progress_xstream);
        ABT_xstream_free(&progress_xstream);
    }
    if (rpc_thread_count > 0 && rpc_xstreams != NULL)
    {
        for (i = 0; i < rpc_thread_count; i++)
        {
            ABT_xstream_join(rpc_xstreams[i]);
            ABT_xstream_free(&rpc_xstreams[i]);
        }
        free(rpc_xstreams);
    }
    return MARGO_INSTANCE_NULL;
}

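/* core initialization: allocate and populate the margo instance, start the
 * timer subsystem, launch the Mercury progress ULT, and register the
 * instance in the class-to-instance mapping table
 */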
margo_instance_id margo_init_pool(ABT_pool progress_pool, ABT_pool handler_pool,
    hg_context_t *hg_context)
{
    int ret;
    struct margo_instance *mid;

    if(handler_mapping_table_size >= MAX_HANDLER_MAPPING)
        return(MARGO_INSTANCE_NULL);

    mid = malloc(sizeof(*mid));
    if(!mid)
        return(MARGO_INSTANCE_NULL);
    memset(mid, 0, sizeof(*mid));

    ABT_mutex_create(&mid->finalize_mutex);
    ABT_cond_create(&mid->finalize_cond);

    mid->progress_pool = progress_pool;
    mid->handler_pool = handler_pool;
    mid->hg_class = HG_Context_get_class(hg_context);
    mid->hg_context = hg_context;
    mid->refcount = 1;

    ret = margo_timer_instance_init(mid);
    if(ret != 0)
    {
        fprintf(stderr, "Error: margo_timer_instance_init()\n");
        free(mid);
        return(MARGO_INSTANCE_NULL);
    }

    ret = ABT_thread_create(mid->progress_pool, hg_progress_fn, mid,
        ABT_THREAD_ATTR_NULL, &mid->hg_progress_tid);
    if(ret != 0)
    {
        fprintf(stderr, "Error: ABT_thread_create()\n");
        /* release the timer state initialized above before bailing out */
        margo_timer_instance_finalize(mid);
        free(mid);
        return(MARGO_INSTANCE_NULL);
    }

    handler_mapping_table[handler_mapping_table_size].mid = mid;
    handler_mapping_table[handler_mapping_table_size].class = mid->hg_class;
    mid->table_index = handler_mapping_table_size;
    handler_mapping_table_size++;

    return mid;
}

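/* tear down instance state; called by whichever party (margo_finalize() or
 * the last margo_wait_for_finalize() waiter) drops the refcount to zero
 */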
static void margo_cleanup(margo_instance_id mid)
{
    int i;

    margo_timer_instance_finalize(mid);

    ABT_mutex_free(&mid->finalize_mutex);
    ABT_cond_free(&mid->finalize_cond);

    if (mid->owns_progress_pool)
    {
        ABT_xstream_join(mid->progress_xstream);
        ABT_xstream_free(&mid->progress_xstream);
    }

    if (mid->num_handler_pool_threads > 0)
    {
        for (i = 0; i < mid->num_handler_pool_threads; i++)
        {
            ABT_xstream_join(mid->rpc_xstreams[i]);
            ABT_xstream_free(&mid->rpc_xstreams[i]);
        }
        free(mid->rpc_xstreams);
    }

    free(mid);
}

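/* stop the progress ULT, remove this instance from the mapping table, and
 * wake any threads blocked in margo_wait_for_finalize()
 */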
void margo_finalize(margo_instance_id mid)
{
    int i;
    int do_cleanup;

    /* tell progress thread to wrap things up */
    mid->hg_progress_shutdown_flag = 1;

    /* wait for it to shut down cleanly */
    ABT_thread_join(mid->hg_progress_tid);
    ABT_thread_free(&mid->hg_progress_tid);

    for(i=mid->table_index; i<(handler_mapping_table_size-1); i++)
    {
        handler_mapping_table[i] = handler_mapping_table[i+1];
    }
    handler_mapping_table_size--;

    ABT_mutex_lock(mid->finalize_mutex);
    mid->finalize_flag = 1;
    ABT_cond_broadcast(mid->finalize_cond);

    mid->refcount--;
    do_cleanup = mid->refcount == 0;

    ABT_mutex_unlock(mid->finalize_mutex);

    /* if no one was waiting on the finalize at the time of the finalize
     * broadcast, then we're safe to clean up.  Otherwise, let the last
     * waiter do it.
     */
    if (do_cleanup)
        margo_cleanup(mid);

    return;
}

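/* block the calling ULT until margo_finalize() is invoked on this instance */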
void margo_wait_for_finalize(margo_instance_id mid)
{
    int in_pool = 0;
    int do_cleanup;

    /* Is this waiter in the same pool as the pool running the progress
     * thread?
     */
    if(margo_xstream_is_in_progress_pool(mid))
        in_pool = 1;

    ABT_mutex_lock(mid->finalize_mutex);

        mid->waiters_in_progress_pool += in_pool;
        mid->refcount++;

        while(!mid->finalize_flag)
            ABT_cond_wait(mid->finalize_cond, mid->finalize_mutex);

        mid->refcount--;
        do_cleanup = mid->refcount == 0;

    ABT_mutex_unlock(mid->finalize_mutex);

    if (do_cleanup)
        margo_cleanup(mid);

    return;
}

/* dedicated thread function to drive Mercury progress */
static void hg_progress_fn(void* foo)
{
    int ret;
    unsigned int actual_count;
    struct margo_instance *mid = (struct margo_instance *)foo;
    size_t size;
    unsigned int hg_progress_timeout = MERCURY_PROGRESS_TIMEOUT_UB;
    double next_timer_exp;
    int trigger_happened;

    while(!mid->hg_progress_shutdown_flag)
    {
        trigger_happened = 0;
        do {
            ret = HG_Trigger(mid->hg_context, 0, 1, &actual_count);
            if(ret == HG_SUCCESS && actual_count > 0)
                trigger_happened = 1;
        } while((ret == HG_SUCCESS) && actual_count && !mid->hg_progress_shutdown_flag);

        if(trigger_happened)
            ABT_thread_yield();

        ABT_pool_get_total_size(mid->progress_pool, &size);
        /* Are there any other threads executing in this pool that are *not*
         * blocked on margo_wait_for_finalize()?  If so, then we can't
         * sleep here or else those threads will not get a chance to
         * execute.
         */
        if(size > mid->waiters_in_progress_pool)
        {
            //printf("DEBUG: Margo progress function running while other ULTs are eligible for execution (size: %d, waiters: %d.\n", size, mid->waiters_in_progress_pool);

            /* TODO: this is being executed more than is necessary (i.e.
             * in cases where there are other legitimate ULTs eligible
             * for execution that are not blocking on any events, Margo
             * or otherwise). Maybe we need an ABT scheduling tweak here
             * to make sure that this ULT is the lowest priority in that
             * scenario.
             */
            ABT_thread_yield();
            HG_Progress(mid->hg_context, 0);
        }
        else
        {
            ret = margo_timer_get_next_expiration(mid, &next_timer_exp);
            if(ret == 0)
            {
                /* there is a queued timer; don't block long enough
                 * to keep this timer waiting
                 */
                if(next_timer_exp >= 0.0)
                {
                    next_timer_exp *= 1000; /* convert to milliseconds */
                    if(next_timer_exp < MERCURY_PROGRESS_TIMEOUT_UB)
                        hg_progress_timeout = (unsigned int)next_timer_exp;
                }
                else
                {
                    hg_progress_timeout = 0;
                }
            }
            HG_Progress(mid->hg_context, hg_progress_timeout);
        }

        /* check for any expired timers */
        margo_check_timers(mid);
    }

    return;
}

ABT_pool* margo_get_handler_pool(margo_instance_id mid)
{
    return(&mid->handler_pool);
}

hg_context_t* margo_get_context(margo_instance_id mid)
{
    return(mid->hg_context);
}

hg_class_t* margo_get_class(margo_instance_id mid)
{
    return(mid->hg_class);
}


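/* completion callback used by margo_forward()/margo_respond(); wakes the
 * ULT blocked on the eventual and adjusts the progress-pool waiter count
 */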
static hg_return_t margo_cb(const struct hg_cb_info *info)
{
    hg_return_t hret = info->ret;
    struct margo_cb_arg* arg = info->arg;

    /* propagate return code out through eventual */
    ABT_eventual_set(*(arg->eventual), &hret, sizeof(hret));

    if(arg->in_pool)
        arg->mid->waiters_in_progress_pool--;

    return(HG_SUCCESS);
}

typedef struct
{
    hg_handle_t handle;
} margo_forward_timeout_cb_dat;

static void margo_forward_timeout_cb(void *arg)
{
    margo_forward_timeout_cb_dat *timeout_cb_dat =
        (margo_forward_timeout_cb_dat *)arg;

    /* cancel the Mercury op if the forward timed out */
    HG_Cancel(timeout_cb_dat->handle);
    return;
}

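/* forward an RPC with a timeout; a margo timer cancels the underlying
 * Mercury operation if it does not complete within timeout_ms
 */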
hg_return_t margo_forward_timed(
    margo_instance_id mid,
    hg_handle_t handle,
    void *in_struct,
    double timeout_ms)
{
    int ret;
    hg_return_t hret;
    ABT_eventual eventual;
    hg_return_t* waited_hret;
    margo_timer_t forward_timer;
    margo_forward_timeout_cb_dat timeout_cb_dat;
    struct margo_cb_arg arg;

    ret = ABT_eventual_create(sizeof(hret), &eventual);
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);
    }

    /* set a timer object to expire when this forward times out */
    timeout_cb_dat.handle = handle;
    margo_timer_init(mid, &forward_timer, margo_forward_timeout_cb,
        &timeout_cb_dat, timeout_ms);

    arg.eventual = &eventual;
    arg.mid = mid;
    if(margo_xstream_is_in_progress_pool(mid))
    {
        arg.in_pool = 1;
        mid->waiters_in_progress_pool++;
    }
    else
        arg.in_pool = 0;
    hret = HG_Forward(handle, margo_cb, &arg, in_struct);
    if(hret == HG_SUCCESS)
    {
        ABT_eventual_wait(eventual, (void**)&waited_hret);
        hret = *waited_hret;
    }

    /* convert HG_CANCELED to HG_TIMEOUT to indicate op timed out */
    if(hret == HG_CANCELED)
        hret = HG_TIMEOUT;

    /* remove timer if it is still in place (i.e., not timed out) */
    if(hret != HG_TIMEOUT)
        margo_timer_destroy(mid, &forward_timer);

    ABT_eventual_free(&eventual);

    return(hret);
}


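/* forward an RPC and block the calling ULT until the operation completes */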
hg_return_t margo_forward(
    margo_instance_id mid,
    hg_handle_t handle,
    void *in_struct)
{
    hg_return_t hret = HG_TIMEOUT;
    ABT_eventual eventual;
    int ret;
    hg_return_t* waited_hret;
    struct margo_cb_arg arg;

    ret = ABT_eventual_create(sizeof(hret), &eventual);
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);
    }

    arg.eventual = &eventual;
    arg.mid = mid;
    if(margo_xstream_is_in_progress_pool(mid))
    {
        arg.in_pool = 1;
        mid->waiters_in_progress_pool++;
    }
    else
        arg.in_pool = 0;
    hret = HG_Forward(handle, margo_cb, &arg, in_struct);
    if(hret == HG_SUCCESS)
    {
        ABT_eventual_wait(eventual, (void**)&waited_hret);
        hret = *waited_hret;
    }

    ABT_eventual_free(&eventual);

    return(hret);
}

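/* send an RPC response and block the calling ULT until it completes */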
hg_return_t margo_respond(
    margo_instance_id mid,
    hg_handle_t handle,
    void *out_struct)
{
    hg_return_t hret = HG_TIMEOUT;
    ABT_eventual eventual;
    int ret;
    hg_return_t* waited_hret;
    struct margo_cb_arg arg;

    ret = ABT_eventual_create(sizeof(hret), &eventual);
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);
    }

    arg.eventual = &eventual;
    arg.mid = mid;
    if(margo_xstream_is_in_progress_pool(mid))
    {
        arg.in_pool = 1;
        mid->waiters_in_progress_pool++;
    }
    else
        arg.in_pool = 0;
    hret = HG_Respond(handle, margo_cb, &arg, out_struct);
    if(hret == HG_SUCCESS)
    {
        ABT_eventual_wait(eventual, (void**)&waited_hret);
        hret = *waited_hret;
    }

    ABT_eventual_free(&eventual);

    return(hret);
}

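/* completion callback for margo_bulk_transfer(); mirrors margo_cb() */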
static hg_return_t margo_bulk_transfer_cb(const struct hg_cb_info *info)
{
    hg_return_t hret = info->ret;
    struct margo_cb_arg* arg = info->arg;

    /* propagate return code out through eventual */
    ABT_eventual_set(*(arg->eventual), &hret, sizeof(hret));

    if(arg->in_pool)
        arg->mid->waiters_in_progress_pool--;

    return(HG_SUCCESS);
}

struct lookup_cb_evt
{
    hg_return_t nret;
    hg_addr_t addr;
};

static hg_return_t margo_addr_lookup_cb(const struct hg_cb_info *info)
{
    struct lookup_cb_evt evt;
    struct margo_cb_arg* arg = info->arg;

    evt.nret = info->ret;
    evt.addr = info->info.lookup.addr;

    /* propagate return code out through eventual */
    ABT_eventual_set(*(arg->eventual), &evt, sizeof(evt));

    if(arg->in_pool)
        arg->mid->waiters_in_progress_pool--;

    return(HG_SUCCESS);
}


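/* resolve an address string to an hg_addr_t, blocking the calling ULT
 * until the lookup callback fires
 */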
hg_return_t margo_addr_lookup(
    margo_instance_id mid,
    const char   *name,
    hg_addr_t    *addr)
{
    hg_return_t nret;
    struct lookup_cb_evt *evt;
    ABT_eventual eventual;
    int ret;
    struct margo_cb_arg arg;

    ret = ABT_eventual_create(sizeof(*evt), &eventual);
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);
    }

    arg.eventual = &eventual;
    arg.mid = mid;
    if(margo_xstream_is_in_progress_pool(mid))
    {
        arg.in_pool = 1;
        mid->waiters_in_progress_pool++;
    }
    else
        arg.in_pool = 0;
    nret = HG_Addr_lookup(mid->hg_context, margo_addr_lookup_cb,
        &arg, name, HG_OP_ID_IGNORE);
    if(nret == HG_SUCCESS)
    {
        ABT_eventual_wait(eventual, (void**)&evt);
        *addr = evt->addr;
        nret = evt->nret;
    }

    ABT_eventual_free(&eventual);

    return(nret);
}

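/* perform a bulk data transfer and block the calling ULT until it completes */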
hg_return_t margo_bulk_transfer(
    margo_instance_id mid,
    hg_bulk_op_t op,
    hg_addr_t origin_addr,
    hg_bulk_t origin_handle,
    size_t origin_offset,
    hg_bulk_t local_handle,
    size_t local_offset,
    size_t size)
{
    hg_return_t hret = HG_TIMEOUT;
    hg_return_t *waited_hret;
    ABT_eventual eventual;
    int ret;
    struct margo_cb_arg arg;

    ret = ABT_eventual_create(sizeof(hret), &eventual);
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);
    }

    arg.eventual = &eventual;
    arg.mid = mid;
    if(margo_xstream_is_in_progress_pool(mid))
    {
        arg.in_pool = 1;
        mid->waiters_in_progress_pool++;
    }
    else
        arg.in_pool = 0;
    hret = HG_Bulk_transfer(mid->hg_context, margo_bulk_transfer_cb,
        &arg, op, origin_addr, origin_handle, origin_offset, local_handle,
        local_offset, size, HG_OP_ID_IGNORE);
    if(hret == HG_SUCCESS)
    {
        ABT_eventual_wait(eventual, (void**)&waited_hret);
        hret = *waited_hret;
    }

    ABT_eventual_free(&eventual);

    return(hret);
}

typedef struct
{
    margo_instance_id mid;
    ABT_mutex mutex;
    ABT_cond cond;
    char is_asleep;
    char in_pool;
} margo_thread_sleep_cb_dat;

static void margo_thread_sleep_cb(void *arg)
{
    margo_thread_sleep_cb_dat *sleep_cb_dat =
        (margo_thread_sleep_cb_dat *)arg;

    /* decrement number of waiting threads */
    sleep_cb_dat->mid->waiters_in_progress_pool -=
        sleep_cb_dat->in_pool;

    /* wake up the sleeping thread */
    ABT_mutex_lock(sleep_cb_dat->mutex);
    sleep_cb_dat->is_asleep = 0;
    ABT_cond_signal(sleep_cb_dat->cond);
    ABT_mutex_unlock(sleep_cb_dat->mutex);

    return;
}

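/* put the calling ULT to sleep for timeout_ms using a margo timer rather
 * than blocking the underlying execution stream
 */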
void margo_thread_sleep(
    margo_instance_id mid,
    double timeout_ms)
{
    int in_pool = 0;
    margo_timer_t sleep_timer;
    margo_thread_sleep_cb_dat sleep_cb_dat;

    if(margo_xstream_is_in_progress_pool(mid))
        in_pool = 1;

    /* set data needed for sleep callback */
    sleep_cb_dat.mid = mid;
    ABT_mutex_create(&(sleep_cb_dat.mutex));
    ABT_cond_create(&(sleep_cb_dat.cond));
    sleep_cb_dat.is_asleep = 1;
    sleep_cb_dat.in_pool = in_pool;

    /* initialize the sleep timer */
    margo_timer_init(mid, &sleep_timer, margo_thread_sleep_cb,
        &sleep_cb_dat, timeout_ms);

    /* increment number of waiting threads */
    mid->waiters_in_progress_pool += in_pool;

    /* yield thread for specified timeout */
    ABT_mutex_lock(sleep_cb_dat.mutex);
    while(sleep_cb_dat.is_asleep)
        ABT_cond_wait(sleep_cb_dat.cond, sleep_cb_dat.mutex);
    ABT_mutex_unlock(sleep_cb_dat.mutex);

    /* free the mutex and condition variable created for this sleep call */
    ABT_mutex_free(&(sleep_cb_dat.mutex));
    ABT_cond_free(&(sleep_cb_dat.cond));

    return;
}

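/* map an hg_class_t back to the margo instance that registered it */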
margo_instance_id margo_hg_class_to_instance(hg_class_t *cl)
{
    int i;

    for(i=0; i<handler_mapping_table_size; i++)
    {
        if(handler_mapping_table[i].class == cl)
            return(handler_mapping_table[i].mid);
    }
    return(MARGO_INSTANCE_NULL);
}

/* returns 1 if current xstream is in the progress pool, 0 if not */
static int margo_xstream_is_in_progress_pool(margo_instance_id mid)
{
    int ret;
    ABT_xstream xstream;
    ABT_pool pool;

    ret = ABT_xstream_self(&xstream);
    assert(ret == ABT_SUCCESS);
    ret = ABT_xstream_get_main_pools(xstream, 1, &pool);
    assert(ret == ABT_SUCCESS);

    if(pool == mid->progress_pool)
        return(1);
    else
        return(0);
}