/*
 * (C) 2015 The University of Chicago
 *
 * See COPYRIGHT in top-level directory.
 */

#include <assert.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <abt.h>
#include <abt-snoozer.h>
#include <time.h>
#include <math.h>

#include "margo.h"
#include "margo-timer.h"
#include "utlist.h"

#define MERCURY_PROGRESS_TIMEOUT_UB 100 /* 100 milliseconds */

struct margo_instance
{
    /* provided by caller */
    hg_context_t *hg_context;
    hg_class_t *hg_class;
    ABT_pool handler_pool;
    ABT_pool progress_pool;

    /* internal to margo for this particular instance */
    ABT_thread hg_progress_tid;
    int hg_progress_shutdown_flag;
    ABT_xstream progress_xstream;
    int owns_progress_pool;
    ABT_xstream *rpc_xstreams;
    int num_handler_pool_threads;

    /* control logic for callers waiting on margo to be finalized */
    int finalize_flag;
    int waiters_in_progress_pool;
    int refcount;
    ABT_mutex finalize_mutex;
    ABT_cond finalize_cond;

    int table_index;
};

struct margo_handler_mapping
{
    hg_class_t *class;
    margo_instance_id mid;
};

struct margo_cb_arg
{
    ABT_eventual *eventual;
    margo_instance_id mid;
    char in_pool;
};

#define MAX_HANDLER_MAPPING 8
static int handler_mapping_table_size = 0;
static struct margo_handler_mapping handler_mapping_table[MAX_HANDLER_MAPPING] = {0};

static void hg_progress_fn(void* foo);
static int margo_xstream_is_in_progress_pool(margo_instance_id mid);

struct handler_entry
{
    void* fn;
    hg_handle_t handle;
    struct handler_entry *next;
};

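/* margo_init()
 *
 * Creates a margo instance.  If use_progress_thread is non-zero, a dedicated
 * execution stream is created (via ABT-snoozer) to drive Mercury progress;
 * otherwise progress runs in the caller's pool.  rpc_thread_count selects
 * where RPC handlers execute: > 0 creates that many dedicated handler
 * execution streams, 0 reuses the caller's pool, and < 0 runs handlers in
 * the progress pool.
 */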
margo_instance_id margo_init(int use_progress_thread, int rpc_thread_count,
    hg_context_t *hg_context)
{
    struct margo_instance *mid = MARGO_INSTANCE_NULL;
    ABT_xstream progress_xstream = ABT_XSTREAM_NULL;
    ABT_pool progress_pool = ABT_POOL_NULL;
    ABT_xstream *rpc_xstreams = NULL;
    ABT_xstream rpc_xstream = ABT_XSTREAM_NULL;
    ABT_pool rpc_pool = ABT_POOL_NULL;
    int ret;
    int i;

    if (use_progress_thread)
    {
        ret = ABT_snoozer_xstream_create(1, &progress_pool, &progress_xstream);
        if (ret != ABT_SUCCESS) goto err;
    }
    else
    {
        ret = ABT_xstream_self(&progress_xstream);
        if (ret != ABT_SUCCESS) goto err;
        ret = ABT_xstream_get_main_pools(progress_xstream, 1, &progress_pool);
        if (ret != ABT_SUCCESS) goto err;
    }

    if (rpc_thread_count > 0)
    {
        rpc_xstreams = malloc(rpc_thread_count * sizeof(*rpc_xstreams));
        if (rpc_xstreams == NULL) goto err;
        ret = ABT_snoozer_xstream_create(rpc_thread_count, &rpc_pool,
                rpc_xstreams);
        if (ret != ABT_SUCCESS) goto err;
    }
    else if (rpc_thread_count == 0)
    {
        ret = ABT_xstream_self(&rpc_xstream);
        if (ret != ABT_SUCCESS) goto err;
        ret = ABT_xstream_get_main_pools(rpc_xstream, 1, &rpc_pool);
        if (ret != ABT_SUCCESS) goto err;
    }
    else
    {
        rpc_pool = progress_pool;
    }

    mid = margo_init_pool(progress_pool, rpc_pool, hg_context);
    if (mid == MARGO_INSTANCE_NULL) goto err;

    mid->owns_progress_pool = use_progress_thread;
    mid->progress_xstream = progress_xstream;
    mid->num_handler_pool_threads = rpc_thread_count < 0 ? 0 : rpc_thread_count;
    mid->rpc_xstreams = rpc_xstreams;
    return mid;

err:
    if (use_progress_thread && progress_xstream != ABT_XSTREAM_NULL)
    {
        ABT_xstream_join(progress_xstream);
        ABT_xstream_free(&progress_xstream);
    }
    if (rpc_thread_count > 0 && rpc_xstreams != NULL)
    {
        for (i = 0; i < rpc_thread_count; i++)
        {
            ABT_xstream_join(rpc_xstreams[i]);
            ABT_xstream_free(&rpc_xstreams[i]);
        }
        free(rpc_xstreams);
    }
    return MARGO_INSTANCE_NULL;
}
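
/* Example (sketch only): a typical single-threaded client initialization,
 * assuming the caller creates the Mercury class and context itself.  The
 * address string and call sequence are illustrative, not prescriptive.
 *
 *   hg_class_t *hg_class = HG_Init("tcp://localhost:1234", HG_FALSE);
 *   hg_context_t *hg_context = HG_Context_create(hg_class);
 *   margo_instance_id mid = margo_init(0, 0, hg_context);
 *
 *   ... issue margo_forward() / margo_bulk_transfer() calls from ULTs ...
 *
 *   margo_finalize(mid);
 *   HG_Context_destroy(hg_context);
 *   HG_Finalize(hg_class);
 */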

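/* margo_init_pool()
 *
 * Creates a margo instance that uses caller-supplied Argobots pools for
 * progress and RPC handling.  Launches the hg_progress_fn ULT in the
 * progress pool and registers the instance in the handler mapping table.
 */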
margo_instance_id margo_init_pool(ABT_pool progress_pool, ABT_pool handler_pool,
    hg_context_t *hg_context)
{
    int ret;
    struct margo_instance *mid;

    if(handler_mapping_table_size >= MAX_HANDLER_MAPPING)
        return(MARGO_INSTANCE_NULL);

    mid = malloc(sizeof(*mid));
    if(!mid)
        return(MARGO_INSTANCE_NULL);
    memset(mid, 0, sizeof(*mid));

    ABT_mutex_create(&mid->finalize_mutex);
    ABT_cond_create(&mid->finalize_cond);

    mid->progress_pool = progress_pool;
    mid->handler_pool = handler_pool;
    mid->hg_class = HG_Context_get_class(hg_context);
    mid->hg_context = hg_context;
    mid->refcount = 1;

    ret = margo_timer_instance_init(mid);
    if(ret != 0)
    {
        fprintf(stderr, "Error: margo_timer_instance_init()\n");
        ABT_cond_free(&mid->finalize_cond);
        ABT_mutex_free(&mid->finalize_mutex);
        free(mid);
        return(MARGO_INSTANCE_NULL);
    }

    ret = ABT_thread_create(mid->progress_pool, hg_progress_fn, mid,
        ABT_THREAD_ATTR_NULL, &mid->hg_progress_tid);
    if(ret != 0)
    {
        fprintf(stderr, "Error: ABT_thread_create()\n");
        margo_timer_instance_finalize(mid);
        ABT_cond_free(&mid->finalize_cond);
        ABT_mutex_free(&mid->finalize_mutex);
        free(mid);
        return(MARGO_INSTANCE_NULL);
    }

    handler_mapping_table[handler_mapping_table_size].mid = mid;
    handler_mapping_table[handler_mapping_table_size].class = mid->hg_class;
    mid->table_index = handler_mapping_table_size;
    handler_mapping_table_size++;

    return mid;
}

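/* frees all resources associated with a margo instance; invoked by
 * whichever of margo_finalize() / margo_wait_for_finalize() drops the
 * last reference
 */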
static void margo_cleanup(margo_instance_id mid)
{
    int i;

    margo_timer_instance_finalize(mid);

    ABT_mutex_free(&mid->finalize_mutex);
    ABT_cond_free(&mid->finalize_cond);

    if (mid->owns_progress_pool)
    {
        ABT_xstream_join(mid->progress_xstream);
        ABT_xstream_free(&mid->progress_xstream);
    }

    if (mid->num_handler_pool_threads > 0)
    {
        for (i = 0; i < mid->num_handler_pool_threads; i++)
        {
            ABT_xstream_join(mid->rpc_xstreams[i]);
            ABT_xstream_free(&mid->rpc_xstreams[i]);
        }
        free(mid->rpc_xstreams);
    }

    free(mid);
}

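/* margo_finalize()
 *
 * Shuts down the progress ULT, removes this instance from the handler
 * mapping table, and wakes any callers blocked in margo_wait_for_finalize().
 */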
void margo_finalize(margo_instance_id mid)
{
    int i;
    int do_cleanup;

    /* tell progress thread to wrap things up */
    mid->hg_progress_shutdown_flag = 1;

    /* wait for it to shut down cleanly */
    ABT_thread_join(mid->hg_progress_tid);
    ABT_thread_free(&mid->hg_progress_tid);

    /* remove this instance from the handler mapping table, shifting any
     * later entries down and keeping their table indices in sync
     */
    for(i=mid->table_index; i<(handler_mapping_table_size-1); i++)
    {
        handler_mapping_table[i] = handler_mapping_table[i+1];
        handler_mapping_table[i].mid->table_index = i;
    }
    handler_mapping_table_size--;

    ABT_mutex_lock(mid->finalize_mutex);
    mid->finalize_flag = 1;
    ABT_cond_broadcast(mid->finalize_cond);

    mid->refcount--;
    do_cleanup = mid->refcount == 0;

    ABT_mutex_unlock(mid->finalize_mutex);

    /* if no one was waiting on margo_wait_for_finalize() at the time of the
     * broadcast then it is safe to clean up here; otherwise the last waiter
     * to wake up performs the cleanup
     */
    if (do_cleanup)
        margo_cleanup(mid);

    return;
}

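/* margo_wait_for_finalize()
 *
 * Blocks the calling ULT until margo_finalize() is invoked on this
 * instance.  The last waiter to wake up performs the final cleanup.
 */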
void margo_wait_for_finalize(margo_instance_id mid)
{
    int in_pool = 0;
    int do_cleanup;

    /* Is this waiter in the same pool as the pool running the progress
     * thread?
     */
    if(margo_xstream_is_in_progress_pool(mid))
        in_pool = 1;

    ABT_mutex_lock(mid->finalize_mutex);

        mid->waiters_in_progress_pool += in_pool;
        mid->refcount++;

        while(!mid->finalize_flag)
            ABT_cond_wait(mid->finalize_cond, mid->finalize_mutex);

        mid->refcount--;
        do_cleanup = mid->refcount == 0;

    ABT_mutex_unlock(mid->finalize_mutex);

    if (do_cleanup)
        margo_cleanup(mid);

    return;
}

/* dedicated thread function to drive Mercury progress */
static void hg_progress_fn(void* foo)
{
    int ret;
    unsigned int actual_count;
    struct margo_instance *mid = (struct margo_instance *)foo;
    size_t size;
    unsigned int hg_progress_timeout = MERCURY_PROGRESS_TIMEOUT_UB;
    double next_timer_exp;
    int trigger_happened;

    while(!mid->hg_progress_shutdown_flag)
    {
        trigger_happened = 0;
        do {
            ret = HG_Trigger(mid->hg_context, 0, 1, &actual_count);
            if(ret == HG_SUCCESS && actual_count > 0)
                trigger_happened = 1;
        } while((ret == HG_SUCCESS) && actual_count && !mid->hg_progress_shutdown_flag);

        if(trigger_happened)
            ABT_thread_yield();

        ABT_pool_get_total_size(mid->progress_pool, &size);
        /* Are there any other threads executing in this pool that are *not*
         * blocked on margo_wait_for_finalize()?  If so, then we can't
         * sleep here or else those threads will not get a chance to
         * execute.
         */
        if(size > mid->waiters_in_progress_pool)
        {
            //printf("DEBUG: Margo progress function running while other ULTs are eligible for execution (size: %d, waiters: %d.\n", size, mid->waiters_in_progress_pool);

            /* TODO: this is being executed more than is necessary (i.e.
             * in cases where there are other legitimate ULTs eligible
             * for execution that are not blocking on any events, Margo
             * or otherwise). Maybe we need an abt scheduling tweak here
             * to make sure that this ULT is the lowest priority in that
             * scenario.
             */
            ABT_thread_yield();
            HG_Progress(mid->hg_context, 0);
        }
        else
        {
            /* reset to the upper bound before checking for queued timers */
            hg_progress_timeout = MERCURY_PROGRESS_TIMEOUT_UB;
            ret = margo_timer_get_next_expiration(mid, &next_timer_exp);
            if(ret == 0)
            {
                /* there is a queued timer, don't block long enough
                 * to keep this timer waiting
                 */
                if(next_timer_exp >= 0.0)
                {
                    next_timer_exp *= 1000; /* convert to milliseconds */
                    if(next_timer_exp < MERCURY_PROGRESS_TIMEOUT_UB)
                        hg_progress_timeout = (unsigned int)next_timer_exp;
                }
                else
                {
                    hg_progress_timeout = 0;
                }
            }
            HG_Progress(mid->hg_context, hg_progress_timeout);
        }

        /* check for any expired timers */
        margo_check_timers(mid);
    }

    return;
}

ABT_pool* margo_get_handler_pool(margo_instance_id mid)
{
    return(&mid->handler_pool);
}

hg_context_t* margo_get_context(margo_instance_id mid)
{
    return(mid->hg_context);
}

hg_class_t* margo_get_class(margo_instance_id mid)
{
    return(mid->hg_class);
}


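/* completion callback used for HG_Forward()/HG_Respond(); propagates the
 * Mercury return code to the eventual that the calling ULT blocks on
 */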
static hg_return_t margo_cb(const struct hg_cb_info *info)
{
    hg_return_t hret = info->ret;
    struct margo_cb_arg* arg = info->arg;

    /* propagate return code out through eventual */
    ABT_eventual_set(*(arg->eventual), &hret, sizeof(hret));

#if 0
    if(arg->in_pool)
        arg->mid->waiters_in_progress_pool--;
#endif

    return(HG_SUCCESS);
}

typedef struct
{
    hg_handle_t handle;
} margo_forward_timeout_cb_dat;

static void margo_forward_timeout_cb(void *arg)
{
    margo_forward_timeout_cb_dat *timeout_cb_dat =
        (margo_forward_timeout_cb_dat *)arg;

    /* cancel the Mercury op if the forward timed out */
    HG_Cancel(timeout_cb_dat->handle);
    return;
}

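/* margo_forward_timed()
 *
 * Forwards an RPC and blocks the calling ULT until it completes or until
 * timeout_ms elapses.  A margo timer cancels the Mercury operation on
 * timeout, and HG_CANCELED is reported to the caller as HG_TIMEOUT.
 */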
hg_return_t margo_forward_timed(
    margo_instance_id mid,
    hg_handle_t handle,
    void *in_struct,
    double timeout_ms)
{
    int ret;
    hg_return_t hret;
    ABT_eventual eventual;
    hg_return_t* waited_hret;
    margo_timer_t forward_timer;
    margo_forward_timeout_cb_dat timeout_cb_dat;
    struct margo_cb_arg arg;

    ret = ABT_eventual_create(sizeof(hret), &eventual);
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);
    }

    /* set a timer object to expire when this forward times out */
    timeout_cb_dat.handle = handle;
    margo_timer_init(mid, &forward_timer, margo_forward_timeout_cb,
        &timeout_cb_dat, timeout_ms);

    arg.eventual = &eventual;
    arg.mid = mid;
#if 0
    if(margo_xstream_is_in_progress_pool(mid))
    {
        arg.in_pool = 1;
        mid->waiters_in_progress_pool++;
    }
    else
        arg.in_pool = 0;
#endif
    hret = HG_Forward(handle, margo_cb, &arg, in_struct);
    if(hret == HG_SUCCESS)
    {
        ABT_eventual_wait(eventual, (void**)&waited_hret);
        hret = *waited_hret;
    }

    /* convert HG_CANCELED to HG_TIMEOUT to indicate op timed out */
    if(hret == HG_CANCELED)
        hret = HG_TIMEOUT;

    /* remove timer if it is still in place (i.e., not timed out) */
    if(hret != HG_TIMEOUT)
        margo_timer_destroy(mid, &forward_timer);

    ABT_eventual_free(&eventual);

    return(hret);
}


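/* margo_forward()
 *
 * Forwards an RPC and blocks the calling ULT (yielding to other ULTs)
 * until the response arrives; returns the Mercury status of the forward.
 */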
hg_return_t margo_forward(
    margo_instance_id mid,
    hg_handle_t handle,
    void *in_struct)
{
    hg_return_t hret = HG_TIMEOUT;
    ABT_eventual eventual;
    int ret;
    hg_return_t* waited_hret;
    struct margo_cb_arg arg;

    ret = ABT_eventual_create(sizeof(hret), &eventual);
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);
    }

    arg.eventual = &eventual;
    arg.mid = mid;
#if 0
    if(margo_xstream_is_in_progress_pool(mid))
    {
        arg.in_pool = 1;
        mid->waiters_in_progress_pool++;
    }
    else
        arg.in_pool = 0;
#endif
    hret = HG_Forward(handle, margo_cb, &arg, in_struct);
    if(hret == HG_SUCCESS)
    {
        ABT_eventual_wait(eventual, (void**)&waited_hret);
        hret = *waited_hret;
    }

    ABT_eventual_free(&eventual);

    return(hret);
}

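/* margo_respond()
 *
 * Sends an RPC response and blocks the calling ULT until the underlying
 * HG_Respond() operation completes.
 */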
hg_return_t margo_respond(
    margo_instance_id mid,
    hg_handle_t handle,
    void *out_struct)
{
    hg_return_t hret = HG_TIMEOUT;
    ABT_eventual eventual;
    int ret;
    hg_return_t* waited_hret;
    struct margo_cb_arg arg;

    ret = ABT_eventual_create(sizeof(hret), &eventual);
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);
    }

    arg.eventual = &eventual;
    arg.mid = mid;
#if 0
    if(margo_xstream_is_in_progress_pool(mid))
    {
        arg.in_pool = 1;
        mid->waiters_in_progress_pool++;
    }
    else
        arg.in_pool = 0;
#endif
    hret = HG_Respond(handle, margo_cb, &arg, out_struct);
    if(hret == HG_SUCCESS)
    {
        ABT_eventual_wait(eventual, (void**)&waited_hret);
        hret = *waited_hret;
    }

    ABT_eventual_free(&eventual);

    return(hret);
}

static hg_return_t margo_bulk_transfer_cb(const struct hg_cb_info *info)
{
    hg_return_t hret = info->ret;
    struct margo_cb_arg* arg = info->arg;

    /* propagate return code out through eventual */
    ABT_eventual_set(*(arg->eventual), &hret, sizeof(hret));

    if(arg->in_pool)
        arg->mid->waiters_in_progress_pool--;

    return(HG_SUCCESS);
}

struct lookup_cb_evt
{
    hg_return_t nret;
    hg_addr_t addr;
};

static hg_return_t margo_addr_lookup_cb(const struct hg_cb_info *info)
{
    struct lookup_cb_evt evt;
    struct margo_cb_arg* arg = info->arg;

    evt.nret = info->ret;
    evt.addr = info->info.lookup.addr;

    /* propagate return code out through eventual */
    ABT_eventual_set(*(arg->eventual), &evt, sizeof(evt));

#if 0
    if(arg->in_pool)
        arg->mid->waiters_in_progress_pool--;
#endif

    return(HG_SUCCESS);
}


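/* margo_addr_lookup()
 *
 * Resolves a Mercury address string to an hg_addr_t, blocking the calling
 * ULT until the lookup callback fires.
 */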
hg_return_t margo_addr_lookup(
    margo_instance_id mid,
    const char   *name,
    hg_addr_t    *addr)
{
    hg_return_t nret;
    struct lookup_cb_evt *evt;
    ABT_eventual eventual;
    int ret;
    struct margo_cb_arg arg;

    ret = ABT_eventual_create(sizeof(*evt), &eventual);
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);
    }

    arg.eventual = &eventual;
    arg.mid = mid;
#if 0
    if(margo_xstream_is_in_progress_pool(mid))
    {
        arg.in_pool = 1;
        mid->waiters_in_progress_pool++;
    }
    else
        arg.in_pool = 0;
#endif
    nret = HG_Addr_lookup(mid->hg_context, margo_addr_lookup_cb,
        &arg, name, HG_OP_ID_IGNORE);
    if(nret == HG_SUCCESS)
    {
        ABT_eventual_wait(eventual, (void**)&evt);
        *addr = evt->addr;
        nret = evt->nret;
    }

    ABT_eventual_free(&eventual);

    return(nret);
}

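/* margo_bulk_transfer()
 *
 * Performs a bulk (RDMA-style) transfer between the origin and local bulk
 * handles and blocks the calling ULT until it completes.  If the caller
 * runs in the progress pool, it is counted in waiters_in_progress_pool so
 * the progress loop knows it may block in HG_Progress().
 */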
hg_return_t margo_bulk_transfer(
    margo_instance_id mid,
    hg_bulk_op_t op,
    hg_addr_t origin_addr,
    hg_bulk_t origin_handle,
    size_t origin_offset,
    hg_bulk_t local_handle,
    size_t local_offset,
    size_t size)
{
    hg_return_t hret = HG_TIMEOUT;
    hg_return_t *waited_hret;
    ABT_eventual eventual;
    int ret;
    struct margo_cb_arg arg;

    ret = ABT_eventual_create(sizeof(hret), &eventual);
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);
    }

    arg.eventual = &eventual;
    arg.mid = mid;
    if(margo_xstream_is_in_progress_pool(mid))
    {
        arg.in_pool = 1;
        mid->waiters_in_progress_pool++;
    }
    else
        arg.in_pool = 0;
    hret = HG_Bulk_transfer(mid->hg_context, margo_bulk_transfer_cb,
        &arg, op, origin_addr, origin_handle, origin_offset, local_handle,
        local_offset, size, HG_OP_ID_IGNORE);
    if(hret == HG_SUCCESS)
    {
        ABT_eventual_wait(eventual, (void**)&waited_hret);
        hret = *waited_hret;
    }

    ABT_eventual_free(&eventual);

    return(hret);
}

typedef struct
{
    margo_instance_id mid;
    ABT_mutex mutex;
    ABT_cond cond;
    char is_asleep;
    char in_pool;
} margo_thread_sleep_cb_dat;

static void margo_thread_sleep_cb(void *arg)
{
    margo_thread_sleep_cb_dat *sleep_cb_dat =
        (margo_thread_sleep_cb_dat *)arg;

    /* decrement number of waiting threads */
    sleep_cb_dat->mid->waiters_in_progress_pool -=
        sleep_cb_dat->in_pool;

    /* wake up the sleeping thread */
    ABT_mutex_lock(sleep_cb_dat->mutex);
    sleep_cb_dat->is_asleep = 0;
    ABT_cond_signal(sleep_cb_dat->cond);
    ABT_mutex_unlock(sleep_cb_dat->mutex);

    return;
}

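/* margo_thread_sleep()
 *
 * Suspends the calling ULT for approximately timeout_ms milliseconds using
 * a margo timer, yielding to other ULTs instead of busy-waiting.
 */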
void margo_thread_sleep(
    margo_instance_id mid,
    double timeout_ms)
{
    int in_pool = 0;
    margo_timer_t sleep_timer;
    margo_thread_sleep_cb_dat sleep_cb_dat;

    if(margo_xstream_is_in_progress_pool(mid))
        in_pool = 1;

    /* set data needed for sleep callback */
    sleep_cb_dat.mid = mid;
    ABT_mutex_create(&(sleep_cb_dat.mutex));
    ABT_cond_create(&(sleep_cb_dat.cond));
    sleep_cb_dat.is_asleep = 1;
    sleep_cb_dat.in_pool = in_pool;

    /* initialize the sleep timer */
    margo_timer_init(mid, &sleep_timer, margo_thread_sleep_cb,
        &sleep_cb_dat, timeout_ms);

    /* increment number of waiting threads */
    mid->waiters_in_progress_pool += in_pool;

    /* yield thread for specified timeout */
    ABT_mutex_lock(sleep_cb_dat.mutex);
    while(sleep_cb_dat.is_asleep)
        ABT_cond_wait(sleep_cb_dat.cond, sleep_cb_dat.mutex);
    ABT_mutex_unlock(sleep_cb_dat.mutex);

    /* release the mutex and condition variable created for this call */
    ABT_mutex_free(&(sleep_cb_dat.mutex));
    ABT_cond_free(&(sleep_cb_dat.cond));

    return;
}

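/* maps an hg_class_t back to the margo instance that was initialized with it */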
margo_instance_id margo_hg_class_to_instance(hg_class_t *cl)
{
    int i;

    for(i=0; i<handler_mapping_table_size; i++)
    {
        if(handler_mapping_table[i].class == cl)
            return(handler_mapping_table[i].mid);
    }
    return(NULL);
}

/* returns 1 if current xstream is in the progress pool, 0 if not */
static int margo_xstream_is_in_progress_pool(margo_instance_id mid)
{
    int ret;
    ABT_xstream xstream;
    ABT_pool pool;

    ret = ABT_xstream_self(&xstream);
    assert(ret == ABT_SUCCESS);
    ret = ABT_xstream_get_main_pools(xstream, 1, &pool);
    assert(ret == ABT_SUCCESS);

    if(pool == mid->progress_pool)
        return(1);
    else
        return(0);
}