/*
 * (C) 2015 The University of Chicago
 *
 * See COPYRIGHT in top-level directory.
 */

#include <assert.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <abt.h>
#include <abt-snoozer.h>
#include <time.h>
#include <math.h>

#include "margo.h"
#include "margo-timer.h"
#include "utlist.h"
#include "uthash.h"

#define MERCURY_PROGRESS_TIMEOUT_UB 100 /* 100 milliseconds */

struct mplex_key
{
    hg_id_t id;
    uint32_t mplex_id;
};

struct mplex_element
{
    struct mplex_key key;
    ABT_pool pool;
    UT_hash_handle hh;
};

struct margo_instance
{
    /* provided by caller */
    hg_context_t *hg_context;
    hg_class_t *hg_class;
    ABT_pool handler_pool;
    ABT_pool progress_pool;

    /* internal to margo for this particular instance */
    ABT_thread hg_progress_tid;
    int hg_progress_shutdown_flag;
    ABT_xstream progress_xstream;
    int owns_progress_pool;
    ABT_xstream *rpc_xstreams;
    int num_handler_pool_threads;

    /* control logic for callers waiting on margo to be finalized */
    int finalize_flag;
    int waiters_in_progress_pool;
    int refcount;
    ABT_mutex finalize_mutex;
    ABT_cond finalize_cond;

    /* hash table to track multiplexed rpcs registered with margo */
    struct mplex_element *mplex_table;
};

struct margo_cb_arg
{
    ABT_eventual *eventual;
    margo_instance_id mid;
    char in_pool;
};

struct margo_rpc_data
{
	margo_instance_id mid;
	void* user_data;
	void (*user_free_callback)(void *);
};

static void hg_progress_fn(void* foo);
static int margo_xstream_is_in_progress_pool(margo_instance_id mid);
static void margo_rpc_data_free(void* ptr);

struct handler_entry
{
    void* fn;
    hg_handle_t handle;
    struct handler_entry *next; 
};

margo_instance_id margo_init(int use_progress_thread, int rpc_thread_count,
    hg_context_t *hg_context)
{
    struct margo_instance *mid = MARGO_INSTANCE_NULL;
    ABT_xstream progress_xstream = ABT_XSTREAM_NULL;
    ABT_pool progress_pool = ABT_POOL_NULL;
    ABT_xstream *rpc_xstreams = NULL;
    ABT_xstream rpc_xstream = ABT_XSTREAM_NULL;
    ABT_pool rpc_pool = ABT_POOL_NULL;
    int ret;
    int i;

    if (use_progress_thread)
    {
        ret = ABT_snoozer_xstream_create(1, &progress_pool, &progress_xstream);
        if (ret != ABT_SUCCESS) goto err;
    }
    else
    {
        ret = ABT_xstream_self(&progress_xstream);
        if (ret != ABT_SUCCESS) goto err;
        ret = ABT_xstream_get_main_pools(progress_xstream, 1, &progress_pool);
        if (ret != ABT_SUCCESS) goto err;
    }

    if (rpc_thread_count > 0)
    {
        rpc_xstreams = malloc(rpc_thread_count * sizeof(*rpc_xstreams));
        if (rpc_xstreams == NULL) goto err;
        ret = ABT_snoozer_xstream_create(rpc_thread_count, &rpc_pool,
                rpc_xstreams);
        if (ret != ABT_SUCCESS) goto err;
    }
    else if (rpc_thread_count == 0)
    {
        ret = ABT_xstream_self(&rpc_xstream);
        if (ret != ABT_SUCCESS) goto err;
        ret = ABT_xstream_get_main_pools(rpc_xstream, 1, &rpc_pool);
        if (ret != ABT_SUCCESS) goto err;
    }
    else
    {
        rpc_pool = progress_pool;
    }

    mid = margo_init_pool(progress_pool, rpc_pool, hg_context);
    if (mid == MARGO_INSTANCE_NULL) goto err;

    mid->owns_progress_pool = use_progress_thread;
    mid->progress_xstream = progress_xstream;
    mid->num_handler_pool_threads = rpc_thread_count < 0 ? 0 : rpc_thread_count;
    mid->rpc_xstreams = rpc_xstreams;
    return mid;

err:
    if (use_progress_thread && progress_xstream != ABT_XSTREAM_NULL)
    {
        ABT_xstream_join(progress_xstream);
        ABT_xstream_free(&progress_xstream);
    }
    if (rpc_thread_count > 0 && rpc_xstreams != NULL)
    {
        for (i = 0; i < rpc_thread_count; i++)
        {
            ABT_xstream_join(rpc_xstreams[i]);
            ABT_xstream_free(&rpc_xstreams[i]);
        }
        free(rpc_xstreams);
    }
    return MARGO_INSTANCE_NULL;
}
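
/* Illustrative sketch (not part of this file): a typical caller-side setup
 * path layered on top of margo_init().  The Mercury transport string and the
 * Argobots/abt-snoozer initialization shown here are assumptions about the
 * surrounding program, and error handling is omitted for brevity.
 *
 *     hg_class_t *hg_class = HG_Init("na+sm://", HG_FALSE);
 *     hg_context_t *hg_context = HG_Context_create(hg_class);
 *
 *     ABT_init(argc, argv);
 *     ABT_snoozer_xstream_self_set();  // let the caller's ES sleep when idle
 *
 *     // one dedicated progress ES, no separate RPC handler ESs
 *     margo_instance_id mid = margo_init(1, 0, hg_context);
 *
 *     // ... issue margo_forward()/margo_bulk_transfer() calls here ...
 *
 *     margo_finalize(mid);
 *     HG_Context_destroy(hg_context);
 *     HG_Finalize(hg_class);
 */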

margo_instance_id margo_init_pool(ABT_pool progress_pool, ABT_pool handler_pool,
    hg_context_t *hg_context)
{
    int ret;
    struct margo_instance *mid;

    mid = malloc(sizeof(*mid));
    if(!mid)
        return(MARGO_INSTANCE_NULL);
    memset(mid, 0, sizeof(*mid));

    ABT_mutex_create(&mid->finalize_mutex);
    ABT_cond_create(&mid->finalize_cond);

    mid->progress_pool = progress_pool;
    mid->handler_pool = handler_pool;
    mid->hg_class = HG_Context_get_class(hg_context);
    mid->hg_context = hg_context;
    mid->refcount = 1;

    ret = margo_timer_instance_init(mid);
    if(ret != 0)
    {
        fprintf(stderr, "Error: margo_timer_instance_init()\n");
        free(mid);
        return(MARGO_INSTANCE_NULL);
    }

    ret = ABT_thread_create(mid->progress_pool, hg_progress_fn, mid,
        ABT_THREAD_ATTR_NULL, &mid->hg_progress_tid);
    if(ret != 0)
    {
        fprintf(stderr, "Error: ABT_thread_create()\n");
        free(mid);
        return(MARGO_INSTANCE_NULL);
    }

    return mid;
}

static void margo_cleanup(margo_instance_id mid)
{
    int i;

    margo_timer_instance_finalize(mid);

    ABT_mutex_free(&mid->finalize_mutex);
    ABT_cond_free(&mid->finalize_cond);

    if (mid->owns_progress_pool)
    {
        ABT_xstream_join(mid->progress_xstream);
        ABT_xstream_free(&mid->progress_xstream);
    }

    if (mid->num_handler_pool_threads > 0)
    {
        for (i = 0; i < mid->num_handler_pool_threads; i++)
        {
            ABT_xstream_join(mid->rpc_xstreams[i]);
            ABT_xstream_free(&mid->rpc_xstreams[i]);
        }
        free(mid->rpc_xstreams);
    }

    free(mid);
}

void margo_finalize(margo_instance_id mid)
{
    int i;
    int do_cleanup;

    /* tell progress thread to wrap things up */
    mid->hg_progress_shutdown_flag = 1;

    /* wait for it to shut down cleanly */
    ABT_thread_join(mid->hg_progress_tid);
    ABT_thread_free(&mid->hg_progress_tid);

    ABT_mutex_lock(mid->finalize_mutex);
    mid->finalize_flag = 1;
    ABT_cond_broadcast(mid->finalize_cond);

    mid->refcount--;
    do_cleanup = mid->refcount == 0;

    ABT_mutex_unlock(mid->finalize_mutex);

    /* if there was no one waiting on the finalize at the time of the finalize
     * broadcast, then we're safe to clean up. Otherwise, let the finalizer do
     * it */
    if (do_cleanup)
        margo_cleanup(mid);

    return;
}

void margo_wait_for_finalize(margo_instance_id mid)
{
    int in_pool = 0;
    int do_cleanup;

    /* Is this waiter in the same pool as the pool running the progress
     * thread?
     */
    if(margo_xstream_is_in_progress_pool(mid))
        in_pool = 1;

    ABT_mutex_lock(mid->finalize_mutex);

        mid->waiters_in_progress_pool += in_pool;
        mid->refcount++;

        while(!mid->finalize_flag)
            ABT_cond_wait(mid->finalize_cond, mid->finalize_mutex);

        mid->refcount--;
        do_cleanup = mid->refcount == 0;

    ABT_mutex_unlock(mid->finalize_mutex);

    if (do_cleanup)
        margo_cleanup(mid);

    return;
}
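
/* Illustrative sketch (assumptions, not established usage from this file):
 * a single-threaded server typically parks its main ULT here after
 * registering its RPCs, and a remote shutdown handler later calls
 * margo_finalize().  The last waiter to wake performs the cleanup, per the
 * refcount logic above.
 *
 *     margo_instance_id mid = margo_init(0, -1, hg_context);
 *     // ... HG_Register()/margo_register() calls here ...
 *     margo_wait_for_finalize(mid);   // blocks until margo_finalize(mid)
 */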

/* dedicated thread function to drive Mercury progress */
static void hg_progress_fn(void* foo)
{
    int ret;
    unsigned int actual_count;
    struct margo_instance *mid = (struct margo_instance *)foo;
    size_t size;
    unsigned int hg_progress_timeout = MERCURY_PROGRESS_TIMEOUT_UB;
    double next_timer_exp;
    int trigger_happened;

    while(!mid->hg_progress_shutdown_flag)
    {
        trigger_happened = 0;
        do {
            ret = HG_Trigger(mid->hg_context, 0, 1, &actual_count);
            if(ret == HG_SUCCESS && actual_count > 0)
                trigger_happened = 1;
        } while((ret == HG_SUCCESS) && actual_count && !mid->hg_progress_shutdown_flag);

        if(trigger_happened)
            ABT_thread_yield();

        ABT_pool_get_total_size(mid->progress_pool, &size);
        /* Are there any other threads executing in this pool that are *not*
         * blocked on margo_wait_for_finalize()?  If so, then we can't
         * sleep here or else those threads will not get a chance to
         * execute.
         */
        if(size > mid->waiters_in_progress_pool)
        {
            //printf("DEBUG: Margo progress function running while other ULTs are eligible for execution (size: %d, waiters: %d.\n", size, mid->waiters_in_progress_pool);

            /* TODO: this is being executed more than is necessary (i.e.
             * in cases where there are other legitimate ULTs eligible
             * for execution that are not blocking on any events, Margo
             * or otherwise). Maybe we need an abt scheduling tweak here
             * to make sure that this ULT is the lowest priority in that
             * scenario.
             */
            ret = HG_Progress(mid->hg_context, 0);
            if(ret == HG_SUCCESS)
            {
                /* Mercury completed something; loop around to trigger
                 * callbacks
                 */
            }
            else if(ret == HG_TIMEOUT)
            {
                /* No completion; yield here to allow other ULTs to run */
                ABT_thread_yield();
            }
            else
            {
                /* TODO: error handling */
                fprintf(stderr, "WARNING: unexpected return code (%d) from HG_Progress()\n", ret);
            }
        }
        else
        {
            hg_progress_timeout = MERCURY_PROGRESS_TIMEOUT_UB;
            ret = margo_timer_get_next_expiration(mid, &next_timer_exp);
            if(ret == 0)
            {
                /* there is a queued timer; don't block so long that we
                 * keep this timer waiting
                 */
                if(next_timer_exp >= 0.0)
                {
                    next_timer_exp *= 1000; /* convert to milliseconds */
                    if(next_timer_exp < MERCURY_PROGRESS_TIMEOUT_UB)
                        hg_progress_timeout = (unsigned int)next_timer_exp;
                }
                else
                {
                    hg_progress_timeout = 0;
                }
            }
            ret = HG_Progress(mid->hg_context, hg_progress_timeout);
            if(ret != HG_SUCCESS && ret != HG_TIMEOUT)
            {
                /* TODO: error handling */
                fprintf(stderr, "WARNING: unexpected return code (%d) from HG_Progress()\n", ret);
            }
        }

        /* check for any expired timers */
        margo_check_timers(mid);
    }

    return;
}

ABT_pool* margo_get_handler_pool(margo_instance_id mid)
{
    return(&mid->handler_pool);
}

hg_context_t* margo_get_context(margo_instance_id mid)
{
    return(mid->hg_context);
}

hg_class_t* margo_get_class(margo_instance_id mid)
{
    return(mid->hg_class);
}

hg_return_t margo_register_data(
    margo_instance_id mid,
    hg_id_t id,
    void *data,
    void (*free_callback)(void *)) 
{
	struct margo_rpc_data* margo_data 
		= (struct margo_rpc_data*) HG_Registered_data(margo_get_class(mid), id);
	if(!margo_data) return HG_OTHER_ERROR;
	margo_data->user_data = data;
	margo_data->user_free_callback = free_callback;
	return HG_SUCCESS;
}

void* margo_registered_data(margo_instance_id mid, hg_id_t id)
{
	struct margo_rpc_data* data
		= (struct margo_rpc_data*) HG_Registered_data(margo_get_class(mid), id);
	if(!data) return NULL;
	else return data->user_data;
}

margo_instance_id margo_hg_handle_get_instance(hg_handle_t h)
{
	const struct hg_info* info = HG_Get_info(h);
	if(!info) return MARGO_INSTANCE_NULL;
	struct margo_rpc_data* data = 
		(struct margo_rpc_data*) HG_Registered_data(info->hg_class, info->id);
	if(!data) return MARGO_INSTANCE_NULL;
	return data->mid;
}
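
/* Illustrative sketch: attaching per-RPC user state and recovering the
 * margo instance inside a Mercury RPC handler.  "my_rpc_id" and
 * "my_svc_state" are hypothetical names used only for this example.
 *
 *     margo_register(mid, my_rpc_id);
 *     margo_register_data(mid, my_rpc_id, &my_svc_state, NULL);
 *
 *     // later, inside the handler for my_rpc_id:
 *     margo_instance_id mid = margo_hg_handle_get_instance(handle);
 *     struct my_svc_state *st = margo_registered_data(mid, my_rpc_id);
 */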

static hg_return_t margo_cb(const struct hg_cb_info *info)
{
    hg_return_t hret = info->ret;
    struct margo_cb_arg* arg = info->arg;

    /* propagate return code out through eventual */
    ABT_eventual_set(*(arg->eventual), &hret, sizeof(hret));

#if 0
    if(arg->in_pool)
        arg->mid->waiters_in_progress_pool--;
#endif

    return(HG_SUCCESS);
}

typedef struct
{
    hg_handle_t handle;
} margo_forward_timeout_cb_dat;

static void margo_forward_timeout_cb(void *arg)
{
    margo_forward_timeout_cb_dat *timeout_cb_dat =
        (margo_forward_timeout_cb_dat *)arg;

    /* cancel the Mercury op if the forward timed out */
    HG_Cancel(timeout_cb_dat->handle);
    return;
}

hg_return_t margo_forward_timed(
    margo_instance_id mid,
    hg_handle_t handle,
    void *in_struct,
    double timeout_ms)
{
    int ret;
    hg_return_t hret;
    ABT_eventual eventual;
    hg_return_t* waited_hret;
    margo_timer_t forward_timer;
    margo_forward_timeout_cb_dat timeout_cb_dat;
    struct margo_cb_arg arg;

    ret = ABT_eventual_create(sizeof(hret), &eventual);
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);
    }

    /* set a timer object to expire when this forward times out */
    timeout_cb_dat.handle = handle;
    margo_timer_init(mid, &forward_timer, margo_forward_timeout_cb,
        &timeout_cb_dat, timeout_ms);

    arg.eventual = &eventual;
    arg.mid = mid;
#if 0
    if(margo_xstream_is_in_progress_pool(mid))
    {
        arg.in_pool = 1;
        mid->waiters_in_progress_pool++;
    }
    else
        arg.in_pool = 0;
#endif
    hret = HG_Forward(handle, margo_cb, &arg, in_struct);
    if(hret == 0)
    {
        ABT_eventual_wait(eventual, (void**)&waited_hret);
        hret = *waited_hret;
    }

    /* convert HG_CANCELED to HG_TIMEOUT to indicate op timed out */
    if(hret == HG_CANCELED)
        hret = HG_TIMEOUT;

    /* remove timer if it is still in place (i.e., not timed out) */
    if(hret != HG_TIMEOUT)
        margo_timer_destroy(mid, &forward_timer);

    ABT_eventual_free(&eventual);

    return(hret);
}


hg_return_t margo_forward(
    margo_instance_id mid,
    hg_handle_t handle,
    void *in_struct)
{
    hg_return_t hret = HG_TIMEOUT;
    ABT_eventual eventual;
    int ret;
    hg_return_t* waited_hret;
    struct margo_cb_arg arg;

    ret = ABT_eventual_create(sizeof(hret), &eventual);
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);
    }

    arg.eventual = &eventual;
    arg.mid = mid;
#if 0
    if(margo_xstream_is_in_progress_pool(mid))
    {
        arg.in_pool = 1;
        mid->waiters_in_progress_pool++;
    }
    else
        arg.in_pool = 0;
#endif
    hret = HG_Forward(handle, margo_cb, &arg, in_struct);
    if(hret == 0)
    {
        ABT_eventual_wait(eventual, (void**)&waited_hret);
        hret = *waited_hret;
    }

    ABT_eventual_free(&eventual);

    return(hret);
}
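
/* Illustrative sketch: a blocking RPC issued from a ULT.  The handle
 * creation, "my_rpc_id", "svr_addr", and the in/out structs are
 * placeholders, and proper error checking is omitted.
 *
 *     hg_handle_t handle;
 *     HG_Create(margo_get_context(mid), svr_addr, my_rpc_id, &handle);
 *     hret = margo_forward(mid, handle, &in);     // yields until completion
 *     if(hret == HG_SUCCESS)
 *     {
 *         HG_Get_output(handle, &out);
 *         // ... use out ...
 *         HG_Free_output(handle, &out);
 *     }
 *     HG_Destroy(handle);
 */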

hg_return_t margo_respond(
    margo_instance_id mid,
    hg_handle_t handle,
    void *out_struct)
{
    hg_return_t hret = HG_TIMEOUT;
    ABT_eventual eventual;
    int ret;
    hg_return_t* waited_hret;
    struct margo_cb_arg arg;

    ret = ABT_eventual_create(sizeof(hret), &eventual);
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);
    }

    arg.eventual = &eventual;
    arg.mid = mid;
#if 0
    if(margo_xstream_is_in_progress_pool(mid))
    {
        arg.in_pool = 1;
        mid->waiters_in_progress_pool++;
    }
    else
        arg.in_pool = 0;
#endif
    hret = HG_Respond(handle, margo_cb, &arg, out_struct);
    if(hret == 0)
    {
        ABT_eventual_wait(eventual, (void**)&waited_hret);
        hret = *waited_hret;
    }

    ABT_eventual_free(&eventual);

    return(hret);
}

static hg_return_t margo_bulk_transfer_cb(const struct hg_cb_info *info)
{
    hg_return_t hret = info->ret;
    struct margo_cb_arg* arg = info->arg;

    /* propagate return code out through eventual */
    ABT_eventual_set(*(arg->eventual), &hret, sizeof(hret));

    if(arg->in_pool)
        arg->mid->waiters_in_progress_pool--;

    return(HG_SUCCESS);
}

struct lookup_cb_evt
{
    hg_return_t nret;
    hg_addr_t addr;
};

static hg_return_t margo_addr_lookup_cb(const struct hg_cb_info *info)
{
    struct lookup_cb_evt evt;
    evt.nret = info->ret;
    evt.addr = info->info.lookup.addr;
    struct margo_cb_arg* arg = info->arg;

    /* propagate return code out through eventual */
    ABT_eventual_set(*(arg->eventual), &evt, sizeof(evt));

#if 0
    if(arg->in_pool)
        arg->mid->waiters_in_progress_pool--;
#endif

    return(HG_SUCCESS);
}


hg_return_t margo_addr_lookup(
    margo_instance_id mid,
    const char   *name,
    hg_addr_t    *addr)
{
    hg_return_t nret;
    struct lookup_cb_evt *evt;
    ABT_eventual eventual;
    int ret;
    struct margo_cb_arg arg;

    ret = ABT_eventual_create(sizeof(*evt), &eventual);
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);
    }

    arg.eventual = &eventual;
    arg.mid = mid;
#if 0
    if(margo_xstream_is_in_progress_pool(mid))
    {
        arg.in_pool = 1;
        mid->waiters_in_progress_pool++;
    }
    else
        arg.in_pool = 0;
#endif
    nret = HG_Addr_lookup(mid->hg_context, margo_addr_lookup_cb,
        &arg, name, HG_OP_ID_IGNORE);
    if(nret == 0)
    {
        ABT_eventual_wait(eventual, (void**)&evt);
        *addr = evt->addr;
        nret = evt->nret;
    }

    ABT_eventual_free(&eventual);

    return(nret);
}
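
/* Illustrative sketch: resolving a server address string before issuing
 * RPCs.  The address string is a placeholder for whatever NA plugin is in
 * use.
 *
 *     hg_addr_t svr_addr = HG_ADDR_NULL;
 *     hg_return_t hret = margo_addr_lookup(mid, "tcp://localhost:1234", &svr_addr);
 *     if(hret == HG_SUCCESS)
 *     {
 *         // use svr_addr with HG_Create(); free it with HG_Addr_free() when done
 *     }
 */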

hg_return_t margo_bulk_transfer(
    margo_instance_id mid,
    hg_bulk_op_t op,
    hg_addr_t origin_addr,
    hg_bulk_t origin_handle,
    size_t origin_offset,
    hg_bulk_t local_handle,
    size_t local_offset,
    size_t size)
{
    hg_return_t hret = HG_TIMEOUT;
    hg_return_t *waited_hret;
    ABT_eventual eventual;
    int ret;
    struct margo_cb_arg arg;

    ret = ABT_eventual_create(sizeof(hret), &eventual);
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);
    }

    arg.eventual = &eventual;
    arg.mid = mid;
    if(margo_xstream_is_in_progress_pool(mid))
    {
        arg.in_pool = 1;
        mid->waiters_in_progress_pool++;
    }
    else
        arg.in_pool = 0;
    hret = HG_Bulk_transfer(mid->hg_context, margo_bulk_transfer_cb,
        &arg, op, origin_addr, origin_handle, origin_offset, local_handle,
        local_offset, size, HG_OP_ID_IGNORE);
    if(hret == 0)
    {
        ABT_eventual_wait(eventual, (void**)&waited_hret);
        hret = *waited_hret;
    }

    ABT_eventual_free(&eventual);

    return(hret);
}
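
/* Illustrative sketch: pulling a client's exposed region into a local
 * buffer from inside an RPC handler.  "in.bulk_handle", "in.size", and the
 * handler's "handle" variable are hypothetical, and error checking is
 * omitted.
 *
 *     const struct hg_info *hgi = HG_Get_info(handle);
 *     void *buf = malloc(in.size);
 *     hg_bulk_t local_bulk;
 *     HG_Bulk_create(margo_get_class(mid), 1, &buf, &in.size,
 *         HG_BULK_WRITE_ONLY, &local_bulk);
 *     margo_bulk_transfer(mid, HG_BULK_PULL, hgi->addr,
 *         in.bulk_handle, 0, local_bulk, 0, in.size);
 *     HG_Bulk_free(local_bulk);
 */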

typedef struct
{
    margo_instance_id mid;
    ABT_mutex mutex;
    ABT_cond cond;
    char is_asleep;
    char in_pool;
} margo_thread_sleep_cb_dat;

static void margo_thread_sleep_cb(void *arg)
{
    margo_thread_sleep_cb_dat *sleep_cb_dat =
        (margo_thread_sleep_cb_dat *)arg;

    /* decrement number of waiting threads */
    sleep_cb_dat->mid->waiters_in_progress_pool -=
        sleep_cb_dat->in_pool;

    /* wake up the sleeping thread */
    ABT_mutex_lock(sleep_cb_dat->mutex);
    sleep_cb_dat->is_asleep = 0;
    ABT_cond_signal(sleep_cb_dat->cond);
    ABT_mutex_unlock(sleep_cb_dat->mutex);

    return;
}

void margo_thread_sleep(
    margo_instance_id mid,
    double timeout_ms)
{
    int in_pool = 0;
    margo_timer_t sleep_timer;
    margo_thread_sleep_cb_dat sleep_cb_dat;

    if(margo_xstream_is_in_progress_pool(mid))
        in_pool = 1;

    /* set data needed for sleep callback */
    sleep_cb_dat.mid = mid;
    ABT_mutex_create(&(sleep_cb_dat.mutex));
    ABT_cond_create(&(sleep_cb_dat.cond));
    sleep_cb_dat.is_asleep = 1;
    sleep_cb_dat.in_pool = in_pool;

    /* initialize the sleep timer */
    margo_timer_init(mid, &sleep_timer, margo_thread_sleep_cb,
        &sleep_cb_dat, timeout_ms);

    /* increment number of waiting threads */
    mid->waiters_in_progress_pool += in_pool;

    /* yield thread for specified timeout */
    ABT_mutex_lock(sleep_cb_dat.mutex);
    while(sleep_cb_dat.is_asleep)
        ABT_cond_wait(sleep_cb_dat.cond, sleep_cb_dat.mutex);
    ABT_mutex_unlock(sleep_cb_dat.mutex);

    /* clean up */
    ABT_mutex_free(&sleep_cb_dat.mutex);
    ABT_cond_free(&sleep_cb_dat.cond);

    return;
}
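
/* Illustrative sketch: a ULT that polls some condition once a second
 * without monopolizing its pool.  Unlike sleep(3), this suspends only the
 * calling ULT while the progress loop keeps running.  "shutdown_requested"
 * and "do_periodic_work" are hypothetical.
 *
 *     while(!shutdown_requested)
 *     {
 *         do_periodic_work();
 *         margo_thread_sleep(mid, 1000.0);   // timeout in milliseconds
 *     }
 */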

/* returns 1 if current xstream is in the progress pool, 0 if not */
static int margo_xstream_is_in_progress_pool(margo_instance_id mid)
{
    int ret;
    ABT_xstream xstream;
    ABT_pool pool;

    ret = ABT_xstream_self(&xstream);
    assert(ret == ABT_SUCCESS);
    ret = ABT_xstream_get_main_pools(xstream, 1, &pool);
    assert(ret == ABT_SUCCESS);

    if(pool == mid->progress_pool)
        return(1);
    else
        return(0);
}

static void margo_rpc_data_free(void* ptr)
{
	struct margo_rpc_data* data = (struct margo_rpc_data*) ptr;
	if(data->user_data && data->user_free_callback) {
		data->user_free_callback(data->user_data);
	}
	free(ptr);
}

int margo_lookup_mplex(margo_instance_id mid, hg_id_t id, uint32_t mplex_id, ABT_pool *pool)
{
    struct mplex_key key;
    struct mplex_element *element;

    if(!mplex_id)
    {
        *pool = mid->handler_pool;
        return(0);
    }

    memset(&key, 0, sizeof(key));
    key.id = id;
    key.mplex_id = mplex_id;

    HASH_FIND(hh, mid->mplex_table, &key, sizeof(key), element);
    if(!element)
        return(-1);

    assert(element->key.id == id && element->key.mplex_id == mplex_id);

    *pool = element->pool;

    return(0);
}

int margo_register(margo_instance_id mid, hg_id_t id)
{
	/* register the margo data with the RPC */
	struct margo_rpc_data* margo_data = (struct margo_rpc_data*)malloc(sizeof(struct margo_rpc_data));
	margo_data->mid = mid;
	margo_data->user_data = NULL;
	margo_data->user_free_callback = NULL;
	hg_return_t ret = HG_Register_data(margo_get_class(mid), id, margo_data, margo_rpc_data_free);
	return ret;
}

int margo_register_mplex(margo_instance_id mid, hg_id_t id, uint32_t mplex_id, ABT_pool pool)
{
    struct mplex_key key;
    struct mplex_element *element;

	/* register the margo data with the RPC */
	struct margo_rpc_data* margo_data = (struct margo_rpc_data*)malloc(sizeof(struct margo_rpc_data));
	margo_data->mid = mid;
	margo_data->user_data = NULL;
	margo_data->user_free_callback = NULL;
	hg_return_t ret = HG_Register_data(margo_get_class(mid), id, margo_data, margo_rpc_data_free);
	if(ret != HG_SUCCESS) 
		return ret;

    /* nothing to do, we'll let the handler pool take this directly */
    if(mplex_id == MARGO_DEFAULT_MPLEX_ID)
        return(0);

    memset(&key, 0, sizeof(key));
    key.id = id;
    key.mplex_id = mplex_id;

    HASH_FIND(hh, mid->mplex_table, &key, sizeof(key), element);
    if(element)
        return(0);

    element = malloc(sizeof(*element));
    if(!element)
        return(-1);
    element->key = key;
    element->pool = pool;

    HASH_ADD(hh, mid->mplex_table, key, sizeof(key), element);

    return(0);
}
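
/* Illustrative sketch: directing one RPC id to a dedicated pool via the
 * multiplexing table, then recovering that pool inside a generic handler.
 * "my_rpc_id", "my_mplex_id", "dedicated_pool", and "handler_ult" are
 * hypothetical names used only for this example.
 *
 *     margo_register_mplex(mid, my_rpc_id, my_mplex_id, dedicated_pool);
 *
 *     // in the generic handler, route the work to the registered pool:
 *     ABT_pool pool;
 *     margo_lookup_mplex(mid, my_rpc_id, my_mplex_id, &pool);
 *     ABT_thread_create(pool, handler_ult, handle, ABT_THREAD_ATTR_NULL, NULL);
 */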