margo.c 23.1 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11

/*
 * (C) 2015 The University of Chicago
 * 
 * See COPYRIGHT in top-level directory.
 */

#include <assert.h>
#include <unistd.h>
#include <errno.h>
#include <abt.h>
12
#include <abt-snoozer.h>
13
#include <time.h>
Philip Carns's avatar
Philip Carns committed
14
#include <math.h>
15 16

#include "margo.h"
17
#include "margo-timer.h"
Philip Carns's avatar
Philip Carns committed
18
#include "utlist.h"
19
#include "uthash.h"
20

21 22
#define MERCURY_PROGRESS_TIMEOUT_UB 100 /* 100 milliseconds */

23 24 25 26 27 28 29 30 31 32 33 34 35
/* hash key identifying a multiplexed RPC: (Mercury RPC id, multiplex id) */
struct mplex_key
{
    hg_id_t id;
    uint32_t mplex_id;
};

/* uthash entry mapping an mplex_key to the Argobots pool that services it */
struct mplex_element
{
    struct mplex_key key;
    ABT_pool pool;
    UT_hash_handle hh; /* makes this structure hashable by uthash */
};

36 37
/* per-instance state for one margo runtime */
struct margo_instance
{
    /* provided by caller */
    hg_context_t *hg_context;      /* Mercury context to drive progress on */
    hg_class_t *hg_class;          /* Mercury class derived from hg_context */
    ABT_pool handler_pool;         /* pool that executes RPC handler ULTs */
    ABT_pool progress_pool;        /* pool that runs hg_progress_fn() */

    /* internal to margo for this particular instance */
    int margo_init;                /* nonzero if margo_init() created the HG/ABT resources */
    ABT_thread hg_progress_tid;    /* ULT running hg_progress_fn() */
    int hg_progress_shutdown_flag; /* set to 1 to ask the progress ULT to exit */
    ABT_xstream progress_xstream;
    int owns_progress_pool;        /* nonzero if margo created (and must tear down) progress_xstream */
    ABT_xstream *rpc_xstreams;     /* dedicated RPC handler xstreams, if any */
    int num_handler_pool_threads;

    /* control logic for callers waiting on margo to be finalized */
    int finalize_flag;             /* set once margo_finalize() has run */
    int waiters_in_progress_pool;  /* # of ULTs in the progress pool blocked in margo waits */
    int refcount;                  /* instance is freed when this drops to 0 */
    ABT_mutex finalize_mutex;
    ABT_cond finalize_cond;

    /* hash table to track multiplexed rpcs registered with margo */
    struct mplex_element *mplex_table;
};

64 65 66 67 68 69 70
struct margo_cb_arg
{
    ABT_eventual *eventual;
    margo_instance_id mid;
    char in_pool;
};

71 72 73 74 75 76
struct margo_rpc_data
{
	margo_instance_id mid;
	void* user_data;
	void (*user_free_callback)(void *);
};
77

78
static void hg_progress_fn(void* foo);
79
static int margo_xstream_is_in_progress_pool(margo_instance_id mid);
80
static void margo_rpc_data_free(void* ptr);
81

82 83 84
/* XXX: maybe instead of listen_flag, we can specify either CLIENT or SERVER mode? */
/* Create a margo instance: initializes Argobots (with the snoozer
 * scheduler), Mercury, and the pools/xstreams requested by the caller,
 * then delegates to margo_init_pool().
 *
 * addr_str            - Mercury address string to listen on / initialize with
 * listen_flag         - nonzero to act as a server (enables the RPC pools)
 * use_progress_thread - nonzero to run Mercury progress on its own xstream
 * rpc_thread_count    - >0: that many dedicated RPC xstreams; 0: run
 *                       handlers in the caller's main pool; <0: handlers
 *                       share the progress pool
 *
 * Returns MARGO_INSTANCE_NULL on failure.
 */
margo_instance_id margo_init(const char *addr_str, int listen_flag,
    int use_progress_thread, int rpc_thread_count)
{
    ABT_xstream progress_xstream = ABT_XSTREAM_NULL;
    ABT_pool progress_pool = ABT_POOL_NULL;
    ABT_xstream *rpc_xstreams = NULL;
    ABT_xstream rpc_xstream = ABT_XSTREAM_NULL;
    ABT_pool rpc_pool = ABT_POOL_NULL;
    hg_class_t *hg_class = NULL;
    hg_context_t *hg_context = NULL;
    int i;
    int ret;
    struct margo_instance *mid = MARGO_INSTANCE_NULL;

    ret = ABT_init(0, NULL); /* XXX: argc/argv not currently used by ABT ... */
    if(ret != 0) goto err;

    /* set primary ES to idle without polling */
    /* XXX: is this right? always set snoozer scheduler on the calling xstream? */
    ret = ABT_snoozer_xstream_self_set();
    if(ret != 0) goto err;

    if (use_progress_thread)
    {
        /* dedicated xstream (and pool) just for Mercury progress */
        ret = ABT_snoozer_xstream_create(1, &progress_pool, &progress_xstream);
        if (ret != ABT_SUCCESS) goto err;
    }
    else
    {
        /* run progress in the caller's own main pool */
        ret = ABT_xstream_self(&progress_xstream);
        if (ret != ABT_SUCCESS) goto err;
        ret = ABT_xstream_get_main_pools(progress_xstream, 1, &progress_pool);
        if (ret != ABT_SUCCESS) goto err;
    }

    if (listen_flag)
    {
        if (rpc_thread_count > 0)
        {
            /* dedicated handler pool serviced by rpc_thread_count xstreams */
            rpc_xstreams = malloc(rpc_thread_count * sizeof(*rpc_xstreams));
            if (rpc_xstreams == NULL) goto err;
            ret = ABT_snoozer_xstream_create(rpc_thread_count, &rpc_pool,
                    rpc_xstreams);
            if (ret != ABT_SUCCESS) goto err;
        }
        else if (rpc_thread_count == 0)
        {
            /* handlers run in the caller's main pool */
            ret = ABT_xstream_self(&rpc_xstream);
            if (ret != ABT_SUCCESS) goto err;
            ret = ABT_xstream_get_main_pools(rpc_xstream, 1, &rpc_pool);
            if (ret != ABT_SUCCESS) goto err;
        }
        else
        {
            /* negative count: handlers share the progress pool */
            rpc_pool = progress_pool;
        }
    }

    hg_class = HG_Init(addr_str, listen_flag);
    if(!hg_class) goto err;

    hg_context = HG_Context_create(hg_class);
    if(!hg_context) goto err;

    mid = margo_init_pool(progress_pool, rpc_pool, hg_context);
    if (mid == MARGO_INSTANCE_NULL) goto err;

    /* record what this instance owns so margo_cleanup() can tear it down */
    mid->margo_init = 1;
    mid->owns_progress_pool = use_progress_thread;
    mid->progress_xstream = progress_xstream;
    mid->num_handler_pool_threads = rpc_thread_count < 0 ? 0 : rpc_thread_count;
    mid->rpc_xstreams = rpc_xstreams;
    return mid;

err:
    /* NOTE(review): mid is only non-NULL here if margo_init_pool()
     * succeeded, after which no failure path reaches this label; this
     * branch looks unreachable but is kept for safety */
    if(mid)
    {
        margo_timer_instance_finalize(mid);
        ABT_mutex_free(&mid->finalize_mutex);
        ABT_cond_free(&mid->finalize_cond);
        free(mid);
    }
    if (use_progress_thread && progress_xstream != ABT_XSTREAM_NULL)
    {
        ABT_xstream_join(progress_xstream);
        ABT_xstream_free(&progress_xstream);
    }
    if (rpc_thread_count > 0 && rpc_xstreams != NULL)
    {
        for (i = 0; i < rpc_thread_count; i++)
        {
            ABT_xstream_join(rpc_xstreams[i]);
            ABT_xstream_free(&rpc_xstreams[i]);
        }
        free(rpc_xstreams);
    }
    if(hg_context)
        HG_Context_destroy(hg_context);
    if(hg_class)
        HG_Finalize(hg_class);
    ABT_finalize();
    return MARGO_INSTANCE_NULL;
}

margo_instance_id margo_init_pool(ABT_pool progress_pool, ABT_pool handler_pool,
Jonathan Jenkins's avatar
Jonathan Jenkins committed
188
    hg_context_t *hg_context)
189 190
{
    int ret;
191 192 193
    struct margo_instance *mid;

    mid = malloc(sizeof(*mid));
194
    if(!mid) goto err;
195
    memset(mid, 0, sizeof(*mid));
196

197 198 199
    ABT_mutex_create(&mid->finalize_mutex);
    ABT_cond_create(&mid->finalize_cond);

200 201
    mid->progress_pool = progress_pool;
    mid->handler_pool = handler_pool;
Jonathan Jenkins's avatar
Jonathan Jenkins committed
202
    mid->hg_class = HG_Context_get_class(hg_context);
203
    mid->hg_context = hg_context;
Jonathan Jenkins's avatar
Jonathan Jenkins committed
204
    mid->refcount = 1;
205

206
    ret = margo_timer_instance_init(mid);
207
    if(ret != 0) goto err;
208

209
    ret = ABT_thread_create(mid->progress_pool, hg_progress_fn, mid, 
210
        ABT_THREAD_ATTR_NULL, &mid->hg_progress_tid);
211 212 213 214
    if(ret != 0) goto err;

err:
    if(mid)
215
    {
216 217 218
        margo_timer_instance_finalize(mid);
        ABT_mutex_free(&mid->finalize_mutex);
        ABT_cond_free(&mid->finalize_cond);
219
        free(mid);
220
    }
221
    return MARGO_INSTANCE_NULL;
222 223
}

Jonathan Jenkins's avatar
Jonathan Jenkins committed
224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248
/* Release all resources owned by the instance.  Invoked by whichever of
 * margo_finalize()/margo_wait_for_finalize() drops the refcount to 0. */
static void margo_cleanup(margo_instance_id mid)
{
    int i;

    margo_timer_instance_finalize(mid);

    ABT_mutex_free(&mid->finalize_mutex);
    ABT_cond_free(&mid->finalize_cond);

    /* tear down the dedicated progress xstream, if margo created one */
    if (mid->owns_progress_pool)
    {
        ABT_xstream_join(mid->progress_xstream);
        ABT_xstream_free(&mid->progress_xstream);
    }

    /* likewise for any dedicated RPC handler xstreams */
    if (mid->num_handler_pool_threads > 0)
    {
        for (i = 0; i < mid->num_handler_pool_threads; i++)
        {
            ABT_xstream_join(mid->rpc_xstreams[i]);
            ABT_xstream_free(&mid->rpc_xstreams[i]);
        }
        free(mid->rpc_xstreams);
    }

    /* only shut down Mercury/Argobots if margo_init() started them;
     * margo_init_pool() callers own their own HG/ABT lifetimes */
    if (mid->margo_init)
    {
        if (mid->hg_context)
            HG_Context_destroy(mid->hg_context);
        if (mid->hg_class)
            HG_Finalize(mid->hg_class);
        ABT_finalize();
    }

    free(mid);
}

261
/* Begin shutdown of a margo instance: stop the progress ULT, wake any
 * margo_wait_for_finalize() callers, and free the instance if no one
 * else still holds a reference. */
void margo_finalize(margo_instance_id mid)
{
    int do_cleanup;

    /* tell progress thread to wrap things up */
    mid->hg_progress_shutdown_flag = 1;

    /* wait for it to shutdown cleanly */
    ABT_thread_join(mid->hg_progress_tid);
    ABT_thread_free(&mid->hg_progress_tid);

    ABT_mutex_lock(mid->finalize_mutex);
    mid->finalize_flag = 1;
    ABT_cond_broadcast(mid->finalize_cond);

    /* drop our reference; waiters each hold their own (see
     * margo_wait_for_finalize) */
    mid->refcount--;
    do_cleanup = mid->refcount == 0;

    ABT_mutex_unlock(mid->finalize_mutex);

    /* if there was noone waiting on the finalize at the time of the finalize
     * broadcast, then we're safe to clean up. Otherwise, let the finalizer do
     * it */
    if (do_cleanup)
        margo_cleanup(mid);

    return;
}

/* Block the calling ULT until margo_finalize() runs on this instance;
 * the last reference holder to return performs the final cleanup. */
void margo_wait_for_finalize(margo_instance_id mid)
{
    int in_pool = 0;
    int do_cleanup;

    /* Is this waiter in the same pool as the pool running the progress
     * thread?
     */
    if(margo_xstream_is_in_progress_pool(mid))
        in_pool = 1;

    ABT_mutex_lock(mid->finalize_mutex);

        /* let the progress loop know a blocked waiter occupies its pool so
         * it can safely sleep in HG_Progress() */
        mid->waiters_in_progress_pool += in_pool;
        mid->refcount++;
            
        while(!mid->finalize_flag)
            ABT_cond_wait(mid->finalize_cond, mid->finalize_mutex);

        mid->refcount--;
        do_cleanup = mid->refcount == 0;

    ABT_mutex_unlock(mid->finalize_mutex);

    /* last one out tears the instance down */
    if (do_cleanup)
        margo_cleanup(mid);

    return;
}

/* dedicated thread function to drive Mercury progress */
/* Runs until mid->hg_progress_shutdown_flag is set: repeatedly triggers
 * completed Mercury callbacks, then either polls (when other ULTs in the
 * progress pool are runnable) or blocks in HG_Progress() for up to
 * MERCURY_PROGRESS_TIMEOUT_UB ms, capped by the next margo timer expiry. */
static void hg_progress_fn(void* foo)
{
    int ret;
    unsigned int actual_count;
    struct margo_instance *mid = (struct margo_instance *)foo;
    size_t size;
    unsigned int hg_progress_timeout = MERCURY_PROGRESS_TIMEOUT_UB;
    double next_timer_exp;
    int trigger_happened;

    while(!mid->hg_progress_shutdown_flag)
    {
        /* drain all currently completed operations, one callback at a time */
        trigger_happened = 0;
        do {
            ret = HG_Trigger(mid->hg_context, 0, 1, &actual_count);
            if(ret == HG_SUCCESS && actual_count > 0)
                trigger_happened = 1;
        } while((ret == HG_SUCCESS) && actual_count && !mid->hg_progress_shutdown_flag);

        /* give ULTs awakened by the triggered callbacks a chance to run */
        if(trigger_happened)
            ABT_thread_yield();

        ABT_pool_get_total_size(mid->progress_pool, &size);
        /* Are there any other threads executing in this pool that are *not*
         * blocked on margo_wait_for_finalize()?  If so then, we can't
         * sleep here or else those threads will not get a chance to
         * execute.
         */
        if(size > mid->waiters_in_progress_pool)
        {
            //printf("DEBUG: Margo progress function running while other ULTs are eligible for execution (size: %d, waiters: %d.\n", size, mid->waiters_in_progress_pool);

            /* TODO: this is being executed more than is necessary (i.e.
             * in cases where there are other legitimate ULTs eligible
             * for execution that are not blocking on any events, Margo
             * or otherwise). Maybe we need an abt scheduling tweak here
             * to make sure that this ULT is the lowest priority in that
             * scenario.
             */
            /* non-blocking progress so runnable ULTs aren't starved */
            ret = HG_Progress(mid->hg_context, 0);
            if(ret == HG_SUCCESS)
            {
                /* Mercury completed something; loop around to trigger
                 * callbacks 
                 */
            }
            else if(ret == HG_TIMEOUT)
            {
                /* No completion; yield here to allow other ULTs to run */
                ABT_thread_yield();
            }
            else
            {
                /* TODO: error handling */
                fprintf(stderr, "WARNING: unexpected return code (%d) from HG_Progress()\n", ret);
            }
        }
        else
        {
            /* nothing else runnable: we may block, but never longer than
             * the next queued margo timer allows */
            hg_progress_timeout = MERCURY_PROGRESS_TIMEOUT_UB;
            ret = margo_timer_get_next_expiration(mid, &next_timer_exp);
            if(ret == 0)
            {
                /* there is a queued timer, don't block long enough
                 * to keep this timer waiting
                 */
                if(next_timer_exp >= 0.0)
                {
                    next_timer_exp *= 1000; /* convert to milliseconds */
                    if(next_timer_exp < MERCURY_PROGRESS_TIMEOUT_UB)
                        hg_progress_timeout = (unsigned int)next_timer_exp;
                }
                else
                {
                    /* timer already expired; don't block at all */
                    hg_progress_timeout = 0;
                }
            }
            ret = HG_Progress(mid->hg_context, hg_progress_timeout);
            if(ret != HG_SUCCESS && ret != HG_TIMEOUT)
            {
                /* TODO: error handling */
                fprintf(stderr, "WARNING: unexpected return code (%d) from HG_Progress()\n", ret);
            }
        }

        /* check for any expired timers */
        margo_check_timers(mid);
    }

    return;
}

413
/* Expose the Argobots pool in which this instance runs RPC handlers. */
ABT_pool* margo_get_handler_pool(margo_instance_id mid)
{
    return &mid->handler_pool;
}

418 419 420 421 422 423 424 425 426 427
/* Expose the Mercury context associated with this instance. */
hg_context_t* margo_get_context(margo_instance_id mid)
{
    return mid->hg_context;
}

/* Expose the Mercury class associated with this instance. */
hg_class_t* margo_get_class(margo_instance_id mid)
{
    return mid->hg_class;
}

428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458
/* Attach an opaque user pointer (and optional destructor) to an RPC id
 * previously registered via margo_register()/margo_register_mplex().
 * Returns HG_OTHER_ERROR if no margo data is registered for the id. */
hg_return_t margo_register_data(
    margo_instance_id mid,
    hg_id_t id,
    void *data,
    void (*free_callback)(void *)) 
{
    struct margo_rpc_data *rpc_data;

    rpc_data = (struct margo_rpc_data *)HG_Registered_data(
        margo_get_class(mid), id);
    if (rpc_data == NULL)
        return HG_OTHER_ERROR;

    rpc_data->user_data = data;
    rpc_data->user_free_callback = free_callback;
    return HG_SUCCESS;
}

/* Retrieve the user pointer attached to an RPC id via
 * margo_register_data(), or NULL if none is registered. */
void* margo_registered_data(margo_instance_id mid, hg_id_t id)
{
    struct margo_rpc_data *rpc_data;

    rpc_data = (struct margo_rpc_data *)HG_Registered_data(
        margo_get_class(mid), id);

    return rpc_data ? rpc_data->user_data : NULL;
}

/* Recover the margo instance that registered the RPC behind a handle,
 * or MARGO_INSTANCE_NULL if the handle carries no margo data. */
margo_instance_id margo_hg_handle_get_instance(hg_handle_t h)
{
    const struct hg_info *info;
    struct margo_rpc_data *rpc_data;

    info = HG_Get_info(h);
    if (info == NULL)
        return MARGO_INSTANCE_NULL;

    rpc_data = (struct margo_rpc_data *)HG_Registered_data(
        info->hg_class, info->id);
    if (rpc_data == NULL)
        return MARGO_INSTANCE_NULL;

    return rpc_data->mid;
}
459

Jonathan Jenkins's avatar
Jonathan Jenkins committed
460
/* HG callback used by margo_forward()/margo_respond(): hands the
 * operation's return code back to the blocked caller via an eventual. */
static hg_return_t margo_cb(const struct hg_cb_info *info)
{
    hg_return_t hret = info->ret;
    struct margo_cb_arg* arg = info->arg;

    /* propagate return code out through eventual */
    ABT_eventual_set(*(arg->eventual), &hret, sizeof(hret));
    
#if 0
    /* disabled: progress-pool waiter accounting (matching increments in
     * the forward/respond paths are also #if 0'd out) */
    if(arg->in_pool)
        arg->mid->waiters_in_progress_pool--;
#endif

    return(HG_SUCCESS);
}

476 477 478
/* state shared between margo_forward_timed() and its timeout callback */
typedef struct
{
    hg_handle_t handle; /* handle to cancel if the forward times out */
} margo_forward_timeout_cb_dat;

/* timer callback: fires when a margo_forward_timed() deadline passes */
static void margo_forward_timeout_cb(void *arg)
{
    margo_forward_timeout_cb_dat *timeout_cb_dat =
        (margo_forward_timeout_cb_dat *)arg;

    /* cancel the Mercury op if the forward timed out */
    HG_Cancel(timeout_cb_dat->handle);
    return;
}

491 492 493 494 495 496
/* Forward an RPC and block the calling ULT until it completes or until
 * timeout_ms elapses; on timeout the operation is cancelled and
 * HG_TIMEOUT is returned. */
hg_return_t margo_forward_timed(
    margo_instance_id mid,
    hg_handle_t handle,
    void *in_struct,
    double timeout_ms)
{
    int ret;
    hg_return_t hret;
    ABT_eventual eventual;
    hg_return_t* waited_hret;
    margo_timer_t forward_timer;
    margo_forward_timeout_cb_dat timeout_cb_dat;
    struct margo_cb_arg arg;

    ret = ABT_eventual_create(sizeof(hret), &eventual);
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);        
    }

    /* set a timer object to expire when this forward times out */
    timeout_cb_dat.handle = handle;
    margo_timer_init(mid, &forward_timer, margo_forward_timeout_cb,
        &timeout_cb_dat, timeout_ms);

    arg.eventual = &eventual;
    arg.mid = mid;
#if 0
    /* disabled: progress-pool waiter accounting (decrement in margo_cb
     * is likewise #if 0'd) */
    if(margo_xstream_is_in_progress_pool(mid))
    {
        arg.in_pool = 1;
        mid->waiters_in_progress_pool++;
    }
    else
        arg.in_pool = 0;
#endif
    hret = HG_Forward(handle, margo_cb, &arg, in_struct);
    if(hret == 0)
    {
        /* block until margo_cb() signals completion */
        ABT_eventual_wait(eventual, (void**)&waited_hret);
        hret = *waited_hret;
    }

    /* convert HG_CANCELED to HG_TIMEOUT to indicate op timed out */
    if(hret == HG_CANCELED)
        hret = HG_TIMEOUT;

    /* remove timer if it is still in place (i.e., not timed out) */
    if(hret != HG_TIMEOUT)
        margo_timer_destroy(mid, &forward_timer);

    ABT_eventual_free(&eventual);

    return(hret);
}


548
/* Forward an RPC and block the calling ULT until it completes;
 * returns the Mercury status of the operation. */
hg_return_t margo_forward(
    margo_instance_id mid,
    hg_handle_t handle,
    void *in_struct)
{
    hg_return_t hret = HG_TIMEOUT;
    ABT_eventual eventual;
    int ret;
    hg_return_t* waited_hret;
    struct margo_cb_arg arg;

    ret = ABT_eventual_create(sizeof(hret), &eventual);
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);        
    }

    arg.eventual = &eventual;
    arg.mid = mid;
#if 0
    /* disabled: progress-pool waiter accounting (decrement in margo_cb
     * is likewise #if 0'd) */
    if(margo_xstream_is_in_progress_pool(mid))
    {
        arg.in_pool = 1;
        mid->waiters_in_progress_pool++;
    }
    else
        arg.in_pool = 0;
#endif
    hret = HG_Forward(handle, margo_cb, &arg, in_struct);
    if(hret == 0)
    {
        /* block until margo_cb() signals completion */
        ABT_eventual_wait(eventual, (void**)&waited_hret);
        hret = *waited_hret;
    }

    ABT_eventual_free(&eventual);

    return(hret);
}

/* Send an RPC response and block the calling ULT until the send
 * completes; returns the Mercury status of the operation. */
hg_return_t margo_respond(
    margo_instance_id mid,
    hg_handle_t handle,
    void *out_struct)
{
    hg_return_t hret = HG_TIMEOUT;
    ABT_eventual eventual;
    int ret;
    hg_return_t* waited_hret;
    struct margo_cb_arg arg;

    ret = ABT_eventual_create(sizeof(hret), &eventual);
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);
    }

    arg.eventual = &eventual;
    arg.mid = mid;
#if 0
    /* disabled: progress-pool waiter accounting (decrement in margo_cb
     * is likewise #if 0'd) */
    if(margo_xstream_is_in_progress_pool(mid))
    {
        arg.in_pool = 1;
        mid->waiters_in_progress_pool++;
    }
    else
        arg.in_pool = 0;
#endif
    hret = HG_Respond(handle, margo_cb, &arg, out_struct);
    if(hret == 0)
    {
        /* block until margo_cb() signals completion */
        ABT_eventual_wait(eventual, (void**)&waited_hret);
        hret = *waited_hret;
    }

    ABT_eventual_free(&eventual);

    return(hret);
}

628

629
/* HG callback for margo_bulk_transfer(): propagates the result through
 * an eventual and releases the caller's progress-pool waiter slot. */
static hg_return_t margo_bulk_transfer_cb(const struct hg_cb_info *info)
{
    hg_return_t hret = info->ret;
    struct margo_cb_arg* arg = info->arg;

    /* propagate return code out through eventual */
    ABT_eventual_set(*(arg->eventual), &hret, sizeof(hret));
    
    /* the blocked caller no longer counts as a progress-pool waiter;
     * matches the increment in margo_bulk_transfer() */
    if(arg->in_pool)
        arg->mid->waiters_in_progress_pool--;

    return(HG_SUCCESS);
}

Philip Carns's avatar
Philip Carns committed
643 644
/* result bundle passed from margo_addr_lookup_cb() back to the waiter */
struct lookup_cb_evt
{
    hg_return_t nret; /* lookup status */
    hg_addr_t addr;   /* resolved address */
};

/* HG callback for margo_addr_lookup(): forwards the status and resolved
 * address to the blocked caller via an eventual. */
static hg_return_t margo_addr_lookup_cb(const struct hg_cb_info *info)
{
    struct lookup_cb_evt evt;
    evt.nret = info->ret;
    evt.addr = info->info.lookup.addr;
    struct margo_cb_arg* arg = info->arg;

    /* propagate return code out through eventual */
    ABT_eventual_set(*(arg->eventual), &evt, sizeof(evt));

#if 0
    /* disabled: progress-pool waiter accounting */
    if(arg->in_pool)
        arg->mid->waiters_in_progress_pool--;
#endif
    
    return(HG_SUCCESS);
}


668
/* Resolve a Mercury address string, blocking the calling ULT until the
 * lookup completes.  On success *addr receives the resolved address. */
hg_return_t margo_addr_lookup(
    margo_instance_id mid,
    const char   *name,
    hg_addr_t    *addr)
{
    hg_return_t nret;
    struct lookup_cb_evt *evt;
    ABT_eventual eventual;
    int ret;
    struct margo_cb_arg arg;

    ret = ABT_eventual_create(sizeof(*evt), &eventual);
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);        
    }

    arg.eventual = &eventual;
    arg.mid = mid;
#if 0
    /* disabled: progress-pool waiter accounting (decrement in the lookup
     * callback is likewise #if 0'd) */
    if(margo_xstream_is_in_progress_pool(mid))
    {
        arg.in_pool = 1;
        mid->waiters_in_progress_pool++;
    }
    else
        arg.in_pool = 0;
#endif
    nret = HG_Addr_lookup(mid->hg_context, margo_addr_lookup_cb,
        &arg, name, HG_OP_ID_IGNORE);
    if(nret == 0)
    {
        /* block until the lookup callback fires */
        ABT_eventual_wait(eventual, (void**)&evt);
        *addr = evt->addr;
        nret = evt->nret;
    }

    ABT_eventual_free(&eventual);

    return(nret);
}

710
/* Perform a bulk data transfer and block the calling ULT until it
 * completes.  A caller running in the progress pool is counted as a
 * waiter so the progress loop knows it may sleep. */
hg_return_t margo_bulk_transfer(
    margo_instance_id mid,
    hg_bulk_op_t op,
    hg_addr_t origin_addr,
    hg_bulk_t origin_handle,
    size_t origin_offset,
    hg_bulk_t local_handle,
    size_t local_offset,
    size_t size)
{
    hg_return_t hret = HG_TIMEOUT;
    hg_return_t *waited_hret;
    ABT_eventual eventual;
    int ret;
    struct margo_cb_arg arg;

    ret = ABT_eventual_create(sizeof(hret), &eventual);
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);        
    }

    arg.eventual = &eventual;
    arg.mid = mid;
    /* note: unlike the forward/respond paths, waiter accounting here is
     * active; margo_bulk_transfer_cb() performs the matching decrement */
    if(margo_xstream_is_in_progress_pool(mid))
    {
        arg.in_pool = 1;
        mid->waiters_in_progress_pool++;
    }
    else
        arg.in_pool = 0;
    hret = HG_Bulk_transfer(mid->hg_context, margo_bulk_transfer_cb,
        &arg, op, origin_addr, origin_handle, origin_offset, local_handle,
        local_offset, size, HG_OP_ID_IGNORE);
    if(hret == 0)
    {
        /* block until the transfer callback fires */
        ABT_eventual_wait(eventual, (void**)&waited_hret);
        hret = *waited_hret;
    }

    ABT_eventual_free(&eventual);

    return(hret);
}

755 756
/* state shared between margo_thread_sleep() and its timer callback */
typedef struct
{
    margo_instance_id mid;
    ABT_mutex mutex;
    ABT_cond cond;
    char is_asleep; /* cleared by the callback when the timeout fires */
    char in_pool;   /* nonzero if the sleeper occupies the progress pool */
} margo_thread_sleep_cb_dat;

/* timer callback: wakes a ULT blocked in margo_thread_sleep() */
static void margo_thread_sleep_cb(void *arg)
{
    margo_thread_sleep_cb_dat *sleep_cb_dat =
        (margo_thread_sleep_cb_dat *)arg;

    /* decrement number of waiting threads */
    sleep_cb_dat->mid->waiters_in_progress_pool -=
        sleep_cb_dat->in_pool;

    /* wake up the sleeping thread */
    ABT_mutex_lock(sleep_cb_dat->mutex);
    sleep_cb_dat->is_asleep = 0;
    ABT_cond_signal(sleep_cb_dat->cond);
    ABT_mutex_unlock(sleep_cb_dat->mutex);

    return;
}

/* Put the calling ULT to sleep for timeout_ms milliseconds using a margo
 * timer, without blocking the underlying execution stream. */
void margo_thread_sleep(
    margo_instance_id mid,
    double timeout_ms)
{
    int in_pool = 0;
    margo_timer_t sleep_timer;
    margo_thread_sleep_cb_dat sleep_cb_dat;

    if(margo_xstream_is_in_progress_pool(mid))
        in_pool = 1;

    /* set data needed for sleep callback */
    sleep_cb_dat.mid = mid;
    ABT_mutex_create(&(sleep_cb_dat.mutex));
    ABT_cond_create(&(sleep_cb_dat.cond));
    sleep_cb_dat.is_asleep = 1;
    sleep_cb_dat.in_pool = in_pool;

    /* initialize the sleep timer */
    margo_timer_init(mid, &sleep_timer, margo_thread_sleep_cb,
        &sleep_cb_dat, timeout_ms);

    /* increment number of waiting threads */
    mid->waiters_in_progress_pool += in_pool;

    /* yield thread for specified timeout */
    ABT_mutex_lock(sleep_cb_dat.mutex);
    while(sleep_cb_dat.is_asleep)
        ABT_cond_wait(sleep_cb_dat.cond, sleep_cb_dat.mutex);
    ABT_mutex_unlock(sleep_cb_dat.mutex);

    /* clean up */
    ABT_mutex_free(&sleep_cb_dat.mutex);
    ABT_cond_free(&sleep_cb_dat.cond);

    return;
}

820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836
/* returns 1 if current xstream is in the progress pool, 0 if not */
static int margo_xstream_is_in_progress_pool(margo_instance_id mid)
{
    int ret;
    ABT_xstream self_xstream;
    ABT_pool main_pool;

    ret = ABT_xstream_self(&self_xstream);
    assert(ret == ABT_SUCCESS);
    ret = ABT_xstream_get_main_pools(self_xstream, 1, &main_pool);
    assert(ret == ABT_SUCCESS);

    return (main_pool == mid->progress_pool) ? 1 : 0;
}
Philip Carns's avatar
Philip Carns committed
837

838 839 840 841 842 843 844 845 846
/* Destructor invoked by Mercury when an RPC's registered data is
 * released; runs the user's free callback (if any) before freeing the
 * margo wrapper itself. */
static void margo_rpc_data_free(void* ptr)
{
    struct margo_rpc_data *rpc_data = (struct margo_rpc_data *)ptr;

    if (rpc_data->user_data != NULL && rpc_data->user_free_callback != NULL)
        rpc_data->user_free_callback(rpc_data->user_data);

    free(ptr);
}

847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865
/* Look up the Argobots pool that should service RPC (id, mplex_id).
 * mplex_id == 0 maps to the instance's default handler pool.
 * Returns 0 on success, -1 if no pool is registered for the pair. */
int margo_lookup_mplex(margo_instance_id mid, hg_id_t id, uint32_t mplex_id, ABT_pool *pool)
{
    struct mplex_key key;
    struct mplex_element *element;

    if(!mplex_id)
    {
        *pool = mid->handler_pool;
        return(0);
    }

    /* zero the key so struct padding doesn't perturb the hash */
    memset(&key, 0, sizeof(key));
    key.id = id;
    key.mplex_id = mplex_id;

    HASH_FIND(hh, mid->mplex_table, &key, sizeof(key), element);
    if(!element)
        return(-1);

    assert(element->key.id == id && element->key.mplex_id == mplex_id);

    *pool = element->pool;

    return(0);
}

873 874 875 876 877 878 879 880 881 882 883
/* Attach margo bookkeeping data to an RPC id so handlers can later
 * recover the owning instance (and user data) from a handle.
 * Returns HG_SUCCESS (0) on success or an hg_return_t error code. */
int margo_register(margo_instance_id mid, hg_id_t id)
{
	struct margo_rpc_data* margo_data;
	hg_return_t ret;

	/* register the margo data with the RPC */
	margo_data = (struct margo_rpc_data*)malloc(sizeof(struct margo_rpc_data));
	if(!margo_data)
		return HG_NOMEM_ERROR; /* BUGFIX: previously dereferenced NULL on OOM */
	margo_data->mid = mid;
	margo_data->user_data = NULL;
	margo_data->user_free_callback = NULL;

	ret = HG_Register_data(margo_get_class(mid), id, margo_data, margo_rpc_data_free);
	if(ret != HG_SUCCESS)
		free(margo_data); /* BUGFIX: don't leak on registration failure */
	return ret;
}

Philip Carns's avatar
Philip Carns committed
884 885
/* Register margo bookkeeping data for an RPC id and associate the
 * (id, mplex_id) pair with the given Argobots pool.  mplex_id of
 * MARGO_DEFAULT_MPLEX_ID means "use the default handler pool" and skips
 * the hash-table entry.  Returns 0 / HG_SUCCESS on success, an
 * hg_return_t error from registration, or -1 on allocation failure. */
int margo_register_mplex(margo_instance_id mid, hg_id_t id, uint32_t mplex_id, ABT_pool pool)
{
    struct mplex_key key;
    struct mplex_element *element;
    struct margo_rpc_data *margo_data;
    hg_return_t ret;

    /* register the margo data with the RPC */
    margo_data = (struct margo_rpc_data*)malloc(sizeof(struct margo_rpc_data));
    if(!margo_data)
        return HG_NOMEM_ERROR; /* BUGFIX: previously dereferenced NULL on OOM */
    margo_data->mid = mid;
    margo_data->user_data = NULL;
    margo_data->user_free_callback = NULL;

    ret = HG_Register_data(margo_get_class(mid), id, margo_data, margo_rpc_data_free);
    if(ret != HG_SUCCESS)
    {
        free(margo_data); /* BUGFIX: don't leak on registration failure */
        return ret;
    }

    /* nothing to do, we'll let the handler pool take this directly */
    if(mplex_id == MARGO_DEFAULT_MPLEX_ID)
        return(0);

    /* zero the key so struct padding doesn't perturb the hash */
    memset(&key, 0, sizeof(key));
    key.id = id;
    key.mplex_id = mplex_id;

    /* already mapped: keep the existing pool association */
    HASH_FIND(hh, mid->mplex_table, &key, sizeof(key), element);
    if(element)
        return(0);

    element = malloc(sizeof(*element));
    if(!element)
        return(-1);
    element->key = key;
    element->pool = pool;

    HASH_ADD(hh, mid->mplex_table, key, sizeof(key), element);

    return(0);
}