margo.c 12 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11

/*
 * (C) 2015 The University of Chicago
 * 
 * See COPYRIGHT in top-level directory.
 */

#include <assert.h>
#include <errno.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

#include <abt.h>
#include <abt-snoozer.h>

#include "margo.h"
#include "margo-timer.h"
#include "utlist.h"

/* TODO: including core.h for cancel definition, presumably this will be 
 * available in top level later?
 */
#include <mercury_core.h>

25 26
struct margo_instance
{
27
    /* provided by caller */
28 29
    hg_context_t *hg_context;
    hg_class_t *hg_class;
30 31 32
    ABT_pool handler_pool;
    ABT_pool progress_pool;

33
    /* internal to margo for this particular instance */
34 35
    ABT_thread hg_progress_tid;
    int hg_progress_shutdown_flag;
36 37 38 39 40 41 42

    /* control logic for callers waiting on margo to be finalized */
    int finalize_flag;
    int finalize_waiters_in_progress_pool;
    ABT_mutex finalize_mutex;
    ABT_cond finalize_cond;

43 44 45 46 47 48 49 50 51 52 53 54
    int table_index;
};

/* Association between a Mercury class and the margo instance driving it;
 * consulted by margo_hg_class_to_instance(). */
struct margo_handler_mapping
{
    hg_class_t *class;
    margo_instance_id mid;
};

/* fixed-size global registry of live margo instances */
#define MAX_HANDLER_MAPPING 8
static int handler_mapping_table_size = 0;
static struct margo_handler_mapping handler_mapping_table[MAX_HANDLER_MAPPING] = {0};
55

56
/* forward declaration: body of the dedicated progress ULT (defined below) */
static void hg_progress_fn(void* foo);
57 58 59 60 61 62 63 64 65


/* NOTE(review): this type appears unused anywhere in this file --
 * presumably a leftover utlist-style list node; confirm before removing. */
struct handler_entry
{
    void* fn;
    hg_handle_t handle;
    struct handler_entry *next; 
};

66 67
/* Creates a margo instance that drives progress on the given Mercury
 * context using Argobots pools.
 *
 * progress_pool:        pool that will run the internal progress ULT
 * handler_pool:         pool intended to run RPC handler ULTs
 * hg_context, hg_class: Mercury objects supplied (and owned) by the caller
 *
 * Returns the new instance, or MARGO_INSTANCE_NULL on failure.
 */
margo_instance_id margo_init(ABT_pool progress_pool, ABT_pool handler_pool,
    hg_context_t *hg_context, hg_class_t *hg_class)
{
    int ret;
    struct margo_instance *mid;

    /* the global class -> instance table is fixed size */
    if(handler_mapping_table_size >= MAX_HANDLER_MAPPING)
        return(MARGO_INSTANCE_NULL);

    mid = malloc(sizeof(*mid));
    if(!mid)
        return(MARGO_INSTANCE_NULL);
    memset(mid, 0, sizeof(*mid));

    ABT_mutex_create(&mid->finalize_mutex);
    ABT_cond_create(&mid->finalize_cond);

    mid->progress_pool = progress_pool;
    mid->handler_pool = handler_pool;
    mid->hg_class = hg_class;
    mid->hg_context = hg_context;

    ret = margo_timer_instance_init(mid);
    if(ret != 0)
    {
        fprintf(stderr, "Error: margo_timer_instance_init()\n");
        /* BUGFIX: release the sync objects created above (were leaked) */
        ABT_mutex_free(&mid->finalize_mutex);
        ABT_cond_free(&mid->finalize_cond);
        free(mid);
        return(MARGO_INSTANCE_NULL);
    }

    ret = ABT_thread_create(mid->progress_pool, hg_progress_fn, mid, 
        ABT_THREAD_ATTR_NULL, &mid->hg_progress_tid);
    if(ret != 0)
    {
        fprintf(stderr, "Error: ABT_thread_create()\n");
        /* BUGFIX: tear down the timer instance and sync objects too;
         * the original path only freed mid, leaking both. */
        margo_timer_instance_finalize(mid);
        ABT_mutex_free(&mid->finalize_mutex);
        ABT_cond_free(&mid->finalize_cond);
        free(mid);
        return(MARGO_INSTANCE_NULL);
    }

    /* register this instance in the global class -> instance table */
    handler_mapping_table[handler_mapping_table_size].mid = mid;
    handler_mapping_table[handler_mapping_table_size].class = mid->hg_class;
    mid->table_index = handler_mapping_table_size;
    handler_mapping_table_size++;

    return mid;
}

113
/* Shuts down the progress ULT for mid, unregisters the instance, and
 * wakes any threads blocked in margo_wait_for_finalize().  See the note
 * below about intentionally leaked resources. */
void margo_finalize(margo_instance_id mid)
{
    int i;

    /* tell progress thread to wrap things up */
    mid->hg_progress_shutdown_flag = 1;

    /* wait for it to shutdown cleanly */
    ABT_thread_join(mid->hg_progress_tid);
    ABT_thread_free(&mid->hg_progress_tid);

    /* compact the global mapping table over this instance's slot */
    for(i=mid->table_index; i<(handler_mapping_table_size-1); i++)
    {
        handler_mapping_table[i] = handler_mapping_table[i+1];
        /* BUGFIX: keep each shifted instance's back-pointer in sync;
         * otherwise a later finalize of that instance would compact the
         * wrong slots. */
        handler_mapping_table[i].mid->table_index = i;
    }
    handler_mapping_table_size--;

    /* announce finalization to any waiters */
    ABT_mutex_lock(mid->finalize_mutex);
    mid->finalize_flag = 1;
    ABT_cond_broadcast(mid->finalize_cond);
    ABT_mutex_unlock(mid->finalize_mutex);

    /* TODO: yuck, there is a race here if someone was really waiting for
     * finalize; we can't destroy the data structures out from under them.
     * We could fix this by reference counting so that the last caller
     * (whether a finalize() caller or wait_for_finalize() caller) knows it
     * is safe to turn off the lights on their way out.  For now we just leak 
     * a small amount of memory.
     */
#if 0
    margo_timer_instance_finalize(mid);
    ABT_mutex_free(&mid->finalize_mutex);
    ABT_cond_free(&mid->finalize_cond);
    free(mid);
#endif

    return;
}

/* Blocks the calling ULT until margo_finalize() is invoked on mid.
 * Returns immediately (without waiting) if the caller's execution stream
 * cannot be queried. */
void margo_wait_for_finalize(margo_instance_id mid)
{
    ABT_xstream self_xstream;
    ABT_pool my_pool;
    int ret;
    int waiting_in_progress_pool = 0;

    ret = ABT_xstream_self(&self_xstream);
    if(ret != 0)
        return;
    ret = ABT_xstream_get_main_pools(self_xstream, 1, &my_pool);
    if(ret != 0)
        return;

    /* Record whether this waiter shares a pool with the progress ULT;
     * hg_progress_fn() uses that count to decide whether it may block
     * inside HG_Progress() without starving anyone. */
    if(my_pool == mid->progress_pool)
        waiting_in_progress_pool = 1;

    ABT_mutex_lock(mid->finalize_mutex);
    mid->finalize_waiters_in_progress_pool += waiting_in_progress_pool;
    while(!mid->finalize_flag)
        ABT_cond_wait(mid->finalize_cond, mid->finalize_mutex);
    ABT_mutex_unlock(mid->finalize_mutex);

    return;
}

/* dedicated thread function to drive Mercury progress */
186
static void hg_progress_fn(void* foo)
187 188 189
{
    int ret;
    unsigned int actual_count;
190
    struct margo_instance *mid = (struct margo_instance *)foo;
191
    size_t size;
192

193
    while(!mid->hg_progress_shutdown_flag)
194 195
    {
        do {
196
            ret = HG_Trigger(mid->hg_context, 0, 1, &actual_count);
197
        } while((ret == HG_SUCCESS) && actual_count && !mid->hg_progress_shutdown_flag);
198

199
        if(!mid->hg_progress_shutdown_flag)
200
        {
201
            ABT_pool_get_total_size(mid->progress_pool, &size);
202 203 204 205 206 207
            /* Are there any other threads executing in this pool that are *not*
             * blocked on margo_wait_for_finalize()?  If so then, we can't
             * sleep here or else those threads will not get a chance to
             * execute.
             */
            if(size > mid->finalize_waiters_in_progress_pool)
208
            {
209
                HG_Progress(mid->hg_context, 0);
210 211 212 213
                ABT_thread_yield();
            }
            else
            {
214
                HG_Progress(mid->hg_context, 100);
215 216
            }
        }
217

218
        /* check for any expired timers */
219
        margo_check_timers(mid);
220 221
    }

222
    return;
223 224
}

225
/* Accessor: pointer to the Argobots pool used for RPC handler ULTs. */
ABT_pool* margo_get_handler_pool(margo_instance_id mid)
{
    return &mid->handler_pool;
}

230 231 232 233 234 235 236 237 238 239 240
/* Accessor: the Mercury context associated with this instance. */
hg_context_t* margo_get_context(margo_instance_id mid)
{
    return mid->hg_context;
}

/* Accessor: the Mercury class associated with this instance. */
hg_class_t* margo_get_class(margo_instance_id mid)
{
    return mid->hg_class;
}


Jonathan Jenkins's avatar
Jonathan Jenkins committed
241
static hg_return_t margo_cb(const struct hg_cb_info *info)
242 243 244 245 246 247 248 249 250 251
{
    hg_return_t hret = info->ret;

    ABT_eventual *eventual = info->arg;
    /* propagate return code out through eventual */
    ABT_eventual_set(*eventual, &hret, sizeof(hret));
    
    return(HG_SUCCESS);
}

252 253 254
/* State handed to the timeout timer used by margo_forward_timed(). */
typedef struct
{
    hg_handle_t handle;   /* in-flight operation to cancel on expiry */
} margo_forward_timeout_cb_dat;
256 257 258

static void margo_forward_timeout_cb(void *arg)
{
Shane Snyder's avatar
Shane Snyder committed
259 260
    margo_forward_timeout_cb_dat *timeout_cb_dat =
        (margo_forward_timeout_cb_dat *)arg;
261 262

    /* cancel the Mercury op if the forward timed out */
Shane Snyder's avatar
Shane Snyder committed
263
    HG_Core_cancel(timeout_cb_dat->handle);
264 265 266
    return;
}

267 268 269 270 271 272
/* Forwards an RPC and blocks the calling ULT until it completes or
 * until timeout_ms elapses, in which case the operation is cancelled
 * and the resulting Mercury status is returned.
 *
 * mid:        margo instance driving progress for this handle
 * handle:     Mercury handle for the RPC
 * in_struct:  input argument struct (encoded by Mercury)
 * timeout_ms: timeout in milliseconds
 *
 * Returns the operation's hg_return_t, or HG_NOMEM_ERROR if the
 * internal eventual cannot be created.
 */
hg_return_t margo_forward_timed(
    margo_instance_id mid,
    hg_handle_t handle,
    void *in_struct,
    double timeout_ms)
{
    int ret;
    hg_return_t hret = HG_TIMEOUT;
    ABT_eventual eventual;
    hg_return_t* waited_hret;
    margo_timer_t forward_timer;
    margo_forward_timeout_cb_dat timeout_cb_dat;

    ret = ABT_eventual_create(sizeof(hret), &eventual);
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);        
    }

    /* set a timer object to expire when this forward times out */
    timeout_cb_dat.handle = handle;
    margo_timer_init(mid, &forward_timer, margo_forward_timeout_cb,
        &timeout_cb_dat, timeout_ms);

    hret = HG_Forward(handle, margo_cb, &eventual, in_struct);
    /* idiom fix: compare against HG_SUCCESS rather than the literal 0 */
    if(hret == HG_SUCCESS)
    {
        ABT_eventual_wait(eventual, (void**)&waited_hret);
        hret = *waited_hret;
    }

    /* remove timer if it is still in place (i.e., not timed out) */
    if(hret != HG_TIMEOUT)
        margo_timer_destroy(mid, &forward_timer);

    ABT_eventual_free(&eventual);

    return(hret);
}


309
hg_return_t margo_forward(
310
    margo_instance_id mid,
311 312 313 314 315 316 317 318 319 320 321 322 323 324
    hg_handle_t handle,
    void *in_struct)
{
    hg_return_t hret = HG_TIMEOUT;
    ABT_eventual eventual;
    int ret;
    hg_return_t* waited_hret;

    ret = ABT_eventual_create(sizeof(hret), &eventual);
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);        
    }

Jonathan Jenkins's avatar
Jonathan Jenkins committed
325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353
    hret = HG_Forward(handle, margo_cb, &eventual, in_struct);
    if(hret == 0)
    {
        ABT_eventual_wait(eventual, (void**)&waited_hret);
        hret = *waited_hret;
    }

    ABT_eventual_free(&eventual);

    return(hret);
}

/* Sends the response for an RPC and blocks the calling ULT until the
 * send completes.
 *
 * mid:        margo instance driving progress for this handle
 * handle:     Mercury handle the response belongs to
 * out_struct: output argument struct (encoded by Mercury)
 *
 * Returns the operation's hg_return_t, or HG_NOMEM_ERROR if the
 * internal eventual cannot be created.
 */
hg_return_t margo_respond(
    margo_instance_id mid,
    hg_handle_t handle,
    void *out_struct)
{
    hg_return_t hret = HG_TIMEOUT;
    ABT_eventual eventual;
    int ret;
    hg_return_t* waited_hret;

    ret = ABT_eventual_create(sizeof(hret), &eventual);
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);
    }

    hret = HG_Respond(handle, margo_cb, &eventual, out_struct);
    /* idiom fix: compare against HG_SUCCESS rather than the literal 0 */
    if(hret == HG_SUCCESS)
    {
        ABT_eventual_wait(eventual, (void**)&waited_hret);
        hret = *waited_hret;
    }

    ABT_eventual_free(&eventual);

    return(hret);
}

365

366
static hg_return_t margo_bulk_transfer_cb(const struct hg_cb_info *info)
367
{
368 369
    hg_return_t hret = info->ret;
    ABT_eventual *eventual = info->arg;
370 371 372 373 374 375 376

    /* propagate return code out through eventual */
    ABT_eventual_set(*eventual, &hret, sizeof(hret));
    
    return(HG_SUCCESS);
}

Philip Carns's avatar
Philip Carns committed
377 378
struct lookup_cb_evt
{
379 380
    hg_return_t nret;
    hg_addr_t addr;
Philip Carns's avatar
Philip Carns committed
381 382
};

383
static hg_return_t margo_addr_lookup_cb(const struct hg_cb_info *info)
Philip Carns's avatar
Philip Carns committed
384 385
{
    struct lookup_cb_evt evt;
386 387
    evt.nret = info->ret;
    evt.addr = info->info.lookup.addr;
Philip Carns's avatar
Philip Carns committed
388

389
    ABT_eventual *eventual = info->arg;
Philip Carns's avatar
Philip Carns committed
390 391 392 393

    /* propagate return code out through eventual */
    ABT_eventual_set(*eventual, &evt, sizeof(evt));
    
394
    return(HG_SUCCESS);
Philip Carns's avatar
Philip Carns committed
395 396 397
}


398
hg_return_t margo_addr_lookup(
399
    margo_instance_id mid,
400
    hg_context_t *context,
Philip Carns's avatar
Philip Carns committed
401
    const char   *name,
402
    hg_addr_t    *addr)
403
{
404
    hg_return_t nret;
Philip Carns's avatar
Philip Carns committed
405
    struct lookup_cb_evt *evt;
406 407 408
    ABT_eventual eventual;
    int ret;

Philip Carns's avatar
Philip Carns committed
409
    ret = ABT_eventual_create(sizeof(*evt), &eventual);
410 411 412 413 414
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);        
    }

415 416
    nret = HG_Addr_lookup(context, margo_addr_lookup_cb,
        &eventual, name, HG_OP_ID_IGNORE);
417 418
    if(nret == 0)
    {
Philip Carns's avatar
Philip Carns committed
419 420 421
        ABT_eventual_wait(eventual, (void**)&evt);
        *addr = evt->addr;
        nret = evt->nret;
422 423 424 425 426 427 428
    }

    ABT_eventual_free(&eventual);

    return(nret);
}

429
hg_return_t margo_bulk_transfer(
430
    margo_instance_id mid,
431
    hg_context_t *context,
432
    hg_bulk_op_t op,
433
    hg_addr_t origin_addr,
434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464
    hg_bulk_t origin_handle,
    size_t origin_offset,
    hg_bulk_t local_handle,
    size_t local_offset,
    size_t size)
{
    hg_return_t hret = HG_TIMEOUT;
    hg_return_t *waited_hret;
    ABT_eventual eventual;
    int ret;

    ret = ABT_eventual_create(sizeof(hret), &eventual);
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);        
    }

    hret = HG_Bulk_transfer(context, margo_bulk_transfer_cb, &eventual, op, 
        origin_addr, origin_handle, origin_offset, local_handle, local_offset,
        size, HG_OP_ID_IGNORE);
    if(hret == 0)
    {
        ABT_eventual_wait(eventual, (void**)&waited_hret);
        hret = *waited_hret;
    }

    ABT_eventual_free(&eventual);

    return(hret);
}

465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484
/* Synchronization pair shared between margo_thread_sleep() and its
 * timer callback. */
typedef struct
{
    ABT_mutex mutex;
    ABT_cond cond;
} margo_thread_sleep_cb_dat;

/* Timer callback for margo_thread_sleep(): signals the condition the
 * sleeping ULT is waiting on once the timeout expires. */
static void margo_thread_sleep_cb(void *arg)
{
    margo_thread_sleep_cb_dat *sleep_cb_dat =
        (margo_thread_sleep_cb_dat *)arg;

    /* wake up the sleeping thread */
    ABT_mutex_lock(sleep_cb_dat->mutex);
    ABT_cond_signal(sleep_cb_dat->cond);
    ABT_mutex_unlock(sleep_cb_dat->mutex);

    return;
}

void margo_thread_sleep(
485
    margo_instance_id mid,
486 487 488 489 490 491 492 493 494 495
    double timeout_ms)
{
    margo_timer_t sleep_timer;
    margo_thread_sleep_cb_dat sleep_cb_dat;

    /* set data needed for sleep callback */
    ABT_mutex_create(&(sleep_cb_dat.mutex));
    ABT_cond_create(&(sleep_cb_dat.cond));

    /* initialize the sleep timer */
496
    margo_timer_init(mid, &sleep_timer, margo_thread_sleep_cb,
497 498 499 500 501 502 503 504 505 506 507
        &sleep_cb_dat, timeout_ms);

    /* yield thread for specified timeout */
    ABT_mutex_lock(sleep_cb_dat.mutex);
    ABT_cond_wait(sleep_cb_dat.cond, sleep_cb_dat.mutex);
    ABT_mutex_unlock(sleep_cb_dat.mutex);

    return;
}


508
/* Maps a Mercury class back to the margo instance registered for it by
 * margo_init(); returns NULL when no registered instance matches. */
margo_instance_id margo_hg_class_to_instance(hg_class_t *cl)
{
    int idx;

    for(idx = 0; idx < handler_mapping_table_size; idx++)
    {
        if(handler_mapping_table[idx].class == cl)
            return handler_mapping_table[idx].mid;
    }

    return NULL;
}