/*
 * (C) 2015 The University of Chicago
 * 
 * See COPYRIGHT in top-level directory.
 */

#include <stdio.h>
#include <assert.h>
#include <unistd.h>
#include <abt.h>
#include <abt-snoozer.h>
#include <margo.h>

#include "my-rpc.h"

/* This is an example client program that issues 4 concurrent RPCs, each of
 * which includes a bulk transfer driven by the server.
 *
 * Each client operation executes as an independent ULT in Argobots.
 * The HG forward call is executed using asynchronous operations.
 */
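
/* NOTE: abt-snoozer provides Argobots schedulers that put an execution
 * stream to sleep when it has no work to do rather than busy-polling.
 */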

struct run_my_rpc_args
{
    int val;
    margo_instance_id mid;
    hg_context_t *hg_context;
    hg_class_t *hg_class;
    hg_addr_t svr_addr;
};

static void run_my_rpc(void *_arg);

static hg_id_t my_rpc_id;
static hg_id_t my_rpc_shutdown_id;

int main(int argc, char **argv) 
{
    struct run_my_rpc_args args[4];
    ABT_thread threads[4];
    int i;
    int ret;
    ABT_xstream xstream;
    ABT_pool pool;
    margo_instance_id mid;
    ABT_xstream progress_xstream;
    ABT_pool progress_pool;
    hg_context_t *hg_context;
    hg_class_t *hg_class;
    hg_addr_t svr_addr = HG_ADDR_NULL;
    hg_handle_t handle;
  
    if(argc != 2)
    {
        fprintf(stderr, "Usage: ./client <server_addr>\n");
        return(-1);
    }
       
    /* boilerplate HG initialization steps */
    /***************************************/
    /* NOTE: the reason for passing in the server address into HG_Init() on
     * the client is just to make sure that Mercury initializes the right
     * transport.
     */
    hg_class = HG_Init(argv[1], HG_FALSE);
    if(!hg_class)
    {
        fprintf(stderr, "Error: HG_Init()\n");
        return(-1);
    }
    hg_context = HG_Context_create(hg_class);
    if(!hg_context)
    {
        fprintf(stderr, "Error: HG_Context_create()\n");
        HG_Finalize(hg_class);
        return(-1);
    }

    /* set up argobots */
    /***************************************/
    ret = ABT_init(argc, argv);
    if(ret != 0)
    {
        fprintf(stderr, "Error: ABT_init()\n");
        return(-1);
    }

    /* set primary ES to idle without polling */
    ret = ABT_snoozer_xstream_self_set();
    if(ret != 0)
    {
        fprintf(stderr, "Error: ABT_snoozer_xstream_self_set()\n");
        return(-1);
    }

    /* retrieve current pool to use for ULT creation */
    ret = ABT_xstream_self(&xstream);
    if(ret != 0)
    {
        fprintf(stderr, "Error: ABT_xstream_self()\n");
        return(-1);
    }
    ret = ABT_xstream_get_main_pools(xstream, 1, &pool);
    if(ret != 0)
    {
        fprintf(stderr, "Error: ABT_xstream_get_main_pools()\n");
        return(-1);
    }

    /* create a dedicated ES to drive Mercury progress */
    ret = ABT_snoozer_xstream_create(1, &progress_pool, &progress_xstream);
    if(ret != 0)
    {
        fprintf(stderr, "Error: ABT_snoozer_xstream_create()\n");
        return(-1);
    }

    /* actually start margo */
    /* provide argobots pools for driving communication progress and
     * executing rpc handlers, as well as the class and context for Mercury
     * communication.  The rpc handler pool is null in this example program
     * because this is a pure client that will not service rpc requests.
     */
    /***************************************/
    mid = margo_init(progress_pool, ABT_POOL_NULL, hg_context, hg_class);

    /* register RPC */
    my_rpc_id = MERCURY_REGISTER(hg_class, "my_rpc", my_rpc_in_t, my_rpc_out_t,
        NULL);
    my_rpc_shutdown_id = MERCURY_REGISTER(hg_class, "my_shutdown_rpc", void, void,
        NULL);

    /* find addr for server */
    ret = margo_addr_lookup(mid, hg_context, argv[1], &svr_addr);
    assert(ret == 0);

    for(i=0; i<4; i++)
    {
        args[i].val = i;
        args[i].mid = mid;
        args[i].hg_class = hg_class;
        args[i].hg_context = hg_context;
        args[i].svr_addr = svr_addr;

        /* Each ULT gets a pointer to an element of the args array to use
         * as input for the run_my_rpc() function.
         */
        ret = ABT_thread_create(pool, run_my_rpc, &args[i],
            ABT_THREAD_ATTR_NULL, &threads[i]);
        if(ret != 0)
        {
            fprintf(stderr, "Error: ABT_thread_create()\n");
            return(-1);
        }

    }

    /* yield to one of the threads */
    ABT_thread_yield_to(threads[0]);

    for(i=0; i<4; i++)
    {
        ret = ABT_thread_join(threads[i]);
        if(ret != 0)
        {
            fprintf(stderr, "Error: ABT_thread_join()\n");
            return(-1);
        }
        ret = ABT_thread_free(&threads[i]);
        if(ret != 0)
        {
            fprintf(stderr, "Error: ABT_thread_join()\n");
            return(-1);
        }
    }

    /* send one rpc to server to shut it down */

    /* create handle */
    ret = HG_Create(hg_context, svr_addr, my_rpc_shutdown_id, &handle);
    assert(ret == 0);

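    /* the shutdown rpc was registered with void input and output types,
     * so there is no input struct to pass to margo_forward()
     */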
    margo_forward(mid, handle, NULL);
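
    /* margo_forward() does not return until the rpc has completed, so the
     * handle can be destroyed now rather than leaked */
    HG_Destroy(handle);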

    /* shut down everything */
    margo_finalize(mid);
    
    ABT_xstream_join(progress_xstream);
    ABT_xstream_free(&progress_xstream);

    ABT_finalize();

    HG_Context_destroy(hg_context);
    HG_Finalize(hg_class);

    return(0);
}

static void run_my_rpc(void *_arg)
{
    struct run_my_rpc_args *arg = _arg;
    hg_handle_t handle;
    my_rpc_in_t in;
    my_rpc_out_t out;
    int ret;
    hg_size_t size;
    void* buffer;
    struct hg_info *hgi;

    printf("ULT [%d] running.\n", arg->val);

    /* allocate buffer for bulk transfer */
    size = 512;
    buffer = calloc(1, size);
    assert(buffer);
    sprintf((char*)buffer, "Hello world!\n");

    /* create handle */
    ret = HG_Create(arg->hg_context, arg->svr_addr, my_rpc_id, &handle);
    assert(ret == 0);

    /* register buffer for rdma/bulk access by server */
    hgi = HG_Get_info(handle);
    assert(hgi);
    ret = HG_Bulk_create(hgi->hg_class, 1, &buffer, &size,
        HG_BULK_READ_ONLY, &in.bulk_handle);
    assert(ret == 0);

    /* Send rpc. Note that we are also transmitting the bulk handle in the
     * input struct.  It was set above. 
     */ 
    in.input_val = arg->val;
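    /* margo_forward() suspends this ULT until the response arrives; the
     * dedicated progress ES continues to drive communication for the other
     * ULTs in the meantime */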
    margo_forward(arg->mid, handle, &in);

    /* decode response */
    ret = HG_Get_output(handle, &out);
    assert(ret == 0);

    printf("Got response ret: %d\n", out.ret);

    /* clean up resources consumed by this rpc */
    HG_Bulk_free(in.bulk_handle);
    HG_Free_output(handle, &out);
    HG_Destroy(handle);
    free(buffer);

    printf("ULT [%d] done.\n", arg->val);
    return;
}