/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
 *
 *  (C) 2001 by Argonne National Laboratory.
 *      See COPYRIGHT in top-level directory.
 */

#include "mpiimpl.h"

#if !defined(MPID_REQUEST_PTR_ARRAY_SIZE)
/* use a larger default size of 64 in order to enhance SQMR performance */
#define MPID_REQUEST_PTR_ARRAY_SIZE 64
#endif

/* -- Begin Profiling Symbol Block for routine MPI_Waitall */
#if defined(HAVE_PRAGMA_WEAK)
#pragma weak MPI_Waitall = PMPI_Waitall
#elif defined(HAVE_PRAGMA_HP_SEC_DEF)
#pragma _HP_SECONDARY_DEF PMPI_Waitall  MPI_Waitall
#elif defined(HAVE_PRAGMA_CRI_DUP)
#pragma _CRI duplicate MPI_Waitall as PMPI_Waitall
#elif defined(HAVE_WEAK_ATTRIBUTE)
int MPI_Waitall(int count, MPI_Request array_of_requests[], MPI_Status array_of_statuses[]) __attribute__((weak,alias("PMPI_Waitall")));
#endif
/* -- End Profiling Symbol Block */

/* Define MPICH_MPI_FROM_PMPI if weak symbols are not supported to build
   the MPI routines */
#ifndef MPICH_MPI_FROM_PMPI
#undef MPI_Waitall
#define MPI_Waitall PMPI_Waitall

#undef FUNCNAME
#define FUNCNAME MPIR_Waitall_impl
#undef FCNAME
#define FCNAME MPIU_QUOTE(FUNCNAME)
int MPIR_Waitall_impl(int count, MPI_Request array_of_requests[],
                      MPI_Status array_of_statuses[])
{
    int mpi_errno = MPI_SUCCESS;
    MPID_Request * request_ptr_array[MPID_REQUEST_PTR_ARRAY_SIZE];
    MPID_Request ** request_ptrs = request_ptr_array;
    MPI_Status * status_ptr;
    MPID_Progress_state progress_state;
    int i, j;
    int n_completed;
    int active_flag;
    int rc;
    int n_greqs;
    int proc_failure = 0;
    const int ignoring_statuses = (array_of_statuses == MPI_STATUSES_IGNORE);
    int optimize = ignoring_statuses; /* see NOTE-O1 */
    MPIU_CHKLMEM_DECL(1);

    /* Convert MPI request handles to request object pointers */
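    /* For counts up to MPID_REQUEST_PTR_ARRAY_SIZE the on-stack array above is
     * used; larger counts fall back to a temporary heap allocation that is
     * released at fn_exit. */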
    if (count > MPID_REQUEST_PTR_ARRAY_SIZE)
    {
	MPIU_CHKLMEM_MALLOC(request_ptrs, MPID_Request **, count * sizeof(MPID_Request *), mpi_errno, "request pointers");
    }

    n_greqs = 0;
    n_completed = 0;
    for (i = 0; i < count; i++)
    {
	if (array_of_requests[i] != MPI_REQUEST_NULL)
	{
	    MPID_Request_get_ptr(array_of_requests[i], request_ptrs[i]);
	    /* Validate object pointers if error checking is enabled */
#           ifdef HAVE_ERROR_CHECKING
	    {
		MPID_BEGIN_ERROR_CHECKS;
		{
		    MPID_Request_valid_ptr( request_ptrs[i], mpi_errno );
                    if (mpi_errno) MPIU_ERR_POP(mpi_errno);
                    MPIU_ERR_CHKANDJUMP1((request_ptrs[i]->kind == MPID_REQUEST_MPROBE),
                                         mpi_errno, MPI_ERR_ARG, "**msgnotreq", "**msgnotreq %d", i);
		}
		MPID_END_ERROR_CHECKS;
	    }
#           endif
            if (request_ptrs[i]->kind != MPID_REQUEST_RECV &&
                request_ptrs[i]->kind != MPID_REQUEST_SEND)
            {
                optimize = FALSE;
            }

            if (request_ptrs[i]->kind == MPID_UREQUEST)
                ++n_greqs;
	}
	else
	{
	    status_ptr = (array_of_statuses != MPI_STATUSES_IGNORE) ? &array_of_statuses[i] : MPI_STATUS_IGNORE;
	    MPIR_Status_set_empty(status_ptr);
	    request_ptrs[i] = NULL;
	    n_completed += 1;
            optimize = FALSE;
	}
    }
    
    if (n_completed == count)
    {
	goto fn_exit;
    }

    /* NOTE-O1: high-message-rate optimization.  For simple send and recv
     * operations with MPI_STATUSES_IGNORE we use a fastpath that strips out
     * as many unnecessary jumps and as much error handling as possible.
     *
     * Possible variation: permit request_ptrs[i]==NULL at the cost of an
     * additional branch inside the for-loop below. */
    if (optimize) {
        MPID_Progress_start(&progress_state);
        for (i = 0; i < count; ++i) {
            while (!MPID_Request_is_complete(request_ptrs[i])) {
                mpi_errno = MPID_Progress_wait(&progress_state);
                /* must check and handle the error, can't guard with HAVE_ERROR_CHECKING, but it's
                 * OK for the error case to be slower */
                if (unlikely(mpi_errno)) {
                    /* --BEGIN ERROR HANDLING-- */
                    MPID_Progress_end(&progress_state);
                    MPIU_ERR_POP(mpi_errno);
                    /* --END ERROR HANDLING-- */
                }
            }
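            /* the request has completed: release it and reset the user's
             * handle; status handling is skipped on this path since statuses
             * are being ignored */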
            mpi_errno = MPIR_Request_complete_fastpath(&array_of_requests[i], request_ptrs[i]);
            if (mpi_errno) MPIU_ERR_POP(mpi_errno);
        }

        MPID_Progress_end(&progress_state);

        goto fn_exit;
    }

    /* ------ "slow" code path below ------ */

    /* Grequest_waitall may run the progress engine - thus, we don't 
       invoke progress_start until after running Grequest_waitall */
    /* first, complete any generalized requests */
    if (n_greqs)
    {
        mpi_errno = MPIR_Grequest_waitall(count, request_ptrs);
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);
    }
    
    MPID_Progress_start(&progress_state);

    for (i = 0; i < count; i++)
    {
        if (request_ptrs[i] == NULL)
        {
            if (!ignoring_statuses)
                array_of_statuses[i].MPI_ERROR = MPI_SUCCESS;
            continue;
        }
        
        /* wait for ith request to complete */
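        /* a request that is pending because of a process failure will never
         * complete, so stop waiting in that case as well and report the error
         * below */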
        while (!MPID_Request_is_complete(request_ptrs[i]) && !MPID_Request_is_pending_failure(request_ptrs[i]))
        {
            /* generalized requests should already be finished */
            MPIU_Assert(request_ptrs[i]->kind != MPID_UREQUEST);
            
            mpi_errno = MPID_Progress_wait(&progress_state);
            if (mpi_errno != MPI_SUCCESS) {
                /* --BEGIN ERROR HANDLING-- */
                MPID_Progress_end(&progress_state);
                MPIU_ERR_POP(mpi_errno);
                /* --END ERROR HANDLING-- */
            }
        }

        if (MPID_Request_is_complete(request_ptrs[i])) {
            /* complete the request and check the status */
            status_ptr = (ignoring_statuses) ? MPI_STATUS_IGNORE : &array_of_statuses[i];
            rc = MPIR_Request_complete(&array_of_requests[i], request_ptrs[i], status_ptr, &active_flag);
        } else {
            /* If the request isn't complete, it's because it's pending due
             * to a failure, so set rc accordingly. */
            rc = request_ptrs[i]->status.MPI_ERROR;
            proc_failure = 1;
        }
        if (rc == MPI_SUCCESS)
        {
            request_ptrs[i] = NULL;
            if (!ignoring_statuses)
                status_ptr->MPI_ERROR = MPI_SUCCESS;
        }
        else
        {
            /* req completed with an error */
            mpi_errno = MPI_ERR_IN_STATUS;

            if (!proc_failure) {
                if (MPIX_ERR_PROC_FAILED == MPIR_ERR_GET_CLASS(rc))
                    proc_failure = 1;
            }

            if (!ignoring_statuses)
            {
                /* set the error code for this request */
                status_ptr->MPI_ERROR = rc;

                /* set the error codes for the rest of the uncompleted requests to PENDING */
                for (j = i+1; j < count; ++j)
                {
                    if (!ignoring_statuses)
                    {
                        if (request_ptrs[j] == NULL)
                        {
                            /* either the user specified MPI_REQUEST_NULL, or this is a completed greq */
                            array_of_statuses[j].MPI_ERROR = MPI_SUCCESS;
                        }
                        else
                        {
                            if (!proc_failure)
                                array_of_statuses[j].MPI_ERROR = MPI_ERR_PENDING;
                            else
                                array_of_statuses[j].MPI_ERROR = MPIX_ERR_PROC_FAILED_PENDING;
                        }
                    }
                }
            }
            break;
        }
    }
    MPID_Progress_end(&progress_state);
        
 fn_exit:
    if (count > MPID_REQUEST_PTR_ARRAY_SIZE)
    {
	MPIU_CHKLMEM_FREEALL();
    }

    return mpi_errno;
 fn_fail:
    goto fn_exit;
}

#endif

#undef FUNCNAME
#define FUNCNAME MPI_Waitall
#undef FCNAME
#define FCNAME MPIU_QUOTE(FUNCNAME)
/*@
    MPI_Waitall - Waits for all given MPI Requests to complete

Input Parameters:
+ count - list length (integer) 
- array_of_requests - array of request handles (array of handles)

Output Parameters:
. array_of_statuses - array of status objects (array of Statuses).  May be
  'MPI_STATUSES_IGNORE'.

Notes:

If one or more of the requests completes with an error, 'MPI_ERR_IN_STATUS' is
returned.  An error value will be present in the elements of 'array_of_statuses'
associated with the requests that failed.  Likewise, the 'MPI_ERROR' field in
the status elements associated with requests that have completed successfully
will be 'MPI_SUCCESS'.  Finally, the status elements associated with requests
that have not completed will have an 'MPI_ERROR' value of 'MPI_ERR_PENDING'.

While it is possible to list a request handle more than once in
array_of_requests, doing so is erroneous and may cause the program to
terminate unexpectedly or produce incorrect results.
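
A typical usage pattern (an illustrative sketch; the buffers, counts, ranks,
and communicator below are placeholders, not names from this file) posts
several nonblocking operations and then waits for all of them at once.
.vb
    MPI_Request reqs[2];
    MPI_Irecv(rbuf, n, MPI_INT, src, tag, comm, &reqs[0]);
    MPI_Isend(sbuf, n, MPI_INT, dst, tag, comm, &reqs[1]);
    MPI_Waitall(2, reqs, MPI_STATUSES_IGNORE);
.ve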

.N waitstatus

.N ThreadSafe

.N Fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_REQUEST
.N MPI_ERR_ARG
.N MPI_ERR_IN_STATUS
@*/
int MPI_Waitall(int count, MPI_Request array_of_requests[], 
		MPI_Status array_of_statuses[])
{
    int mpi_errno = MPI_SUCCESS;
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_WAITALL);

    MPIR_ERRTEST_INITIALIZED_ORDIE();
    
    MPIU_THREAD_CS_ENTER(ALLFUNC,);
    MPID_MPI_PT2PT_FUNC_ENTER(MPID_STATE_MPI_WAITALL);

    /* Check the arguments */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            int i;
	    MPIR_ERRTEST_COUNT(count, mpi_errno);

	    if (count != 0) {
		MPIR_ERRTEST_ARGNULL(array_of_requests, "array_of_requests", mpi_errno);
		/* NOTE: MPI_STATUSES_IGNORE != NULL */
	    
		MPIR_ERRTEST_ARGNULL(array_of_statuses, "array_of_statuses", mpi_errno);
	    }

	    for (i = 0; i < count; i++) {
		MPIR_ERRTEST_ARRAYREQUEST_OR_NULL(array_of_requests[i], i, mpi_errno);
	    }
	}
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */
    
    /* ... body of routine ...  */

    mpi_errno = MPIR_Waitall_impl(count, array_of_requests, array_of_statuses);
    if (mpi_errno) goto fn_fail;

    /* ... end of body of routine ... */
    
 fn_exit:
    MPID_MPI_PT2PT_FUNC_EXIT(MPID_STATE_MPI_WAITALL);
    MPIU_THREAD_CS_EXIT(ALLFUNC,);
    return mpi_errno;

 fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#ifdef HAVE_ERROR_CHECKING
    mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE,
				     FCNAME, __LINE__, MPI_ERR_OTHER,
				     "**mpi_waitall",
				     "**mpi_waitall %d %p %p",
				     count, array_of_requests,
				     array_of_statuses);
#endif
    mpi_errno = MPIR_Err_return_comm(NULL, FCNAME, mpi_errno);
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}