/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
 *
 *  (C) 2001 by Argonne National Laboratory.
 *      See COPYRIGHT in top-level directory.
 */

#include "mpiimpl.h"

#if !defined(MPID_REQUEST_PTR_ARRAY_SIZE)
/* use a larger default size of 64 in order to enhance SQMR performance */
#define MPID_REQUEST_PTR_ARRAY_SIZE 64
#endif

/* -- Begin Profiling Symbol Block for routine MPI_Waitall */
#if defined(HAVE_PRAGMA_WEAK)
#pragma weak MPI_Waitall = PMPI_Waitall
#elif defined(HAVE_PRAGMA_HP_SEC_DEF)
#pragma _HP_SECONDARY_DEF PMPI_Waitall  MPI_Waitall
#elif defined(HAVE_PRAGMA_CRI_DUP)
#pragma _CRI duplicate MPI_Waitall as PMPI_Waitall
#endif
/* -- End Profiling Symbol Block */
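
/* With the weak symbols above, a profiling library can intercept
   MPI_Waitall while still reaching the real implementation through
   PMPI_Waitall.  A minimal sketch of such an interceptor (illustrative
   only; record_wait_time is a hypothetical user-supplied helper):

       int MPI_Waitall(int count, MPI_Request array_of_requests[],
                       MPI_Status array_of_statuses[])
       {
           double t0 = PMPI_Wtime();
           int rc = PMPI_Waitall(count, array_of_requests, array_of_statuses);
           record_wait_time(PMPI_Wtime() - t0);
           return rc;
       }
*/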

/* Define MPICH_MPI_FROM_PMPI if weak symbols are not supported to build
   the MPI routines */
#ifndef MPICH_MPI_FROM_PMPI
#undef MPI_Waitall
#define MPI_Waitall PMPI_Waitall

#undef FUNCNAME
#define FUNCNAME MPIR_Waitall_impl
#undef FCNAME
#define FCNAME MPIU_QUOTE(FUNCNAME)
int MPIR_Waitall_impl(int count, MPI_Request array_of_requests[],
                      MPI_Status array_of_statuses[])
{
    int mpi_errno = MPI_SUCCESS;
    MPID_Request * request_ptr_array[MPID_REQUEST_PTR_ARRAY_SIZE];
    MPID_Request ** request_ptrs = request_ptr_array;
    MPI_Status * status_ptr;
    MPID_Progress_state progress_state;
    int i, j;
    int n_completed;
    int active_flag;
    int rc;
    int n_greqs;
    const int ignoring_statuses = (array_of_statuses == MPI_STATUSES_IGNORE);
    int optimize = ignoring_statuses; /* see NOTE-O1 */
    MPIU_CHKLMEM_DECL(1);

    /* Convert MPI request handles to request object pointers */
    if (count > MPID_REQUEST_PTR_ARRAY_SIZE)
    {
	MPIU_CHKLMEM_MALLOC(request_ptrs, MPID_Request **, count * sizeof(MPID_Request *), mpi_errno, "request pointers");
    }

    n_greqs = 0;
    n_completed = 0;
    for (i = 0; i < count; i++)
    {
	if (array_of_requests[i] != MPI_REQUEST_NULL)
	{
	    MPID_Request_get_ptr(array_of_requests[i], request_ptrs[i]);
	    /* Validate object pointers if error checking is enabled */
#           ifdef HAVE_ERROR_CHECKING
	    {
		MPID_BEGIN_ERROR_CHECKS;
		{
		    MPID_Request_valid_ptr( request_ptrs[i], mpi_errno );
                    if (mpi_errno) MPIU_ERR_POP(mpi_errno);
                    MPIU_ERR_CHKANDJUMP1((request_ptrs[i]->kind == MPID_REQUEST_MPROBE),
                                         mpi_errno, MPI_ERR_ARG, "**msgnotreq", "**msgnotreq %d", i);
		}
		MPID_END_ERROR_CHECKS;
	    }
#           endif
            if (request_ptrs[i]->kind != MPID_REQUEST_RECV &&
                request_ptrs[i]->kind != MPID_REQUEST_SEND)
            {
                optimize = FALSE;
            }

            if (request_ptrs[i]->kind == MPID_UREQUEST)
                ++n_greqs;
	}
	else
	{
	    status_ptr = (array_of_statuses != MPI_STATUSES_IGNORE) ? &array_of_statuses[i] : MPI_STATUS_IGNORE;
	    MPIR_Status_set_empty(status_ptr);
	    request_ptrs[i] = NULL;
	    n_completed += 1;
            optimize = FALSE;
	}
    }
    
    if (n_completed == count)
    {
	goto fn_exit;
    }

    /* NOTE-O1: high-message-rate optimization.  For simple send and recv
     * operations and MPI_STATUSES_IGNORE we use a fastpath approach that strips
     * out as many unnecessary jumps and error handling as possible.
     *
     * Possible variation: permit request_ptrs[i]==NULL at the cost of an
     * additional branch inside the for-loop below. */
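    /* That variation would amount to one extra branch at the top of the
       fastpath loop (a sketch under the same assumptions as the code below,
       not compiled here):

           for (i = 0; i < count; ++i) {
               if (request_ptrs[i] == NULL)
                   continue;   // the extra branch: skip NULL entries
               while (!MPID_Request_is_complete(request_ptrs[i])) {
                   mpi_errno = MPID_Progress_wait(&progress_state);
                   if (unlikely(mpi_errno)) {
                       MPID_Progress_end(&progress_state);
                       MPIU_ERR_POP(mpi_errno);
                   }
               }
               mpi_errno = MPIR_Request_complete_fastpath(&array_of_requests[i],
                                                          request_ptrs[i]);
               if (mpi_errno) MPIU_ERR_POP(mpi_errno);
           }
    */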
    if (optimize) {
        MPID_Progress_start(&progress_state);
        for (i = 0; i < count; ++i) {
            while (!MPID_Request_is_complete(request_ptrs[i])) {
                mpi_errno = MPID_Progress_wait(&progress_state);
                /* we must check and handle the error here; this cannot be
                 * guarded with HAVE_ERROR_CHECKING, but it is OK for the
                 * error case to be slower */
                if (unlikely(mpi_errno)) {
                    /* --BEGIN ERROR HANDLING-- */
                    MPID_Progress_end(&progress_state);
                    MPIU_ERR_POP(mpi_errno);
                    /* --END ERROR HANDLING-- */
                }
            }
            mpi_errno = MPIR_Request_complete_fastpath(&array_of_requests[i], request_ptrs[i]);
            if (mpi_errno) MPIU_ERR_POP(mpi_errno);
        }

        MPID_Progress_end(&progress_state);

        goto fn_exit;
    }

    /* ------ "slow" code path below ------ */

    /* Grequest_waitall may run the progress engine - thus, we don't 
       invoke progress_start until after running Grequest_waitall */
    /* first, complete any generalized requests */
    if (n_greqs)
    {
        mpi_errno = MPIR_Grequest_waitall(count, request_ptrs);
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);
    }
    
    MPID_Progress_start(&progress_state);

    for (i = 0; i < count; i++)
    {
        if (request_ptrs[i] == NULL)
        {
            if (!ignoring_statuses)
                array_of_statuses[i].MPI_ERROR = MPI_SUCCESS;
            continue;
        }
        
        /* wait for ith request to complete */
        while (!MPID_Request_is_complete(request_ptrs[i]))
        {
            /* generalized requests should already be finished */
            MPIU_Assert(request_ptrs[i]->kind != MPID_UREQUEST);
            
            mpi_errno = MPID_Progress_wait(&progress_state);
            if (mpi_errno != MPI_SUCCESS) {
                /* --BEGIN ERROR HANDLING-- */
                MPID_Progress_end(&progress_state);
                MPIU_ERR_POP(mpi_errno);
                /* --END ERROR HANDLING-- */
            }
        }

        /* complete the request and check the status */
        status_ptr = (ignoring_statuses) ? MPI_STATUS_IGNORE : &array_of_statuses[i];
        rc = MPIR_Request_complete(&array_of_requests[i], request_ptrs[i], status_ptr, &active_flag);
        if (rc == MPI_SUCCESS)
        {
            request_ptrs[i] = NULL;
            if (!ignoring_statuses)
                status_ptr->MPI_ERROR = MPI_SUCCESS;
        }
        else
        {
            /* req completed with an error */
            mpi_errno = MPI_ERR_IN_STATUS;
            if (!ignoring_statuses)
            {
                /* set the error code for this request */
                status_ptr->MPI_ERROR = rc;

                /* set the error codes for the rest of the uncompleted requests to PENDING */
                for (j = i+1; j < count; ++j)
                {
                    if (request_ptrs[j] == NULL)
                    {
                        /* either the user specified MPI_REQUEST_NULL, or this is a completed greq */
                        array_of_statuses[j].MPI_ERROR = MPI_SUCCESS;
                    }
                    else
                    {
                        array_of_statuses[j].MPI_ERROR = MPI_ERR_PENDING;
                    }
                }
            }
            break;
        }
    }
    MPID_Progress_end(&progress_state);
        
 fn_exit:
    if (count > MPID_REQUEST_PTR_ARRAY_SIZE)
    {
	MPIU_CHKLMEM_FREEALL();
    }

    return mpi_errno;
 fn_fail:
    goto fn_exit;
}

#endif

#undef FUNCNAME
#define FUNCNAME MPI_Waitall
#undef FCNAME
#define FCNAME MPIU_QUOTE(FUNCNAME)
/*@
    MPI_Waitall - Waits for all given MPI Requests to complete

Input Parameters:
+ count - list length (integer) 
- array_of_requests - array of request handles (array of handles)

Output Parameter:
. array_of_statuses - array of status objects (array of Statuses).  May be
  'MPI_STATUSES_IGNORE'.

Notes:

If one or more of the requests completes with an error, 'MPI_ERR_IN_STATUS' is
returned.  An error value will be present in the elements of
'array_of_statuses' associated with the requests that failed.  Likewise, the
'MPI_ERROR' field in the status elements associated with requests that have
successfully completed will be 'MPI_SUCCESS'.  Finally, the status elements
for requests that have not completed will have an 'MPI_ERROR' value of
'MPI_ERR_PENDING'.

While it is possible to list a request handle more than once in
'array_of_requests', such an action is considered erroneous and may cause the
program to terminate unexpectedly or produce incorrect results.

.N waitstatus

.N ThreadSafe

.N Fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_REQUEST
.N MPI_ERR_ARG
.N MPI_ERR_IN_STATUS
@*/
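/* A usage sketch (illustrative only; NREQ, bufs, LEN, TAG, and comm are
   assumed to be supplied by the caller):

       MPI_Request reqs[NREQ];
       MPI_Status  stats[NREQ];
       int i, rc;

       for (i = 0; i < NREQ; i++)
           MPI_Irecv(bufs[i], LEN, MPI_BYTE, MPI_ANY_SOURCE, TAG, comm,
                     &reqs[i]);
       rc = MPI_Waitall(NREQ, reqs, stats);
       if (rc == MPI_ERR_IN_STATUS) {
           for (i = 0; i < NREQ; i++) {
               if (stats[i].MPI_ERROR != MPI_SUCCESS &&
                   stats[i].MPI_ERROR != MPI_ERR_PENDING) {
                   // request i completed with an error
               }
           }
       }

   Passing MPI_STATUSES_IGNORE instead of stats enables the high-message-rate
   fastpath described in NOTE-O1 above. */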
int MPI_Waitall(int count, MPI_Request array_of_requests[], 
		MPI_Status array_of_statuses[])
{
    int mpi_errno = MPI_SUCCESS;
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_WAITALL);

    MPIR_ERRTEST_INITIALIZED_ORDIE();
    
    MPIU_THREAD_CS_ENTER(ALLFUNC,);
    MPID_MPI_PT2PT_FUNC_ENTER(MPID_STATE_MPI_WAITALL);

    /* Check the arguments */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            int i;
286
	    MPIR_ERRTEST_COUNT(count, mpi_errno);

	    if (count != 0) {
		MPIR_ERRTEST_ARGNULL(array_of_requests, "array_of_requests", mpi_errno);
		/* NOTE: MPI_STATUSES_IGNORE != NULL */
	    
		MPIR_ERRTEST_ARGNULL(array_of_statuses, "array_of_statuses", mpi_errno);
	    }

	    for (i = 0; i < count; i++) {
		MPIR_ERRTEST_ARRAYREQUEST_OR_NULL(array_of_requests[i], i, mpi_errno);
	    }
	}
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */
    
    /* ... body of routine ...  */

    mpi_errno = MPIR_Waitall_impl(count, array_of_requests, array_of_statuses);
    if (mpi_errno) goto fn_fail;

    /* ... end of body of routine ... */
    
 fn_exit:
    MPID_MPI_PT2PT_FUNC_EXIT(MPID_STATE_MPI_WAITALL);
    MPIU_THREAD_CS_EXIT(ALLFUNC,);
    return mpi_errno;

 fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#ifdef HAVE_ERROR_CHECKING
    mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE,
				     FCNAME, __LINE__, MPI_ERR_OTHER,
				     "**mpi_waitall",
				     "**mpi_waitall %d %p %p",
				     count, array_of_requests,
				     array_of_statuses);
#endif
    mpi_errno = MPIR_Err_return_comm(NULL, FCNAME, mpi_errno);
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}