Commit a5ebdf5c authored by William Gropp's avatar William Gropp
Browse files

[svn-r3855] Fixes for brief-global with ch3:sock

parent 3f9e041d
......@@ -11,8 +11,9 @@
#error 'This file requires mpichconf.h'
#endif
/* FIXME: TEMP - should make sure enable-g set this */
/* #define MPID_THREAD_DEBUG */
/* Turn on the thread-nesting debug instrumentation whenever mutex-nesting
   debugging was requested at configure time, unless the user already
   defined MPID_THREAD_DEBUG explicitly (e.g. via the commented-out
   define above). */
#if !defined(MPID_THREAD_DEBUG) && defined(MPICH_DEBUG_MUTEXNESTING)
#define MPID_THREAD_DEBUG 1
#endif
/* Rather than embed a conditional test in the MPICH2 code, we define a
single value on which we can test */
......@@ -419,13 +420,22 @@ typedef struct MPIU_ThreadDebug {
_nest_ptr[MPIUNest_##_name].file, \
_nest_ptr[MPIUNest_##_name].line,\
_nest_ptr[MPIUNest_##_name].fname ); \
fflush(stdout); \
fflush(stderr); \
}}}
#define MPIU_THREAD_UPDATEDEPTH(_name,_value) {if (1){ \
MPIU_ThreadDebug_t *_nest_ptr=0;\
MPID_Thread_tls_get( &MPIR_ThreadInfo.nest_storage, &_nest_ptr );\
if (!_nest_ptr) { _nest_ptr = (MPIU_ThreadDebug_t*)MPIU_Calloc(2,sizeof(MPIU_ThreadDebug_t));\
MPID_Thread_tls_set( &MPIR_ThreadInfo.nest_storage,_nest_ptr);}\
if (_nest_ptr[MPIUNest_##_name].count +_value< 0) {\
fprintf(stderr, "%s:%d %s = %d (<0); previously set in %s:%d(%s)\n",\
__FILE__, __LINE__, #_name, \
_nest_ptr[MPIUNest_##_name].count, \
_nest_ptr[MPIUNest_##_name].file, \
_nest_ptr[MPIUNest_##_name].line,\
_nest_ptr[MPIUNest_##_name].fname ); \
fflush(stderr); \
}\
_nest_ptr[MPIUNest_##_name].count += _value;\
_nest_ptr[MPIUNest_##_name].line = __LINE__; \
MPIU_Strncpy( _nest_ptr[MPIUNest_##_name].file, __FILE__, MPIU_THREAD_LOC_LEN ); \
......@@ -444,7 +454,7 @@ typedef struct MPIU_ThreadDebug {
#endif /* MPID_THREAD_DEBUG */
#else
/* Nesting check is a no-op when MPID_THREAD_DEBUG instrumentation is
   not enabled (this is the #else branch of the debug test above). */
#define MPIU_THREAD_CHECKNEST(_name)
#endif
#endif /* test on THREAD_GRANULARITY */
#define MPIU_THREAD_CS_ENTER_LOCKNAME(_name) \
{ \
......@@ -467,7 +477,6 @@ typedef struct MPIU_ThreadDebug {
} \
}
/* Definitions of the thread support for various levels of thread granularity */
#if MPIU_THREAD_GRANULARITY == MPIU_THREAD_GRANULARITY_GLOBAL
/* There is a single, global lock, held for the duration of an MPI call */
......@@ -485,6 +494,8 @@ typedef struct MPIU_ThreadDebug {
#define MPIU_THREAD_CS_EXIT_MPIDCOMM(_context)
#define MPIU_THREAD_CS_ENTER_INITFLAG(_context)
#define MPIU_THREAD_CS_EXIT_INITFLAG(_context)
/* Under GLOBAL granularity the single global lock is already held for the
   duration of the MPI call, so the PMI critical section needs no extra
   locking and expands to nothing. */
#define MPIU_THREAD_CS_ENTER_PMI(_context)
#define MPIU_THREAD_CS_EXIT_PMI(_context)
#elif MPIU_THREAD_GRANULARITY == MPIU_THREAD_GRANULARITY_BRIEF_GLOBAL
/* There is a single, global lock, held only when needed */
......@@ -519,6 +530,13 @@ typedef struct MPIU_ThreadDebug {
MPIU_THREAD_CHECK_BEGIN MPIU_THREAD_CS_ENTER_LOCKNAME(global_mutex) MPIU_THREAD_CHECK_END
#define MPIU_THREAD_CS_EXIT_INITFLAG(_context) \
MPIU_THREAD_CHECK_BEGIN MPIU_THREAD_CS_EXIT_LOCKNAME(global_mutex) MPIU_THREAD_CHECK_END
/* PMI for spawn needs to be single-threaded - this allows us to add
   PMI calls where no other mutex may be active.  Under brief-global the
   global lock is held only when needed, so the PMI critical section must
   acquire global_mutex itself.  This is a temporary
   fix for brief-global only */
#define MPIU_THREAD_CS_ENTER_PMI(_context) \
MPIU_THREAD_CHECK_BEGIN MPIU_THREAD_CS_ENTER_LOCKNAME(global_mutex) MPIU_THREAD_CHECK_END
#define MPIU_THREAD_CS_EXIT_PMI(_context) \
MPIU_THREAD_CHECK_BEGIN MPIU_THREAD_CS_EXIT_LOCKNAME(global_mutex) MPIU_THREAD_CHECK_END
#elif MPIU_THREAD_GRANULARITY == MPIU_THREAD_GRANULARITY_PER_OBJECT
/* There are multiple locks, one for each logical class (e.g., each type of
......@@ -578,6 +596,10 @@ typedef struct MPIU_ThreadDebug {
MPIU_THREAD_CHECK_BEGIN MPIU_THREAD_CS_ENTER_LOCKNAME(global_mutex) MPIU_THREAD_CHECK_END
#define MPIU_THREAD_CS_EXIT_INITFLAG(_context) \
MPIU_THREAD_CHECK_BEGIN MPIU_THREAD_CS_EXIT_LOCKNAME(global_mutex) MPIU_THREAD_CHECK_END
/* PMI calls are not thread-safe; under PER_OBJECT granularity serialize
   them on the single global_mutex, mirroring the INITFLAG macros above. */
#define MPIU_THREAD_CS_ENTER_PMI(_context) \
MPIU_THREAD_CHECK_BEGIN MPIU_THREAD_CS_ENTER_LOCKNAME(global_mutex) MPIU_THREAD_CHECK_END
#define MPIU_THREAD_CS_EXIT_PMI(_context) \
MPIU_THREAD_CHECK_BEGIN MPIU_THREAD_CS_EXIT_LOCKNAME(global_mutex) MPIU_THREAD_CHECK_END
#elif MPIU_THREAD_GRANULARITY == MPIU_THREAD_GRANULARITY_LOCK_FREE
/* Updates to shared data and access to shared services is handled without
......
......@@ -90,7 +90,9 @@ int MPI_Init( int *argc, char ***argv )
MPIU_THREAD_SINGLE_CS_ENTER/EXIT because
MPIR_ThreadInfo.isThreaded hasn't been initialized yet.
*/
#if MPIU_THREAD_GRANULARITY == MPIU_THREAD_GRANULARITY_GLOBAL
MPID_CS_ENTER();
#endif
MPID_MPI_INIT_FUNC_ENTER(MPID_STATE_MPI_INIT);
# ifdef HAVE_ERROR_CHECKING
......@@ -144,7 +146,9 @@ int MPI_Init( int *argc, char ***argv )
/* ... end of body of routine ... */
MPID_MPI_INIT_FUNC_EXIT(MPID_STATE_MPI_INIT);
#if MPIU_THREAD_GRANULARITY == MPIU_THREAD_GRANULARITY_GLOBAL
MPID_CS_EXIT();
#endif
return mpi_errno;
fn_fail:
......
......@@ -499,7 +499,9 @@ int MPI_Init_thread( int *argc, char ***argv, int required, int *provided )
MPIR_ThreadInfo.isThreaded hasn't been initialized yet.
*/
/* */
#if MPIU_THREAD_GRANULARITY == MPIU_THREAD_GRANULARITY_GLOBAL
MPID_CS_ENTER();
#endif
#if 0
/* Create the thread-private region if necessary and go ahead
......@@ -532,7 +534,9 @@ int MPI_Init_thread( int *argc, char ***argv, int required, int *provided )
/* ... end of body of routine ... */
MPID_MPI_INIT_FUNC_EXIT(MPID_STATE_MPI_INIT_THREAD);
#if MPIU_THREAD_GRANULARITY == MPIU_THREAD_GRANULARITY_GLOBAL
MPID_CS_EXIT();
#endif
return mpi_errno;
fn_fail:
......
......@@ -163,9 +163,9 @@ int MPI_Waitall(int count, MPI_Request array_of_requests[],
{
goto fn_exit;
}
MPID_Progress_start(&progress_state);
/* Grequest_waitall may run the progress engine - thus, we don't
invoke progress_start until after running Grequest_waitall */
/* first, complete any generalized requests */
if (n_greqs)
{
......@@ -173,6 +173,8 @@ int MPI_Waitall(int count, MPI_Request array_of_requests[],
if (mpi_errno != MPI_SUCCESS) goto fn_fail;
}
MPID_Progress_start(&progress_state);
for (i = 0; i < count; i++)
{
if (request_ptrs[i] == NULL)
......
......@@ -167,7 +167,7 @@ int MPIDI_Comm_spawn_multiple(int count, char **commands,
/* FIXME: info may be needed for port name */
mpi_errno = MPID_Open_port(NULL, port_name);
/* --BEGIN ERROR HANDLING-- */
if (mpi_errno != MPI_SUCCESS)
if (mpi_errno != MPI_SUCCESS)
{
MPIU_ERR_POP(mpi_errno);
}
......@@ -177,6 +177,7 @@ int MPIDI_Comm_spawn_multiple(int count, char **commands,
preput_keyval_vector.val = port_name;
/* Spawn the processes */
MPIU_THREAD_CS_ENTER(PMI,);
pmi_errno = PMI_Spawn_multiple(count, (const char **)
commands,
(const char ***) argvs,
......@@ -185,7 +186,7 @@ int MPIDI_Comm_spawn_multiple(int count, char **commands,
info_keyval_vectors, 1,
&preput_keyval_vector,
pmi_errcodes);
MPIU_THREAD_CS_EXIT(PMI,);
if (pmi_errno != PMI_SUCCESS) {
MPIU_ERR_SETANDJUMP1(mpi_errno, MPI_ERR_OTHER,
"**pmi_spawn_multiple", "**pmi_spawn_multiple %d", pmi_errno);
......@@ -284,7 +285,9 @@ int MPIDI_CH3_GetParentPort(char ** parent_port)
char *kvsname = NULL;
/* We can always use PMI_KVS_Get on our own process group */
MPIDI_PG_GetConnKVSname( &kvsname );
MPIU_THREAD_CS_ENTER(PMI,);
pmi_errno = PMI_KVS_Get( kvsname, PARENT_PORT_KVSKEY, val, sizeof(val));
MPIU_THREAD_CS_EXIT(PMI,);
if (pmi_errno) {
mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_FATAL, FCNAME, __LINE__, MPI_ERR_OTHER, "**pmi_kvs_get", "**pmi_kvs_get %d", pmi_errno);
goto fn_exit;
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment