Commit 5611f86e authored by William Gropp's avatar William Gropp
Browse files

[svn-r3184] Added definitions for THREAD_CS_ENTER/EXIT in the non-threaded...

[svn-r3184] Added definitions for THREAD_CS_ENTER/EXIT in the non-threaded mode and added a few FIXMEs and minor text changes (in some cases to conform to the coding standards)
parent 2bbf805c
......@@ -29,9 +29,7 @@
* shared structures and services
*
* A configure choice will set MPIU_THREAD_GRANULARITY to one of these values
* "Single" means no thread support
*/
#define MPIU_THREAD_GRANULARITY_SINGLE 0
#define MPIU_THREAD_GRANULARITY_GLOBAL 1
#define MPIU_THREAD_GRANULARITY_BRIEF_GLOBAL 2
#define MPIU_THREAD_GRANULARITY_PER_OBJECT 3
......@@ -371,6 +369,8 @@ M*/
#endif
#ifdef MPICH_IS_THREADED
/* Helper definitions */
#if MPIU_THREAD_GRANULARITY == MPIU_THREAD_GRANULARITY_GLOBAL
/*#define MPIU_THREAD_CHECKNEST(_name)*/
......@@ -541,6 +541,8 @@ typedef struct MPIU_ThreadDebug {
#define MPIU_THREAD_CS_ENTER_ALLFUNC(_context)
#define MPIU_THREAD_CS_EXIT_ALLFUNC(_context)
/* FIXME: dprintf is a temporary hack here. It must be removed (use DBG_MSG
if a non-temporary version is desired) */
#define dprintf(...)
#define MPIU_THREAD_CS_ENTER_HANDLE(_context) { \
dprintf("Calling MPIU_THREAD_CS_ENTER_HANDLE in %s\n", __FUNCTION__); \
......@@ -580,15 +582,15 @@ typedef struct MPIU_ThreadDebug {
locks wherever possible. */
#error lock-free not yet implemented
#elif MPIU_THREAD_GRANULARITY == MPIU_THREAD_GRANULARITY_SINGLE
/* No thread support, make all operations a no-op */
#define MPIU_THREAD_CS_ENTER_ALLFUNC(_context)
#define MPIU_THREAD_CS_EXIT_ALLFUNC(_context)
#else
#error Unrecognized thread granularity
#endif
#else /* ! MPICH_IS_THREADED */
#define MPIU_THREAD_CS_ENTER(_name,_context)
#define MPIU_THREAD_CS_EXIT(_name,_context)
#endif /* MPICH_IS_THREADED */
#endif /* !defined(MPIIMPLTHREAD_H_INCLUDED) */
/* This block of text makes it easier to add local use of the thread macros */
......@@ -608,9 +610,6 @@ typedef struct MPIU_ThreadDebug {
locks wherever possible. */
#error lock-free not yet implemented
#elif MPIU_THREAD_GRANULARITY == MPIU_THREAD_GRANULARITY_SINGLE
/* No thread support, make all operations a no-op */
#else
#error Unrecognized thread granularity
#endif
......
......@@ -248,6 +248,10 @@ int MPIC_Irecv(void *buf, int count, MPI_Datatype datatype, int
return mpi_errno;
}
/* FIXME: For the brief-global and finer-grain control, we must ensure that
the global lock is *not* held when this routine is called. (unless we change
progress_start/end to grab the lock, in which case we must *still* make
sure that the lock is not held when this routine is called). */
#undef FUNCNAME
#define FUNCNAME MPIC_Wait
#undef FCNAME
......
......@@ -340,6 +340,7 @@ int MPIR_Get_contextid( MPID_Comm *comm_ptr, MPIR_Context_id_t *context_id )
}
memcpy( local_mask, context_mask, MPIR_MAX_CONTEXT_MASK * sizeof(int) );
/* Note that this is the unthreaded version */
MPIU_THREADPRIV_GET;
MPIR_Nest_incr();
/* Comm must be an intracommunicator */
......@@ -394,11 +395,6 @@ int MPIR_Get_contextid( MPID_Comm *comm_ptr, MPIR_Context_id_t *context_id )
within another MPI routine before calling the CS_ENTER macro */
MPIR_Nest_incr();
/* The SINGLE_CS_ENTER/EXIT macros are commented out because this
routine should always be called from within a routine that has
already entered the single critical section. However, in a
finer-grained approach, these macros indicate where atomic updates
to the shared data structures must be protected. */
/* We lock only around access to the mask. If another thread is
using the mask, we take a mask of zero */
MPIU_DBG_MSG_FMT( COMM, VERBOSE, (MPIU_DBG_FDEST,
......@@ -439,6 +435,10 @@ int MPIR_Get_contextid( MPID_Comm *comm_ptr, MPIR_Context_id_t *context_id )
/* Now, try to get a context id */
MPIU_Assert(comm_ptr->comm_kind == MPID_INTRACOMM);
/* In the global and brief-global cases, note that this routine will
release that global lock when it needs to wait. That will allow
other processes to enter the global or brief global critical section.
*/
mpi_errno = NMPI_Allreduce( MPI_IN_PLACE, local_mask, MPIR_MAX_CONTEXT_MASK,
MPI_INT, MPI_BAND, comm_ptr->handle );
if (mpi_errno) MPIU_ERR_POP(mpi_errno);
......
......@@ -1243,6 +1243,8 @@ static MPIR_Err_msg_t ErrorRing[MAX_ERROR_RING];
static volatile unsigned int error_ring_loc = 0;
static volatile unsigned int max_error_ring_loc = 0;
/* FIXME: This needs to be made consistent with the different thread levels,
since in the "global" thread level, an extra thread mutex is not required. */
#if defined(MPID_REQUIRES_THREAD_SAFETY)
/* if the device requires internal MPICH routines to be thread safe, the
MPIU_THREAD_CHECK macros are not appropriate */
......
......@@ -173,6 +173,8 @@ int MPI_Finalize( void )
completing the finalize */
if (mpi_errno != MPI_SUCCESS) goto fn_fail;
/* FIXME: Many of these debugging items could/should be callbacks,
added to the finalize callback list */
/* FIXME: Both the memory tracing and debug nesting code blocks should
be finalize callbacks */
/* If memory debugging is enabled, check the memory here, after all
......@@ -206,7 +208,7 @@ int MPI_Finalize( void )
if the user erroneously calls Finalize from another thread, an
error message will be issued. */
MPIU_THREAD_CS_EXIT(ALLFUNC,);
MPIU_THREAD_SINGLE_CS_FINALIZE;
MPID_CS_FINALIZE();
/* We place the memory tracing at the very end because any of the other
steps may have allocated memory that they still need to release*/
......
......@@ -51,6 +51,12 @@
* Mutexes
*/
/* FIXME: mutex creation and destruction should be implemented as routines
because there is no reason to use macros (these are not on the performance
critical path). Making these macros requires that any code that might use
these must load all of the pthread.h (or other thread library) support.
*/
/* FIXME: use a constant initializer if available */
#if !defined(MPICH_DEBUG_MUTEX) || !defined(PTHREAD_MUTEX_ERRORCHECK_NP)
#define MPE_Thread_mutex_create(mutex_ptr_, err_ptr_) \
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment