author    | rpj <rpj> | 2001-07-06 18:16:50 +0000
committer | rpj <rpj> | 2001-07-06 18:16:50 +0000
commit    | 06974b302eaf8f08382e6e786aea53f420c12222 (patch)
tree      | 1b574a41dacc634a105a74127b2dac30a60bda13
parent    | 7a3104dc65b469cbb9c88b6a9c7b7bea4126a43e (diff)
Spinlocks and barriers fixed and working. Beta level.
* spin.c: Revamped and working; included static initialiser.
* barrier.c: Likewise.
* condvar.c: Macro constant change; inline auto init routine.
* mutex.c: Likewise.
* rwlock.c: Likewise.
* private.c: Add support for spinlock initialiser.
* global.c: Likewise.
* implement.h: Likewise.
* pthread.h (PTHREAD_SPINLOCK_INITIALIZER): Fix typo.
tests/ChangeLog:
* spin3.c: Changed test and fixed.
* spin4.c: Fixed.
* barrier3.c: Fixed.
* barrier4.c: Fixed.
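
The headline change is that spin locks can now be created with the static initialiser PTHREAD_SPINLOCK_INITIALIZER and are initialised automatically on first use. As background, a minimal usage sketch is shown below; the worker function, counter and iteration count are illustrative only and are not taken from this commit or its test suite:

```c
#include <pthread.h>
#include <stdio.h>

/* Statically initialised spin lock, as enabled by this commit. */
static pthread_spinlock_t lock = PTHREAD_SPINLOCK_INITIALIZER;
static long counter = 0;

static void *
worker(void *arg)
{
  int i;

  for (i = 0; i < 100000; i++)
    {
      pthread_spin_lock(&lock);     /* First use auto-initialises the lock. */
      counter++;
      pthread_spin_unlock(&lock);
    }
  return NULL;
}

int
main(void)
{
  pthread_t t1, t2;

  pthread_create(&t1, NULL, worker, NULL);
  pthread_create(&t2, NULL, worker, NULL);
  pthread_join(t1, NULL);
  pthread_join(t2, NULL);
  pthread_spin_destroy(&lock);
  printf("counter = %ld\n", counter);
  return 0;
}
```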
-rw-r--r-- | ChangeLog        |  13
-rw-r--r-- | Makefile         |   4
-rw-r--r-- | barrier.c        |  16
-rw-r--r-- | condvar.c        |  12
-rw-r--r-- | config.h         |   9
-rw-r--r-- | global.c         |   6
-rw-r--r-- | implement.h      |  36
-rw-r--r-- | mutex.c          |  14
-rw-r--r-- | private.c        |   2
-rw-r--r-- | pthread.h        |   3
-rw-r--r-- | rwlock.c         |  18
-rw-r--r-- | spin.c           | 234
-rw-r--r-- | tests/ChangeLog  |   7
-rw-r--r-- | tests/barrier3.c |  19
-rw-r--r-- | tests/barrier4.c |  39
-rw-r--r-- | tests/spin3.c    |   2
-rw-r--r-- | tests/spin4.c    |  19
17 files changed, 297 insertions, 156 deletions
diff --git a/ChangeLog b/ChangeLog
@@ -1,3 +1,16 @@
+2001-07-07 Ross Johnson <rpj@setup1.ise.canberra.edu.au>
+
+ * spin.c: Revamped and working; included static initialiser.
+ Now beta level.
+ * barrier.c: Likewise.
+ * condvar.c: Macro constant change; inline auto init routine.
+ * mutex.c: Likewise.
+ * rwlock.c: Likewise.
+ * private.c: Add support for spinlock initialiser.
+ * global.c: Likewise.
+ * implement.h: Likewise.
+ * pthread.h (PTHREAD_SPINLOCK_INITIALIZER): Fix typo.
+
2001-07-05 Ross Johnson <rpj@setup1.ise.canberra.edu.au>
* barrier.c: Remove static initialisation - irrelevent
diff --git a/Makefile b/Makefile
@@ -22,7 +22,8 @@ VCFLAGS = /D__CLEANUP_C
CFLAGS	= /W3 /MT /nologo /Yd /Zi /I. /D_WIN32_WINNT=0x400 /DPTW32_BUILD
-OBJ=attr.obj \
+OBJ= attr.obj \
+ barrier.obj \
cancel.obj \
cleanup.obj \
condvar.obj \
@@ -40,6 +41,7 @@ OBJ=attr.obj \
	sched.obj \
semaphore.obj \
signal.obj \
+ spin.obj \
sync.obj \
tsd.obj
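
The barrier.c hunks below, together with the tests/barrier3.c and tests/barrier4.c changes further down, revolve around pthread_barrier_wait() handing PTHREAD_BARRIER_SERIAL_THREAD to exactly one waiter and 0 to the rest. A small illustrative sketch of that contract follows; it is example code only, not part of the commit, and the worker function and thread count are hypothetical:

```c
#include <pthread.h>
#include <stdio.h>

#define NTHREADS 4

static pthread_barrier_t barrier;

static void *
worker(void *arg)
{
  int rc = pthread_barrier_wait(&barrier);

  /* Exactly one waiter gets PTHREAD_BARRIER_SERIAL_THREAD; the others get 0. */
  if (rc == PTHREAD_BARRIER_SERIAL_THREAD)
    printf("thread %d was elected to do the serial work\n", (int)(size_t)arg);
  else if (rc != 0)
    printf("pthread_barrier_wait failed: %d\n", rc);

  return NULL;
}

int
main(void)
{
  pthread_t t[NTHREADS];
  int i;

  pthread_barrier_init(&barrier, NULL, NTHREADS);

  for (i = 0; i < NTHREADS; i++)
    pthread_create(&t[i], NULL, worker, (void *)(size_t)i);
  for (i = 0; i < NTHREADS; i++)
    pthread_join(t[i], NULL);

  pthread_barrier_destroy(&barrier);
  return 0;
}
```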
diff --git a/barrier.c b/barrier.c
@@ -148,22 +148,22 @@ pthread_barrier_wait(pthread_barrier_t *barrier)
    }
  else
    {
+      pthread_t self;
+      int oldCancelState;
+
+      (void) pthread_mutex_unlock(&(b->mtxExclusiveAccess));
+
+      self = pthread_self();
+
      /*
       * pthread_barrier_wait() is not a cancelation point
       * so temporarily prevent sem_wait() from being one.
       */
-      pthread_t self = pthread_self();
-      int oldCancelState;
-
      if (self->cancelType == PTHREAD_CANCEL_DEFERRED)
        {
          pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldCancelState);
        }
-
-      /* Could still be PTHREAD_CANCEL_ASYNCHRONOUS. */
-      pthread_cleanup_push(pthread_mutex_unlock,
-                           (void *) &(b->mtxExclusiveAccess));
-
      if (0 != sem_wait(&(b->semBarrierBreeched)))
        {
          result = errno;
@@ -173,8 +173,6 @@ pthread_barrier_wait(pthread_barrier_t *barrier)
        {
          pthread_setcancelstate(oldCancelState, NULL);
        }
-
-      pthread_cleanup_pop(1);
    }
}
diff --git a/condvar.c b/condvar.c
@@ -152,7 +152,7 @@
#include "pthread.h"
#include "implement.h"

-static int
+static INLINE int
ptw32_cond_check_need_init (pthread_cond_t *cond)
{
  int result = 0;
@@ -187,7 +187,7 @@ ptw32_cond_check_need_init (pthread_cond_t *cond)
   * re-initialise it only by calling pthread_cond_init()
   * explicitly.
   */
-  if (*cond == (pthread_cond_t) PTW32_OBJECT_AUTO_INIT)
+  if (*cond == PTHREAD_COND_INITIALIZER)
    {
      result = pthread_cond_init(cond, NULL);
    }
@@ -584,7 +584,7 @@ pthread_cond_destroy (pthread_cond_t * cond)
      return EINVAL;
    }

-  if (*cond != (pthread_cond_t) PTW32_OBJECT_AUTO_INIT)
+  if (*cond != PTHREAD_COND_INITIALIZER)
    {
      cv = *cond;

@@ -649,7 +649,7 @@ pthread_cond_destroy (pthread_cond_t * cond)
      /*
       * Check again.
       */
-      if (*cond == (pthread_cond_t) PTW32_OBJECT_AUTO_INIT)
+      if (*cond == PTHREAD_COND_INITIALIZER)
        {
          /*
           * This is all we need to do to destroy a statically
@@ -827,7 +827,7 @@ ptw32_cond_timedwait (pthread_cond_t * cond,
   * again inside the guarded section of ptw32_cond_check_need_init()
   * to avoid race conditions.
   */
-  if (*cond == (pthread_cond_t) PTW32_OBJECT_AUTO_INIT)
+  if (*cond == PTHREAD_COND_INITIALIZER)
    {
      result = ptw32_cond_check_need_init(cond);
    }
@@ -950,7 +950,7 @@ ptw32_cond_unblock (pthread_cond_t * cond,
   * No-op if the CV is static and hasn't been initialised yet.
   * Assuming that any race condition is harmless.
   */
-  if (cv == (pthread_cond_t) PTW32_OBJECT_AUTO_INIT)
+  if (cv == PTHREAD_COND_INITIALIZER)
    {
      return 0;
    }
diff --git a/config.h b/config.h
@@ -1,4 +1,4 @@
-/* config.h.in. Generated automatically from configure.in by autoheader. */
+/* config.h.in Generated automatically from configure.in by autoheader. */

#ifndef PTW32_CONFIG_H
#define PTW32_CONFIG_H
@@ -30,6 +30,9 @@
/* Define if you need to convert string parameters to unicode. (eg. WinCE) */
#undef NEED_UNICODE_CONSTS

+/* Define if your C (not C++) compiler supports "inline" functions. */
+#undef HAVE_C_INLINE
+
/* Do we know about type mode_t? */
#undef HAVE_MODE_T

@@ -54,6 +57,10 @@
#define HAVE_STRUCT_TIMESPEC
#endif

+#ifdef __GNUC__
+#define HAVE_C_INLINE
+#endif
+
#ifdef __MINGW32__
#define HAVE_MODE_T
#endif
diff --git a/global.c b/global.c
@@ -52,6 +52,12 @@ CRITICAL_SECTION ptw32_cond_test_init_lock;
 */
CRITICAL_SECTION ptw32_rwlock_test_init_lock;

+/*
+ * Global lock for testing internal state of PTHREAD_SPINLOCK_INITIALIZER
+ * created spin locks.
+ */
+CRITICAL_SECTION ptw32_spinlock_test_init_lock;
+
#ifdef _UWIN
/*
 * Keep a count of the number of threads.
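
The condvar.c, mutex.c, rwlock.c and new spin.c code in this commit all rely on the same guarded double-check idiom for statically initialised objects, serialised by the per-type ptw32_*_test_init_lock critical sections seen above. The sketch below shows only the shape of that idiom with hypothetical names (my_obj_t, my_obj_init, my_obj_check_need_init); it is not the library's own code:

```c
#include <windows.h>
#include <errno.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the library's real object type and guard. */
typedef struct my_obj_t_ { int dummy; } *my_obj_t;
#define MY_OBJ_STATIC_INIT ((my_obj_t) -1)

static CRITICAL_SECTION my_obj_test_init_lock;   /* set up at process init */

static int
my_obj_init(my_obj_t *obj)
{
  my_obj_t o = (my_obj_t) calloc(1, sizeof(*o));
  if (o == NULL)
    return ENOMEM;
  *obj = o;
  return 0;
}

static int
my_obj_check_need_init(my_obj_t *obj)
{
  int result = 0;

  EnterCriticalSection(&my_obj_test_init_lock);

  /*
   * Several threads may race to get here; only the first one that still
   * sees the static-initialiser value really initialises the object.
   */
  if (*obj == MY_OBJ_STATIC_INIT)
    result = my_obj_init(obj);
  else if (*obj == NULL)
    result = EINVAL;            /* destroyed while we were waiting */

  LeaveCriticalSection(&my_obj_test_init_lock);
  return result;
}

int
main(void)
{
  my_obj_t obj = MY_OBJ_STATIC_INIT;

  InitializeCriticalSection(&my_obj_test_init_lock);
  my_obj_check_need_init(&obj);     /* first use auto-initialises */
  free(obj);
  DeleteCriticalSection(&my_obj_test_init_lock);
  return 0;
}
```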
diff --git a/implement.h b/implement.h
index 8d19406..5a146c0 100644
--- a/implement.h
+++ b/implement.h
@@ -39,6 +39,12 @@
#include "semaphore.h"
#include "sched.h"

+#if defined(HAVE_C_INLINE) || defined(__cplusplus)
+#define INLINE inline
+#else
+#define INLINE
+#endif
+
typedef enum {
  /*
   * This enumeration represents the state of the thread;
@@ -136,9 +142,6 @@ struct sem_t_ {
#define PTW32_OBJECT_AUTO_INIT ((void *) -1)
#define PTW32_OBJECT_INVALID   NULL

-#define PTW32_SPIN_UNLOCKED       (1)
-#define PTW32_SPIN_LOCKED         (2)
-#define PTW32_SPIN_INTERLOCK_MASK (~3L)

struct pthread_mutex_t_ {
  LONG lock_idx;
@@ -154,10 +157,32 @@ struct pthread_mutexattr_t_ {
  int kind;
};

+/*
+ * Possible values, other than PTW32_OBJECT_INVALID,
+ * for the "interlock" element in a spinlock.
+ *
+ * In this implementation, when a spinlock is initialised,
+ * the number of cpus available to the process is checked.
+ * If there is only one cpu then "interlock" is set equal to
+ * PTW32_SPIN_USE_MUTEX and u.mutex is a initialised mutex.
+ * If the number of cpus is greater than 1 then "interlock"
+ * is set equal to PTW32_SPIN_UNLOCKED and the number is
+ * stored in u.cpus. This arrangement allows the spinlock
+ * routines to attempt an InterlockedCompareExchange on "interlock"
+ * immediately and, if that fails, to try the inferior mutex.
+ *
+ * "u.cpus" isn't used for anything yet, but could be used at
+ * some point to optimise spinlock behaviour.
+ */
+#define PTW32_SPIN_UNLOCKED  (1)
+#define PTW32_SPIN_LOCKED    (2)
+#define PTW32_SPIN_USE_MUTEX (3)
+
struct pthread_spinlock_t_ {
+  long interlock;           /* Locking element for multi-cpus. */
  union {
-    LONG interlock;
-    pthread_mutex_t mx;
+    int cpus;               /* No. of cpus if multi cpus, or  */
+    pthread_mutex_t mutex;  /* mutex if single cpu.           */
  } u;
};
@@ -359,6 +384,7 @@ extern int ptw32_concurrency;
extern CRITICAL_SECTION ptw32_mutex_test_init_lock;
extern CRITICAL_SECTION ptw32_cond_test_init_lock;
extern CRITICAL_SECTION ptw32_rwlock_test_init_lock;
+extern CRITICAL_SECTION ptw32_spinlock_test_init_lock;

#ifdef _UWIN
extern int pthread_count;
diff --git a/mutex.c b/mutex.c
@@ -27,7 +27,7 @@
#include "implement.h"

-static int
+static INLINE int
ptw32_mutex_check_need_init(pthread_mutex_t *mutex)
{
  int result = 0;
@@ -63,7 +63,7 @@ ptw32_mutex_check_need_init(pthread_mutex_t *mutex)
   * re-initialise it only by calling pthread_mutex_init()
   * explicitly.
   */
-  if (*mutex == (pthread_mutex_t) PTW32_OBJECT_AUTO_INIT)
+  if (*mutex == PTHREAD_MUTEX_INITIALIZER)
    {
      result = pthread_mutex_init(mutex, NULL);
    }
@@ -170,7 +170,7 @@ pthread_mutex_destroy(pthread_mutex_t *mutex)
  /*
   * Check to see if we have something to delete.
   */
-  if (*mutex != (pthread_mutex_t) PTW32_OBJECT_AUTO_INIT)
+  if (*mutex != PTHREAD_MUTEX_INITIALIZER)
    {
      mx = *mutex;

@@ -225,7 +225,7 @@ pthread_mutex_destroy(pthread_mutex_t *mutex)
      /*
       * Check again.
       */
-      if (*mutex == (pthread_mutex_t) PTW32_OBJECT_AUTO_INIT)
+      if (*mutex == PTHREAD_MUTEX_INITIALIZER)
        {
          /*
           * This is all we need to do to destroy a statically
@@ -630,7 +630,7 @@ pthread_mutex_lock(pthread_mutex_t *mutex)
   * again inside the guarded section of ptw32_mutex_check_need_init()
   * to avoid race conditions.
   */
-  if (*mutex == (pthread_mutex_t) PTW32_OBJECT_AUTO_INIT)
+  if (*mutex == PTHREAD_MUTEX_INITIALIZER)
    {
      if ((result = ptw32_mutex_check_need_init(mutex)) != 0)
        {
@@ -694,7 +694,7 @@ pthread_mutex_unlock(pthread_mutex_t *mutex)
   * race condition. If another thread holds the
   * lock then we shouldn't be in here.
   */
-  if (mx != (pthread_mutex_t) PTW32_OBJECT_AUTO_INIT)
+  if (mx != PTHREAD_MUTEX_INITIALIZER)
    {
      if (mx->ownerThread == (pthread_t) PTW32_MUTEX_OWNER_ANONYMOUS
          || pthread_equal(mx->ownerThread, pthread_self()))
@@ -745,7 +745,7 @@ pthread_mutex_trylock(pthread_mutex_t *mutex)
   * again inside the guarded section of ptw32_mutex_check_need_init()
   * to avoid race conditions.
   */
-  if (*mutex == (pthread_mutex_t) PTW32_OBJECT_AUTO_INIT)
+  if (*mutex == PTHREAD_MUTEX_INITIALIZER)
    {
      result = ptw32_mutex_check_need_init(mutex);
    }
diff --git a/private.c b/private.c
@@ -88,6 +88,7 @@ ptw32_processInitialize (void)
  InitializeCriticalSection(&ptw32_mutex_test_init_lock);
  InitializeCriticalSection(&ptw32_cond_test_init_lock);
  InitializeCriticalSection(&ptw32_rwlock_test_init_lock);
+  InitializeCriticalSection(&ptw32_spinlock_test_init_lock);

  return (ptw32_processInitialized);

@@ -142,6 +143,7 @@ ptw32_processTerminate (void)
      /*
       * Destroy the global test and init check locks.
       */
+      DeleteCriticalSection(&ptw32_spinlock_test_init_lock);
      DeleteCriticalSection(&ptw32_rwlock_test_init_lock);
      DeleteCriticalSection(&ptw32_cond_test_init_lock);
      DeleteCriticalSection(&ptw32_mutex_test_init_lock);
diff --git a/pthread.h b/pthread.h
@@ -457,8 +457,7 @@ struct pthread_once_t_

#define PTHREAD_RWLOCK_INITIALIZER ((pthread_rwlock_t) -1)

-#define PTHREAD_SPINLOCK_INITIALIZER {1}
-
+#define PTHREAD_SPINLOCK_INITIALIZER ((pthread_spinlock_t) -1)

enum {
diff --git a/rwlock.c b/rwlock.c
@@ -29,7 +29,7 @@
#include "pthread.h"
#include "implement.h"

-static int
+static INLINE int
ptw32_rwlock_check_need_init(pthread_rwlock_t *rwlock)
{
  int result = 0;
@@ -65,7 +65,7 @@ ptw32_rwlock_check_need_init(pthread_rwlock_t *rwlock)
   * re-initialise it only by calling pthread_rwlock_init()
   * explicitly.
   */
-  if (*rwlock == (pthread_rwlock_t) PTW32_OBJECT_AUTO_INIT)
+  if (*rwlock == PTHREAD_RWLOCK_INITIALIZER)
    {
      result = pthread_rwlock_init(rwlock, NULL);
    }
@@ -163,7 +163,7 @@ pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
      return EINVAL;
    }

-  if (*rwlock != (pthread_rwlock_t) PTW32_OBJECT_AUTO_INIT)
+  if (*rwlock != PTHREAD_RWLOCK_INITIALIZER)
    {
      rwl = *rwlock;

@@ -226,7 +226,7 @@ pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
      /*
       * Check again.
       */
-      if (*rwlock == (pthread_rwlock_t) PTW32_OBJECT_AUTO_INIT)
+      if (*rwlock == PTHREAD_RWLOCK_INITIALIZER)
        {
          /*
           * This is all we need to do to destroy a statically
@@ -268,7 +268,7 @@ pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
   * again inside the guarded section of ptw32_rwlock_check_need_init()
   * to avoid race conditions.
   */
-  if (*rwlock == (pthread_rwlock_t) PTW32_OBJECT_AUTO_INIT)
+  if (*rwlock == PTHREAD_RWLOCK_INITIALIZER)
    {
      result = ptw32_rwlock_check_need_init(rwlock);

@@ -340,7 +340,7 @@ pthread_rwlock_wrlock(pthread_rwlock_t * rwlock)
   * again inside the guarded section of ptw32_rwlock_check_need_init()
   * to avoid race conditions.
   */
-  if (*rwlock == (pthread_rwlock_t) PTW32_OBJECT_AUTO_INIT)
+  if (*rwlock == PTHREAD_RWLOCK_INITIALIZER)
    {
      result = ptw32_rwlock_check_need_init(rwlock);

@@ -435,7 +435,7 @@ pthread_rwlock_unlock(pthread_rwlock_t * rwlock)
      return(EINVAL);
    }

-  if (*rwlock == (pthread_rwlock_t) PTW32_OBJECT_AUTO_INIT)
+  if (*rwlock == PTHREAD_RWLOCK_INITIALIZER)
    {
      /*
       * Assume any race condition here is harmless.
@@ -493,7 +493,7 @@ pthread_rwlock_tryrdlock(pthread_rwlock_t * rwlock)
   * again inside the guarded section of ptw32_rwlock_check_need_init()
   * to avoid race conditions.
   */
-  if (*rwlock == (pthread_rwlock_t) PTW32_OBJECT_AUTO_INIT)
+  if (*rwlock == PTHREAD_RWLOCK_INITIALIZER)
    {
      result = ptw32_rwlock_check_need_init(rwlock);

@@ -553,7 +553,7 @@ pthread_rwlock_trywrlock(pthread_rwlock_t * rwlock)
   * again inside the guarded section of ptw32_rwlock_check_need_init()
   * to avoid race conditions.
   */
-  if (*rwlock == (pthread_rwlock_t) PTW32_OBJECT_AUTO_INIT)
+  if (*rwlock == PTHREAD_RWLOCK_INITIALIZER)
    {
      result = ptw32_rwlock_check_need_init(rwlock);

diff --git a/spin.c b/spin.c
@@ -26,25 +26,64 @@
#include "pthread.h"
#include "implement.h"

-/*
- * This works because the mask that is formed exposes all but the
- * first two LSBs. If the spinlock is using a mutex rather than
- * the interlock mechanism then there will always be high bits
- * to indicate this. This is all just to save the overhead of
- * using a non-simple struct for spinlocks.
- */
-#define PTW32_SPIN_SPINS(_lock) \
-  (0 == ((long) ((_lock->u).mx) & ~(PTW32_SPIN_LOCKED | PTW32_SPIN_UNLOCKED | (long) PTW32_OBJECT_INVALID)))
+#ifdef __MINGW32__
+#define _LONG   long
+#define _LPLONG long*
+#else
+#define _LONG   PVOID
+#define _LPLONG PVOID*
+#endif
+
+static INLINE int
+ptw32_spinlock_check_need_init(pthread_spinlock_t *lock)
+{
+  int result = 0;
+
+  /*
+   * The following guarded test is specifically for statically
+   * initialised spinlocks (via PTHREAD_SPINLOCK_INITIALIZER).
+   *
+   * Note that by not providing this synchronisation we risk
+   * introducing race conditions into applications which are
+   * correctly written.
+   */
+  EnterCriticalSection(&ptw32_spinlock_test_init_lock);
+
+  /*
+   * We got here possibly under race
+   * conditions. Check again inside the critical section
+   * and only initialise if the spinlock is valid (not been destroyed).
+   * If a static spinlock has been destroyed, the application can
+   * re-initialise it only by calling pthread_spin_init()
+   * explicitly.
+   */
+  if (*lock == PTHREAD_SPINLOCK_INITIALIZER)
+    {
+      result = pthread_spin_init(lock, PTHREAD_PROCESS_PRIVATE);
+    }
+  else if (*lock == NULL)
+    {
+      /*
+       * The spinlock has been destroyed while we were waiting to
+       * initialise it, so the operation that caused the
+       * auto-initialisation should fail.
+       */
+      result = EINVAL;
+    }
+
+  LeaveCriticalSection(&ptw32_spinlock_test_init_lock);
+
+  return(result);
+}

int
pthread_spin_init(pthread_spinlock_t *lock, int pshared)
{
  pthread_spinlock_t s;
-  int CPUs = 1;
  int result = 0;

-  if (lock == NULL || *lock == NULL)
+  if (lock == NULL)
    {
      return EINVAL;
    }
@@ -56,9 +95,12 @@ pthread_spin_init(pthread_spinlock_t *lock, int pshared)
      return ENOMEM;
    }

-  (void) pthread_getprocessors_np(&CPUs);
+  if (0 != pthread_getprocessors_np(&(s->u.cpus)))
+    {
+      s->u.cpus = 1;
+    }

-  if (CPUs > 1)
+  if (s->u.cpus > 1)
    {
      if (pshared == PTHREAD_PROCESS_SHARED)
        {
@@ -84,7 +126,7 @@ pthread_spin_init(pthread_spinlock_t *lock, int pshared)
        }

-      s->u.interlock = PTW32_SPIN_UNLOCKED;
+      s->interlock = PTW32_SPIN_UNLOCKED;
    }
  else
    {
@@ -94,7 +136,11 @@ pthread_spin_init(pthread_spinlock_t *lock, int pshared)
      if (0 == result)
        {
          ma->pshared = pshared;
-          result = pthread_mutex_init(&(s->u.mx), &ma);
+          result = pthread_mutex_init(&(s->u.mutex), &ma);
+          if (0 == result)
+            {
+              s->interlock = PTW32_SPIN_USE_MUTEX;
+            }
        }
    }
@@ -106,126 +152,150 @@ FAIL0:

int
pthread_spin_destroy(pthread_spinlock_t *lock)
{
-  pthread_spinlock_t s;
+  register pthread_spinlock_t s;

  if (lock == NULL || *lock == NULL)
    {
      return EINVAL;
    }

-  s = *lock;
-
-  if (PTW32_SPIN_SPINS(s))
+  if ((s = *lock) != PTHREAD_SPINLOCK_INITIALIZER)
    {
-      if ( PTW32_SPIN_UNLOCKED !=
-           InterlockedCompareExchange((LPLONG) &(s->u.interlock),
-                                      (LONG) PTW32_OBJECT_INVALID,
-                                      (LONG) PTW32_SPIN_UNLOCKED))
+      if (s->interlock == PTW32_SPIN_USE_MUTEX)
        {
-          return EINVAL;
+          return pthread_mutex_destroy(&(s->u.mutex));
        }
-      else
+
+      if ( (_LONG) PTW32_SPIN_UNLOCKED ==
+           InterlockedCompareExchange((_LPLONG) &(s->interlock),
+                                      (_LONG) PTW32_OBJECT_INVALID,
+                                      (_LONG) PTW32_SPIN_UNLOCKED))
        {
          return 0;
        }
+
+      return EINVAL;
    }
  else
    {
-      return pthread_mutex_destroy(&(s->u.mx));
+      int result = 0;
+
+      /*
+       * See notes in ptw32_spinlock_check_need_init() above also.
+       */
+      EnterCriticalSection(&ptw32_spinlock_test_init_lock);
+
+      /*
+       * Check again.
+       */
+      if (*lock == PTHREAD_SPINLOCK_INITIALIZER)
+        {
+          /*
+           * This is all we need to do to destroy a statically
+           * initialised spinlock that has not yet been used (initialised).
+           * If we get to here, another thread
+           * waiting to initialise this mutex will get an EINVAL.
+           */
+          *lock = NULL;
+        }
+      else
+        {
+          /*
+           * The spinlock has been initialised while we were waiting
+           * so assume it's in use.
+           */
+          result = EBUSY;
+        }
+
+      LeaveCriticalSection(&ptw32_spinlock_test_init_lock);
+      return(result);
    }
}
-
+/*
+ * NOTE: For speed, these routines don't check if "lock" is valid.
+ */
int
pthread_spin_lock(pthread_spinlock_t *lock)
{
-  pthread_spinlock_t s;
+  register pthread_spinlock_t s = *lock;

-  if (lock == NULL || *lock == NULL)
+  if (s == PTHREAD_SPINLOCK_INITIALIZER)
    {
-      return EINVAL;
+      int result;
+
+      if ((result = ptw32_spinlock_check_need_init(lock)) != 0)
+        {
+          return(result);
+        }
    }

-  s = *lock;
+  while ( (_LONG) PTW32_SPIN_LOCKED ==
+          InterlockedCompareExchange((_LPLONG) &(s->interlock),
+                                     (_LONG) PTW32_SPIN_LOCKED,
+                                     (_LONG) PTW32_SPIN_UNLOCKED) )
+    {}

-  if (PTW32_SPIN_SPINS(s))
+  if (s->interlock == PTW32_SPIN_LOCKED)
    {
-      while ( PTW32_SPIN_UNLOCKED !=
-              InterlockedCompareExchange((LPLONG) &(s->u.interlock),
-                                         (LONG) PTW32_SPIN_LOCKED,
-                                         (LONG) PTW32_SPIN_UNLOCKED) )
-        {
-          /* Spin */
-        }
+      return 0;
    }
-  else
+  else if (s->interlock == PTW32_SPIN_USE_MUTEX)
    {
-      return pthread_mutex_lock(&(s->u.mx));
+      return pthread_mutex_lock(&(s->u.mutex));
    }

-  return 0;
+  return EINVAL;
}

int
pthread_spin_unlock(pthread_spinlock_t *lock)
{
-  pthread_spinlock_t s;
+  register pthread_spinlock_t s = *lock;

-  if (lock == NULL || lock == NULL)
+  if (s->interlock == PTW32_SPIN_USE_MUTEX)
    {
-      return EINVAL;
+      return pthread_mutex_unlock(&(s->u.mutex));
    }

-  s = *lock;
-
-  if (PTW32_SPIN_SPINS(s))
-    {
-      if (PTW32_SPIN_LOCKED !=
-          InterlockedCompareExchange((LPLONG) &(s->u.interlock),
-                                     (LONG) PTW32_SPIN_UNLOCKED,
-                                     (LONG) PTW32_SPIN_LOCKED ) )
-        {
-          return 0;
-        }
-      else
-        {
-          return EINVAL;
-        }
-    }
-  else
+  if ((_LONG) PTW32_SPIN_LOCKED ==
+      InterlockedCompareExchange((_LPLONG) &(s->interlock),
+                                 (_LONG) PTW32_SPIN_UNLOCKED,
+                                 (_LONG) PTW32_SPIN_LOCKED ) )
    {
-      return pthread_mutex_unlock(&(s->u.mx));
+      return 0;
    }
+
+  return EINVAL;
}

int
pthread_spin_trylock(pthread_spinlock_t *lock)
{
-  pthread_spinlock_t s;
+  pthread_spinlock_t s = *lock;

-  if (lock == NULL || *lock == NULL)
+  if (s == PTHREAD_SPINLOCK_INITIALIZER)
    {
-      return EINVAL;
-    }
-
-  s = *lock;
+      int result;

-  if (PTW32_SPIN_SPINS(s))
-    {
-      if (PTW32_SPIN_UNLOCKED !=
-          InterlockedCompareExchange((LPLONG) &(s->u.interlock),
-                                     (LONG) PTW32_SPIN_LOCKED,
-                                     (LONG) PTW32_SPIN_UNLOCKED ) )
+      if ((result = ptw32_spinlock_check_need_init(lock)) != 0)
        {
-          return EBUSY;
-        }
-      else
-        {
-          return 0;
+          return(result);
        }
    }
-  else
+
+  if ((_LONG) PTW32_SPIN_UNLOCKED ==
+      InterlockedCompareExchange((_LPLONG) &(s->interlock),
+                                 (_LONG) PTW32_SPIN_LOCKED,
+                                 (_LONG) PTW32_SPIN_UNLOCKED ) )
    {
-      return pthread_mutex_trylock(&(s->u.mx));
+      return 0;
    }
+
+  if (s->interlock == PTW32_SPIN_USE_MUTEX)
+    {
+      return pthread_mutex_trylock(&(s->u.mutex));
+    }
+
+  return EINVAL;
}
diff --git a/tests/ChangeLog b/tests/ChangeLog
index 565f0ad..1c5d766 100644
--- a/tests/ChangeLog
+++ b/tests/ChangeLog
@@ -1,3 +1,10 @@
+2001-07-07 Ross Johnson <rpj@setup1.ise.canberra.edu.au>
+
+ * spin3.c: Changed test and fixed.
+ * spin4.c: Fixed.
+ * barrier3.c: Fixed.
+ * barrier4.c: Fixed.
+
2001-07-05 Ross Johnson <rpj@special.ise.canberra.edu.au>

 * spin1.c: New; testing spinlocks.
diff --git a/tests/barrier3.c b/tests/barrier3.c
index 497b76a..97f6dc2 100644
--- a/tests/barrier3.c
+++ b/tests/barrier3.c
@@ -1,7 +1,7 @@
/*
 * barrier3.c
 *
- * Declare a single barrier object, multiple wait on it,
+ * Declare a single barrier object with barrier attribute, wait on it,
 * and then destroy it.
 *
 */

@@ -9,8 +9,7 @@
#include "test.h"

pthread_barrier_t barrier = NULL;
-static int result1 = -1;
-static int result2 = -1;
+static int result = 1;

void *
func(void * arg)
{
@@ -21,20 +20,20 @@
int
main()
{
  pthread_t t;
+  pthread_barrierattr_t ba;

-  assert(pthread_barrier_init(&barrier, NULL, 2) == 0);
+  assert(pthread_barrierattr_init(&ba) == 0);
+  assert(pthread_barrierattr_setpshared(&ba, PTHREAD_PROCESS_PRIVATE) == 0);
+  assert(pthread_barrier_init(&barrier, &ba, 1) == 0);

  assert(pthread_create(&t, NULL, func, NULL) == 0);

-  result1 = pthread_barrier_wait(&barrier);
+  assert(pthread_join(t, (void *) &result) == 0);

-  assert(pthread_join(t, &result2) == 0);
-
-  assert(result1 != result2);
-  assert(result1 == 0 || result1 == PTHREAD_BARRIER_SERIAL_THREAD);
-  assert(result2 == 0 || result2 == PTHREAD_BARRIER_SERIAL_THREAD);
+  assert(result == PTHREAD_BARRIER_SERIAL_THREAD);

  assert(pthread_barrier_destroy(&barrier) == 0);
+  assert(pthread_barrierattr_destroy(&ba) == 0);

  return 0;
}
diff --git a/tests/barrier4.c b/tests/barrier4.c
index 1dd8291..8f33e85 100644
--- a/tests/barrier4.c
+++ b/tests/barrier4.c
@@ -14,12 +14,11 @@ enum {
pthread_barrier_t barrier = NULL;
pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;
-static int result1 = -1;
-static int result2 = -1;
static int serialThreadCount = 0;
static int otherThreadCount = 0;

-void * func(void * arg)
+void *
+func(void * arg)
{
  int result = pthread_barrier_wait(&barrier);

@@ -32,31 +31,39 @@ void * func(void * arg)
    {
      otherThreadCount++;
    }
-  assert(pthread_mutex_lock(&mx) == 0);
+  assert(pthread_mutex_unlock(&mx) == 0);

  return NULL;
}
-
+
int
main()
{
+  int i, j;
  pthread_t t[NUMTHREADS + 1];

-  assert(pthread_barrier_init(&barrier, NULL, NUMTHREADS) == 0);
-
-  for (i = 0; i < NUMTHREADS; i++)
+  for (j = 1; j <= NUMTHREADS; j++)
    {
-      assert(pthread_create(&t[i], NULL, func, NULL) == 0);
-    }
+      printf("Barrier height = %d\n", j);

-  for (i = 0; i < NUMTHREADS; i++)
-    {
-      assert(pthread_join(t[i], NULL) == 0);
-    }
+      serialThreadCount = 0;
+
+      assert(pthread_barrier_init(&barrier, NULL, j) == 0);
+
+      for (i = 1; i <= j; i++)
+        {
+          assert(pthread_create(&t[i], NULL, func, NULL) == 0);
+        }

-  assert(serialThreadCount == 1);
+      for (i = 1; i <= j; i++)
+        {
+          assert(pthread_join(t[i], NULL) == 0);
+        }

-  assert(pthread_barrier_destroy(&barrier) == 0);
+      assert(serialThreadCount == 1);
+
+      assert(pthread_barrier_destroy(&barrier) == 0);
+    }

  assert(pthread_mutex_destroy(&mx) == 0);
diff --git a/tests/spin3.c b/tests/spin3.c
index 8b383de..acd6e75 100644
--- a/tests/spin3.c
+++ b/tests/spin3.c
@@ -26,7 +26,6 @@
int
main()
{
  pthread_t t;
-  pthread_spinattr_t ma;

  wasHere = 0;
  assert(pthread_spin_init(&spin, PTHREAD_PROCESS_PRIVATE) == 0);
@@ -34,6 +33,7 @@ main()
  assert(pthread_create(&t, NULL, unlocker, (void *) 0) == 0);
  assert(pthread_join(t, NULL) == 0);
  assert(pthread_spin_unlock(&spin) == EPERM);
+  assert(pthread_spin_destroy(&spin) == 0);

  assert(wasHere == 2);

  return 0;
diff --git a/tests/spin4.c b/tests/spin4.c
index a435d04..5f04a27 100644
--- a/tests/spin4.c
+++ b/tests/spin4.c
@@ -1,19 +1,14 @@
/*
 * spin4.c
 *
- * Declare a spinlock object, lock it, spin on it,
+ * Declare a static spinlock object, lock it, spin on it,
 * and then unlock it again.
- *
- * For this to work on a single processor machine we have
- * to static initialise the spinlock. This bypasses the
- * check of the number of processors done by pthread_spin_init.
- * This is a non-portable side-effect of this implementation.
 */

#include "test.h"
#include <sys/timeb.h>

-pthread_spinlock_t lock = PTHREADS_SPINLOCK_INITIALIZER;
+pthread_spinlock_t lock = PTHREAD_SPINLOCK_INITIALIZER;

struct _timeb currSysTimeStart;
struct _timeb currSysTimeStop;
@@ -37,7 +32,16 @@
int
main()
{
  long result = 0;
+  int i;
  pthread_t t;
+  int CPUs;
+
+  if (pthread_getprocessors_np(&CPUs) != 0 || CPUs == 1)
+    {
+      printf("This test is not applicable to this system.\n");
+      printf("Either there is only 1 CPU or the no. could not be determined.\n");
+      exit(0);
+    }

  assert(pthread_spin_lock(&lock) == 0);
@@ -47,6 +51,7 @@ main()
   * This should relinqish the CPU to the func thread enough times
   * to waste approximately 2000 millisecs only if the lock really
   * is spinning in the func thread (assuming 10 millisec CPU quantum).
+   */
  for (i = 0; i < 200; i++)
    {
      sched_yield();
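
As the spin4.c change above illustrates, whether a spin lock actually spins depends on the processor count reported by pthread_getprocessors_np() (a pthreads-win32 extension used in the diff); on a single CPU the lock is backed by a mutex. A short illustrative sketch of how an application might report which path it will get (example code only, not part of the commit):

```c
#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t lock = PTHREAD_SPINLOCK_INITIALIZER;

int
main(void)
{
  int cpus = 0;

  /*
   * Per the implementation above: with one CPU (or an unknown count) the
   * spin lock is backed by a mutex, so no real busy-waiting ever happens.
   */
  if (pthread_getprocessors_np(&cpus) != 0 || cpus == 1)
    printf("single CPU (or unknown): spin lock falls back to a mutex\n");
  else
    printf("%d CPUs: spin lock uses InterlockedCompareExchange\n", cpus);

  pthread_spin_lock(&lock);
  pthread_spin_unlock(&lock);
  pthread_spin_destroy(&lock);
  return 0;
}
```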