summary | refs | log | tree | commit | diff
path: root/barrier.c
diff options
context:
space:
mode:
author: rpj <rpj> 2001-07-08 16:44:06 +0000
committer: rpj <rpj> 2001-07-08 16:44:06 +0000
commit: f58aab44e671bb39b8afb29804a9ca94c238c523 (patch)
tree: d1bac0558d5146c6468f8f421f22762f382c6c6e /barrier.c
parent: 704925281289e0f937eab045bd327b4275b2e03a (diff)
Barriers fixed and tested more extensively.
* barrier.c: Fix several bugs in all routines. Now passes tests/barrier5.c, which is fairly rigorous. There is still a non-optimal work-around for a race condition between the barrier-breached event signal and the event wait. Basically, the last (signalling) thread to hit the barrier yields to allow any other threads, which may have lost the race, to complete.

tests/ChangeLog:
* barrier3.c: Fixed.
* barrier4.c: Fixed.
* barrier5.c: New; proves that all threads in the group reaching the barrier wait and then resume together. Repeats the test using groups of 1 to 16 threads. Each group of threads must negotiate a large number of barriers (10000).
* spin4.c: Fixed.
* test.h (error_string): Modified the success (0) value.
Diffstat (limited to 'barrier.c')
-rw-r--r--  barrier.c  69
1 file changed, 45 insertions, 24 deletions
diff --git a/barrier.c b/barrier.c
index 41bcf07..6a18f25 100644
--- a/barrier.c
+++ b/barrier.c
@@ -59,23 +59,26 @@ pthread_barrier_init(pthread_barrier_t * barrier,
result = pthread_mutex_init(&(b->mtxExclusiveAccess), NULL);
if (0 != result)
{
- goto FAIL0;
- }
-
- result = sem_init(&(b->semBarrierBreeched), pshared, 0);
- if (0 != result)
- {
goto FAIL1;
}
- goto DONE;
+ b->eventBarrierBreeched = CreateEvent(NULL, /* Security attributes */
+ TRUE, /* Manual reset */
+ FALSE, /* Initially signaled */
+ NULL); /* Name */
- FAIL1:
+ if (NULL != b->eventBarrierBreeched)
+ {
+ goto DONE;
+ }
(void) pthread_mutex_destroy(&(b->mtxExclusiveAccess));
+ result = ENOMEM;
- FAIL0:
+ FAIL1:
(void) free(b);
+ b = NULL;
+ FAIL0:
DONE:
*barrier = b;
return(result);
@@ -94,7 +97,9 @@ pthread_barrier_destroy(pthread_barrier_t *barrier)
b = *barrier;
- if (0 == pthread_mutex_trylock(&(b->mtxExclusiveAccess)))
+ result = pthread_mutex_trylock(&(b->mtxExclusiveAccess));
+
+ if (0 == result)
{
/*
* FIXME!!!
@@ -105,16 +110,24 @@ pthread_barrier_destroy(pthread_barrier_t *barrier)
*/
*barrier = NULL;
- (void) sem_destroy(&(b->semBarrierBreeched));
+ result = CloseHandle(b->eventBarrierBreeched);
(void) pthread_mutex_unlock(&(b->mtxExclusiveAccess));
- (void) pthread_mutex_destroy(&(b->mtxExclusiveAccess));
- (void) free(b);
+ if (result == TRUE)
+ {
+ (void) pthread_mutex_destroy(&(b->mtxExclusiveAccess));
+ (void) free(b);
+ result = 0;
+ }
+ else
+ {
+ *barrier = b;
+ result = EINVAL;
+ }
}
return(result);
}
-
int
pthread_barrier_wait(pthread_barrier_t *barrier)
{
@@ -136,8 +149,14 @@ pthread_barrier_wait(pthread_barrier_t *barrier)
{
b->nCurrentBarrierHeight = b->nInitialBarrierHeight;
(void) pthread_mutex_unlock(&(b->mtxExclusiveAccess));
- (void) sem_post_multiple(&(b->semBarrierBreeched),
- b->nInitialBarrierHeight);
+ /*
+ * This is a work-around for the FIXME below. We
+ * give any threads that didn't quite get to register
+ * their wait another quantum. This is temporary
+ * - there is a better way to do this.
+ */
+ Sleep(0);
+ (void) PulseEvent(b->eventBarrierBreeched);
/*
* Would be better if the first thread to return
* from this routine got this value. On a single
@@ -151,23 +170,25 @@ pthread_barrier_wait(pthread_barrier_t *barrier)
pthread_t self;
int oldCancelState;
- (void) pthread_mutex_unlock(&(b->mtxExclusiveAccess));
-
self = pthread_self();
-
+
/*
* pthread_barrier_wait() is not a cancelation point
- * so temporarily prevent sem_wait() from being one.
+ * so temporarily prevent pthreadCancelableWait() from being one.
*/
if (self->cancelType == PTHREAD_CANCEL_DEFERRED)
{
pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldCancelState);
}
- if (0 != sem_wait(&(b->semBarrierBreeched)))
- {
- result = errno;
- }
+ (void) pthread_mutex_unlock(&(b->mtxExclusiveAccess));
+
+ /* FIXME!!! It's possible for a thread to be left behind at a
+ * barrier because of the time gap between the unlock
+ * and the registration that the thread is waiting on the
+ * event.
+ */
+ result = pthreadCancelableWait(b->eventBarrierBreeched);
if (self->cancelType == PTHREAD_CANCEL_DEFERRED)
{