author     rpj <rpj>  2011-03-25 09:22:50 +0000
committer  rpj <rpj>  2011-03-25 09:22:50 +0000
commit     559e1bc4051f90d7a014bd4e4f5649e136dec412 (patch)
tree       70e6f76c3b353b0b74b698a51088f5b9a38e53ab
parent     395de1c8a9a25c343a6784cfc3ecf729c48024d8 (diff)
Robust mutex streamlining
-rw-r--r--  implement.h                         13
-rwxr-xr-x  pthread_mutex_consistent.c         154
-rw-r--r--  pthread_mutex_init.c                 1
-rw-r--r--  pthread_mutex_lock.c                26
-rw-r--r--  pthread_mutex_timedlock.c           18
-rw-r--r--  pthread_mutex_trylock.c              8
-rw-r--r--  pthread_mutex_unlock.c              18
-rw-r--r--  pthread_win32_attach_detach_np.c     5
8 files changed, 14 insertions, 229 deletions
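
This commit drops the per-node MCS lock (the lock member of ptw32_robust_node_t_), the per-thread robustMxListLock acquisitions, and the separate ptw32_robust_mutex_quick_remove variant, folding removal into a single ptw32_robust_mutex_remove(mutex, otp). The per-thread robust-mutex list is then maintained with plain pointer splicing, which appears to rely on the list being touched only by the thread that currently owns the mutex, or by pthread_win32_thread_detach_np cleaning up after that thread has terminated (see the last hunk below). A minimal standalone sketch of that splicing pattern follows; struct node, list_add and list_remove are illustrative names only, not part of the library.

/* Sketch only: intrusive doubly-linked list splicing of the kind the
 * per-thread robust-mutex list uses after this commit.  No locking is
 * needed so long as a node is added or removed only by the owning
 * thread, or by the detach handler once that thread can no longer run. */
#include <stddef.h>

struct node
{
  struct node *prev;
  struct node *next;
};

/* Push a node onto the front of the list (cf. ptw32_robust_mutex_add). */
static void
list_add (struct node **list, struct node *n)
{
  n->prev = NULL;
  n->next = *list;
  if (*list != NULL)
    {
      (*list)->prev = n;
    }
  *list = n;
}

/* Unlink a node from wherever it sits (cf. ptw32_robust_mutex_remove). */
static void
list_remove (struct node **list, struct node *n)
{
  if (n->next != NULL)
    {
      n->next->prev = n->prev;
    }
  if (n->prev != NULL)
    {
      n->prev->next = n->next;
    }
  if (*list == n)
    {
      *list = n->next;
    }
}

int
main (void)
{
  struct node a = { NULL, NULL };
  struct node b = { NULL, NULL };
  struct node *list = NULL;

  list_add (&list, &a);      /* list: a */
  list_add (&list, &b);      /* list: b -> a */
  list_remove (&list, &a);   /* list: b */
  list_remove (&list, &b);   /* list: empty */
  return (NULL == list) ? 0 : 1;
}
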
diff --git a/implement.h b/implement.h
index 1067bce..b4c80b3 100644
--- a/implement.h
+++ b/implement.h
@@ -243,11 +243,7 @@ typedef enum ptw32_robust_state_t_ ptw32_robust_state_t;
struct ptw32_robust_node_t_
{
pthread_mutex_t mx;
- ptw32_mcs_lock_t lock; /* Exclusive access to this robust mutex */
ptw32_robust_state_t stateInconsistent;
-#if 0
- int inList;
-#endif
ptw32_robust_node_t* prev;
ptw32_robust_node_t* next;
};
@@ -600,15 +596,8 @@ extern "C"
int ptw32_rwlock_check_need_init (pthread_rwlock_t * rwlock);
int ptw32_robust_mutex_inherit(pthread_mutex_t * mutex);
-#if 1
void ptw32_robust_mutex_add(pthread_mutex_t* mutex, pthread_t self);
- void ptw32_robust_mutex_remove(pthread_mutex_t* mutex);
- void ptw32_robust_mutex_quick_remove(pthread_mutex_t* mutex, ptw32_thread_t* otp);
-#else
- void ptw32_robust_mutex_add(pthread_mutex_t* mutex);
- void ptw32_robust_mutex_remove(pthread_mutex_t* mutex, pthread_t self);
- void ptw32_robust_mutex_quick_remove(pthread_mutex_t* mutex, ptw32_thread_t* tp);
-#endif
+ void ptw32_robust_mutex_remove(pthread_mutex_t* mutex, ptw32_thread_t* otp);
DWORD
ptw32_RegisterCancelation (PAPCFUNC callback,
diff --git a/pthread_mutex_consistent.c b/pthread_mutex_consistent.c
index b18f597..5ceab0c 100755
--- a/pthread_mutex_consistent.c
+++ b/pthread_mutex_consistent.c
@@ -99,22 +99,16 @@ ptw32_robust_mutex_inherit(pthread_mutex_t * mutex)
return result;
}
-#if 1
INLINE
void
ptw32_robust_mutex_add(pthread_mutex_t* mutex, pthread_t self)
{
- ptw32_mcs_local_node_t listLock;
- ptw32_mcs_local_node_t mx1Lock;
- ptw32_mcs_local_node_t mx2Lock;
ptw32_robust_node_t** list;
pthread_mutex_t mx = *mutex;
ptw32_thread_t* tp = self.p;
ptw32_robust_node_t* robust = mx->robustNode;
- ptw32_mcs_lock_acquire(&tp->robustMxListLock, &listLock);
list = &tp->robustMxList;
- ptw32_mcs_lock_acquire(&mx->robustNode->lock, &mx1Lock);
mx->ownerThread = self;
if (NULL == *list)
{
@@ -126,164 +120,36 @@ ptw32_robust_mutex_add(pthread_mutex_t* mutex, pthread_t self)
{
robust->prev = NULL;
robust->next = *list;
- ptw32_mcs_lock_acquire(&(*list)->lock, &mx2Lock);
(*list)->prev = robust;
- ptw32_mcs_lock_release(&mx2Lock);
*list = robust;
}
- ptw32_mcs_lock_release(&mx1Lock);
- ptw32_mcs_lock_release(&listLock);
}
-#else
-INLINE
-void
-ptw32_robust_mutex_add(pthread_mutex_t* mutex)
-{
- ptw32_mcs_local_node_t listLock;
- ptw32_mcs_local_node_t mx1Lock;
- ptw32_mcs_local_node_t mx2Lock;
- ptw32_robust_node_t** list;
- pthread_mutex_t mx = *mutex;
- ptw32_robust_node_t* robust = mx->robustNode;
- ptw32_thread_t* tp = mx->ownerThread.p;
-
- if (NULL != tp)
- {
- ptw32_mcs_lock_acquire(&tp->robustMxListLock, &listLock);
- if (0 == robust->inList)
- {
- list = &tp->robustMxList;
- ptw32_mcs_lock_acquire(&mx->robustNode->lock, &mx1Lock);
- if (NULL == *list)
- {
- robust->prev = NULL;
- robust->next = NULL;
- *list = robust;
- }
- else
- {
- robust->prev = NULL;
- robust->next = *list;
- ptw32_mcs_lock_acquire(&(*list)->lock, &mx2Lock);
- (*list)->prev = robust;
- ptw32_mcs_lock_release(&mx2Lock);
- *list = robust;
- }
- ptw32_mcs_lock_release(&mx1Lock);
- robust->inList = 1;
- }
- ptw32_mcs_lock_release(&listLock);
- }
-}
-
-#endif
-#if 1
INLINE
void
-ptw32_robust_mutex_quick_remove(pthread_mutex_t* mutex, ptw32_thread_t* otp)
+ptw32_robust_mutex_remove(pthread_mutex_t* mutex, ptw32_thread_t* otp)
{
- ptw32_mcs_local_node_t mx1Lock;
- ptw32_mcs_local_node_t mx2Lock;
ptw32_robust_node_t** list;
pthread_mutex_t mx = *mutex;
ptw32_thread_t* tp = mx->ownerThread.p;
+ ptw32_robust_node_t* robust = mx->robustNode;
- if (NULL != tp)
+ list = &tp->robustMxList;
+ mx->ownerThread.p = otp;
+ if (robust->next != NULL)
{
- ptw32_robust_node_t* robust = mx->robustNode;
-
- list = &tp->robustMxList;
- if (list != NULL)
- {
- ptw32_mcs_lock_acquire(&robust->lock, &mx1Lock);
- mx->ownerThread.p = otp;
- if (NULL != robust->next)
- {
- ptw32_mcs_lock_acquire(&robust->next->lock, &mx2Lock);
- robust->next->prev = robust->prev;
- ptw32_mcs_lock_release(&mx2Lock);
- }
- if (NULL != robust->prev)
- {
- ptw32_mcs_lock_acquire(&robust->prev->lock, &mx2Lock);
- robust->prev->next = robust->next;
- ptw32_mcs_lock_release(&mx2Lock);
- }
- if (*list == robust)
- {
- *list = robust->next;
- }
- ptw32_mcs_lock_release(&mx1Lock);
- }
+ robust->next->prev = robust->prev;
}
-}
-
-INLINE
-void
-ptw32_robust_mutex_remove(pthread_mutex_t* mutex)
-{
- ptw32_mcs_local_node_t listLock;
- ptw32_thread_t* tp = (*mutex)->ownerThread.p;
-
- if (NULL != tp)
+ if (robust->prev != NULL)
{
- ptw32_mcs_lock_acquire(&tp->robustMxListLock, &listLock);
- ptw32_robust_mutex_quick_remove(mutex, NULL);
- ptw32_mcs_lock_release(&listLock);
+ robust->prev->next = robust->next;
}
-}
-#else
-INLINE
-void
-ptw32_robust_mutex_quick_remove(pthread_mutex_t* mutex, ptw32_thread_t* tp)
-{
- ptw32_mcs_local_node_t mx1Lock;
- ptw32_mcs_local_node_t mx2Lock;
- ptw32_robust_node_t** list;
- pthread_mutex_t mx = *mutex;
- ptw32_robust_node_t* robust = mx->robustNode;
-
- list = &tp->robustMxList;
- if (list != NULL)
+ if (*list == robust)
{
- ptw32_mcs_lock_acquire(&robust->lock, &mx1Lock);
- if (NULL != robust->next)
- {
- ptw32_mcs_lock_acquire(&robust->next->lock, &mx2Lock);
- robust->next->prev = robust->prev;
- ptw32_mcs_lock_release(&mx2Lock);
- }
- if (NULL != robust->prev)
- {
- ptw32_mcs_lock_acquire(&robust->prev->lock, &mx2Lock);
- robust->prev->next = robust->next;
- ptw32_mcs_lock_release(&mx2Lock);
- }
- if (*list == robust)
- {
- *list = robust->next;
- }
- ptw32_mcs_lock_release(&mx1Lock);
+ *list = robust->next;
}
- robust->inList = 0;
}
-INLINE
-void
-ptw32_robust_mutex_remove(pthread_mutex_t* mutex, pthread_t self)
-{
- ptw32_mcs_local_node_t listLock;
- ptw32_thread_t* tp = self.p;
-
- ptw32_mcs_lock_acquire(&tp->robustMxListLock, &listLock);
- if (1 == (*mutex)->robustNode->inList)
- {
- ptw32_robust_mutex_quick_remove(mutex, tp);
- }
- ptw32_mcs_lock_release(&listLock);
-}
-#endif
int
pthread_mutex_consistent (pthread_mutex_t* mutex)
diff --git a/pthread_mutex_init.c b/pthread_mutex_init.c
index 93e7384..daf805e 100644
--- a/pthread_mutex_init.c
+++ b/pthread_mutex_init.c
@@ -104,7 +104,6 @@ pthread_mutex_init (pthread_mutex_t * mutex, const pthread_mutexattr_t * attr)
mx->robustNode = (ptw32_robust_node_t*) malloc(sizeof(ptw32_robust_node_t));
mx->robustNode->stateInconsistent = PTW32_ROBUST_CONSISTENT;
- mx->robustNode->lock = 0;
mx->robustNode->mx = mx;
mx->robustNode->next = NULL;
mx->robustNode->prev = NULL;
diff --git a/pthread_mutex_lock.c b/pthread_mutex_lock.c
index eb81c9b..ba410a6 100644
--- a/pthread_mutex_lock.c
+++ b/pthread_mutex_lock.c
@@ -172,13 +172,6 @@ pthread_mutex_lock (pthread_mutex_t * mutex)
(LPLONG) &mx->lock_idx,
(LONG) -1) != 0)
{
-#if 0
- /*
- * Only need to add the mutex to the list kept by the owner thread
- * when a thread blocks on the mutex.
- */
- ptw32_robust_mutex_add(mutex);
-#endif
if (WAIT_OBJECT_0 != WaitForSingleObject (mx->event, INFINITE))
{
result = EINVAL;
@@ -198,15 +191,11 @@ pthread_mutex_lock (pthread_mutex_t * mutex)
}
if (0 == result || EOWNERDEAD == result)
{
-#if 0
- mx->ownerThread = self;
-#else
/*
* Add mutex to the per-thread robust mutex currently-held list.
* If the thread terminates, all mutexes in this list will be unlocked.
*/
ptw32_robust_mutex_add(mutex, self);
-#endif
}
}
else
@@ -217,15 +206,11 @@ pthread_mutex_lock (pthread_mutex_t * mutex)
(PTW32_INTERLOCKED_LONG) 0) == 0)
{
mx->recursive_count = 1;
-#if 1
/*
* Add mutex to the per-thread robust mutex currently-held list.
* If the thread terminates, all mutexes in this list will be unlocked.
*/
ptw32_robust_mutex_add(mutex, self);
-#else
- mx->ownerThread = self;
-#endif
}
else
{
@@ -247,13 +232,6 @@ pthread_mutex_lock (pthread_mutex_t * mutex)
(LPLONG) &mx->lock_idx,
(LONG) -1) != 0)
{
-#if 0
- /*
- * Only need to add the mutex to the list kept by the owner thread
- * when a thread blocks on the mutex.
- */
- ptw32_robust_mutex_add(mutex);
-#endif
if (WAIT_OBJECT_0 != WaitForSingleObject (mx->event, INFINITE))
{
result = EINVAL;
@@ -274,15 +252,11 @@ pthread_mutex_lock (pthread_mutex_t * mutex)
if (0 == result || EOWNERDEAD == result)
{
mx->recursive_count = 1;
-#if 1
/*
* Add mutex to the per-thread robust mutex currently-held list.
* If the thread terminates, all mutexes in this list will be unlocked.
*/
ptw32_robust_mutex_add(mutex, self);
-#else
- mx->ownerThread = self;
-#endif
}
}
}
diff --git a/pthread_mutex_timedlock.c b/pthread_mutex_timedlock.c
index d4c280d..3f759b5 100644
--- a/pthread_mutex_timedlock.c
+++ b/pthread_mutex_timedlock.c
@@ -228,9 +228,6 @@ pthread_mutex_timedlock (pthread_mutex_t * mutex,
(LPLONG) &mx->lock_idx,
(LONG) -1) != 0)
{
-#if 0
- ptw32_robust_mutex_add(mutex);
-#endif
if (0 != (result = ptw32_timed_eventwait (mx->event, abstime)))
{
return result;
@@ -249,15 +246,11 @@ pthread_mutex_timedlock (pthread_mutex_t * mutex,
if (0 == result || EOWNERDEAD == result)
{
-#if 1
/*
* Add mutex to the per-thread robust mutex currently-held list.
* If the thread terminates, all mutexes in this list will be unlocked.
*/
ptw32_robust_mutex_add(mutex, self);
-#else
- mx->ownerThread = self;
-#endif
}
}
}
@@ -271,15 +264,11 @@ pthread_mutex_timedlock (pthread_mutex_t * mutex,
(PTW32_INTERLOCKED_LONG) 0) == 0)
{
mx->recursive_count = 1;
-#if 1
/*
* Add mutex to the per-thread robust mutex currently-held list.
* If the thread terminates, all mutexes in this list will be unlocked.
*/
ptw32_robust_mutex_add(mutex, self);
-#else
- mx->ownerThread = self;
-#endif
}
else
{
@@ -301,9 +290,6 @@ pthread_mutex_timedlock (pthread_mutex_t * mutex,
(LPLONG) &mx->lock_idx,
(LONG) -1) != 0)
{
-#if 0
- ptw32_robust_mutex_add(mutex);
-#endif
if (0 != (result = ptw32_timed_eventwait (mx->event, abstime)))
{
return result;
@@ -322,15 +308,11 @@ pthread_mutex_timedlock (pthread_mutex_t * mutex,
else if (0 == result || EOWNERDEAD == result)
{
mx->recursive_count = 1;
-#if 1
/*
* Add mutex to the per-thread robust mutex currently-held list.
* If the thread terminates, all mutexes in this list will be unlocked.
*/
ptw32_robust_mutex_add(mutex, self);
-#else
- mx->ownerThread = self;
-#endif
}
}
}
diff --git a/pthread_mutex_trylock.c b/pthread_mutex_trylock.c
index f51c34c..6fcff75 100644
--- a/pthread_mutex_trylock.c
+++ b/pthread_mutex_trylock.c
@@ -123,11 +123,7 @@ pthread_mutex_trylock (pthread_mutex_t * mutex)
{
mx->recursive_count = 1;
}
-#if 1
ptw32_robust_mutex_add(mutex, self);
-#else
- mx->ownerThread = self;
-#endif
}
else
{
@@ -141,11 +137,7 @@ pthread_mutex_trylock (pthread_mutex_t * mutex)
if (EOWNERDEAD == (result = ptw32_robust_mutex_inherit(mutex)))
{
mx->recursive_count = 1;
-#if 1
ptw32_robust_mutex_add(mutex, self);
-#else
- mx->ownerThread = self;
-#endif
}
else
{
diff --git a/pthread_mutex_unlock.c b/pthread_mutex_unlock.c
index 7a3f009..5cd51af 100644
--- a/pthread_mutex_unlock.c
+++ b/pthread_mutex_unlock.c
@@ -125,18 +125,11 @@ pthread_mutex_unlock (pthread_mutex_t * mutex)
(LONG)PTW32_ROBUST_INCONSISTENT);
if (PTHREAD_MUTEX_NORMAL == kind)
{
-#if 1
- ptw32_robust_mutex_remove(mutex);
-#else
- mx->ownerThread.p = NULL;
-#endif
+ ptw32_robust_mutex_remove(mutex, NULL);
if ((LONG) PTW32_INTERLOCKED_EXCHANGE((LPLONG) &mx->lock_idx,
(LONG) 0) < 0)
{
-#if 0
- ptw32_robust_mutex_remove(mutex, self);
-#endif
/*
* Someone may be waiting on that mutex.
*/
@@ -151,18 +144,11 @@ pthread_mutex_unlock (pthread_mutex_t * mutex)
if (kind != PTHREAD_MUTEX_RECURSIVE
|| 0 == --mx->recursive_count)
{
-#if 1
- ptw32_robust_mutex_remove(mutex);
-#else
- mx->ownerThread.p = NULL;
-#endif
+ ptw32_robust_mutex_remove(mutex, NULL);
if ((LONG) PTW32_INTERLOCKED_EXCHANGE((LPLONG) &mx->lock_idx,
(LONG) 0) < 0)
{
-#if 0
- ptw32_robust_mutex_remove(mutex, self);
-#endif
/*
* Someone may be waiting on that mutex.
*/
diff --git a/pthread_win32_attach_detach_np.c b/pthread_win32_attach_detach_np.c
index d03b55f..65e0913 100644
--- a/pthread_win32_attach_detach_np.c
+++ b/pthread_win32_attach_detach_np.c
@@ -191,7 +191,6 @@ pthread_win32_thread_detach_np ()
if (sp != NULL) // otherwise Win32 thread with no implicit POSIX handle.
{
ptw32_mcs_local_node_t stateLock;
- ptw32_mcs_local_node_t listLock;
ptw32_callUserDestroyRoutines (sp->ptHandle);
ptw32_mcs_lock_acquire (&sp->stateLock, &stateLock);
@@ -205,11 +204,10 @@ pthread_win32_thread_detach_np ()
/*
* Robust Mutexes
*/
- ptw32_mcs_lock_acquire(&sp->robustMxListLock, &listLock);
while (sp->robustMxList != NULL)
{
pthread_mutex_t mx = sp->robustMxList->mx;
- ptw32_robust_mutex_quick_remove(&mx, sp);
+ ptw32_robust_mutex_remove(&mx, sp);
(void) PTW32_INTERLOCKED_EXCHANGE(
(LPLONG)&mx->robustNode->stateInconsistent,
-1L);
@@ -220,7 +218,6 @@ pthread_win32_thread_detach_np ()
*/
SetEvent(mx->event);
}
- ptw32_mcs_lock_release(&listLock);
if (sp->detachState == PTHREAD_CREATE_DETACHED)