path: root/pthread_mutex_timedlock.c
author     rpj <rpj>    2011-03-24 23:33:14 +0000
committer  rpj <rpj>    2011-03-24 23:33:14 +0000
commit     db171f2f9435b98f05f33fcbc0dcf0c5cc1cb917 (patch)
tree       9d617a20a9e0ad1fcf415e353057c53e6d77d0f3 /pthread_mutex_timedlock.c
parent     e5229a33f8724a90cbb0b56c3ecc1d6691bf54d7 (diff)
First pass of robust mutexes
Diffstat (limited to 'pthread_mutex_timedlock.c')
-rw-r--r--  pthread_mutex_timedlock.c | 240
1 file changed, 193 insertions(+), 47 deletions(-)
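For context on what robust mutexes mean for callers of pthread_mutex_timedlock, here is a minimal caller-side sketch. It assumes the standard POSIX-2008 entry points pthread_mutexattr_setrobust and pthread_mutex_consistent, which are not part of this patch and may be spelled differently in this library while the feature is a first pass. A thread that gets EOWNERDEAD now owns the lock but must repair the shared state and mark the mutex consistent; ENOTRECOVERABLE means an earlier recovering thread gave up and the mutex is permanently unusable.

#include <pthread.h>
#include <errno.h>
#include <time.h>

static pthread_mutex_t lock;

/* Initialise a robust mutex (standard POSIX-2008 names assumed). */
static void init_robust_lock(void)
{
  pthread_mutexattr_t attr;

  pthread_mutexattr_init(&attr);
  pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);
  pthread_mutex_init(&lock, &attr);
  pthread_mutexattr_destroy(&attr);
}

/* Timed lock that handles the two robust-specific return codes. */
static int lock_with_timeout(const struct timespec *abstime)
{
  int rc = pthread_mutex_timedlock(&lock, abstime);

  if (rc == EOWNERDEAD)
    {
      /* The previous owner died holding the mutex: this thread owns it
       * now, so repair the protected data, then mark it consistent. */
      /* ... recover shared state here ... */
      rc = pthread_mutex_consistent(&lock);
    }
  else if (rc == ENOTRECOVERABLE)
    {
      /* A recovering thread unlocked without calling
       * pthread_mutex_consistent(); the mutex can no longer be used. */
    }
  return rc;
}

int main(void)
{
  struct timespec abstime;

  init_robust_lock();
  clock_gettime(CLOCK_REALTIME, &abstime);
  abstime.tv_sec += 5;                 /* wait at most 5 seconds */

  if (lock_with_timeout(&abstime) == 0)
    pthread_mutex_unlock(&lock);
  return 0;
}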
diff --git a/pthread_mutex_timedlock.c b/pthread_mutex_timedlock.c
index a238552..d81b9c3 100644
--- a/pthread_mutex_timedlock.c
+++ b/pthread_mutex_timedlock.c
@@ -109,8 +109,9 @@ int
pthread_mutex_timedlock (pthread_mutex_t * mutex,
const struct timespec *abstime)
{
- int result;
pthread_mutex_t mx;
+ int kind;
+ int result = 0;
/*
* Let the system deal with invalid pointers.
@@ -131,66 +132,211 @@ pthread_mutex_timedlock (pthread_mutex_t * mutex,
}
mx = *mutex;
+ kind = mx->kind;
- if (mx->kind == PTHREAD_MUTEX_NORMAL)
+ if (kind >= 0)
{
- if ((LONG) PTW32_INTERLOCKED_EXCHANGE(
- (LPLONG) &mx->lock_idx,
- (LONG) 1) != 0)
- {
- while ((LONG) PTW32_INTERLOCKED_EXCHANGE(
- (LPLONG) &mx->lock_idx,
- (LONG) -1) != 0)
- {
- if (0 != (result = ptw32_timed_eventwait (mx->event, abstime)))
- {
- return result;
- }
- }
- }
- }
- else
- {
- pthread_t self = pthread_self();
-
- if ((PTW32_INTERLOCKED_LONG) PTW32_INTERLOCKED_COMPARE_EXCHANGE(
- (PTW32_INTERLOCKED_LPLONG) &mx->lock_idx,
- (PTW32_INTERLOCKED_LONG) 1,
- (PTW32_INTERLOCKED_LONG) 0) == 0)
- {
- mx->recursive_count = 1;
- mx->ownerThread = self;
- }
- else
- {
- if (pthread_equal (mx->ownerThread, self))
- {
- if (mx->kind == PTHREAD_MUTEX_RECURSIVE)
- {
- mx->recursive_count++;
- }
- else
- {
- return EDEADLK;
- }
- }
- else
+ if (mx->kind == PTHREAD_MUTEX_NORMAL)
+ {
+ if ((LONG) PTW32_INTERLOCKED_EXCHANGE(
+ (LPLONG) &mx->lock_idx,
+ (LONG) 1) != 0)
{
while ((LONG) PTW32_INTERLOCKED_EXCHANGE(
(LPLONG) &mx->lock_idx,
(LONG) -1) != 0)
{
- if (0 != (result = ptw32_timed_eventwait (mx->event, abstime)))
+ if (0 != (result = ptw32_timed_eventwait (mx->event, abstime)))
{
return result;
}
- }
+ }
+ }
+ }
+ else
+ {
+ pthread_t self = pthread_self();
+ if ((PTW32_INTERLOCKED_LONG) PTW32_INTERLOCKED_COMPARE_EXCHANGE(
+ (PTW32_INTERLOCKED_LPLONG) &mx->lock_idx,
+ (PTW32_INTERLOCKED_LONG) 1,
+ (PTW32_INTERLOCKED_LONG) 0) == 0)
+ {
mx->recursive_count = 1;
mx->ownerThread = self;
}
- }
+ else
+ {
+ if (pthread_equal (mx->ownerThread, self))
+ {
+ if (mx->kind == PTHREAD_MUTEX_RECURSIVE)
+ {
+ mx->recursive_count++;
+ }
+ else
+ {
+ return EDEADLK;
+ }
+ }
+ else
+ {
+ while ((LONG) PTW32_INTERLOCKED_EXCHANGE(
+ (LPLONG) &mx->lock_idx,
+ (LONG) -1) != 0)
+ {
+ if (0 != (result = ptw32_timed_eventwait (mx->event, abstime)))
+ {
+ return result;
+ }
+ }
+
+ mx->recursive_count = 1;
+ mx->ownerThread = self;
+ }
+ }
+ }
}
+ else
+ {
+ /*
+ * Robust types
+ * All types record the current owner thread.
+ * The mutex is added to a per thread list when ownership is acquired.
+ */
+ ptw32_robust_state_t* statePtr = &mx->robustNode->stateInconsistent;
+ if ((LONG)PTW32_ROBUST_NOTRECOVERABLE == PTW32_INTERLOCKED_EXCHANGE_ADD(
+ (LPLONG)statePtr,
+ 0L))
+ {
+ result = ENOTRECOVERABLE;
+ }
+ else
+ {
+ pthread_t self = pthread_self();
+
+ kind = -kind - 1; /* Convert to non-robust range */
+
+ if (PTHREAD_MUTEX_NORMAL == kind)
+ {
+ if ((LONG) PTW32_INTERLOCKED_EXCHANGE(
+ (LPLONG) &mx->lock_idx,
+ (LONG) 1) != 0)
+ {
+ while (0 == (result = ptw32_robust_mutex_inherit(mutex, self))
+ && (LONG) PTW32_INTERLOCKED_EXCHANGE(
+ (LPLONG) &mx->lock_idx,
+ (LONG) -1) != 0)
+ {
+#if 0
+ ptw32_robust_mutex_add(mutex);
+#endif
+ if (0 != (result = ptw32_timed_eventwait (mx->event, abstime)))
+ {
+ return result;
+ }
+ if ((LONG)PTW32_ROBUST_NOTRECOVERABLE ==
+ PTW32_INTERLOCKED_EXCHANGE_ADD(
+ (LPLONG)statePtr,
+ 0L))
+ {
+ /* Unblock the next thread */
+ SetEvent(mx->event);
+ result = ENOTRECOVERABLE;
+ break;
+ }
+ }
+
+ if (0 == result || EOWNERDEAD == result)
+ {
+#if 1
+ /*
+ * Add mutex to the per-thread robust mutex currently-held list.
+ * If the thread terminates, all mutexes in this list will be unlocked.
+ */
+ ptw32_robust_mutex_add(mutex, self);
+#else
+ mx->ownerThread = self;
+#endif
+ }
+ }
+ }
+ else
+ {
+ pthread_t self = pthread_self();
+
+ if ((PTW32_INTERLOCKED_LONG) PTW32_INTERLOCKED_COMPARE_EXCHANGE(
+ (PTW32_INTERLOCKED_LPLONG) &mx->lock_idx,
+ (PTW32_INTERLOCKED_LONG) 1,
+ (PTW32_INTERLOCKED_LONG) 0) == 0)
+ {
+ mx->recursive_count = 1;
+#if 1
+ /*
+ * Add mutex to the per-thread robust mutex currently-held list.
+ * If the thread terminates, all mutexes in this list will be unlocked.
+ */
+ ptw32_robust_mutex_add(mutex, self);
+#else
+ mx->ownerThread = self;
+#endif
+ }
+ else
+ {
+ if (pthread_equal (mx->ownerThread, self))
+ {
+ if (PTHREAD_MUTEX_RECURSIVE == kind)
+ {
+ mx->recursive_count++;
+ }
+ else
+ {
+ return EDEADLK;
+ }
+ }
+ else
+ {
+ while (0 == (result = ptw32_robust_mutex_inherit(mutex, self))
+ && (LONG) PTW32_INTERLOCKED_EXCHANGE(
+ (LPLONG) &mx->lock_idx,
+ (LONG) -1) != 0)
+ {
+#if 0
+ ptw32_robust_mutex_add(mutex);
+#endif
+ if (0 != (result = ptw32_timed_eventwait (mx->event, abstime)))
+ {
+ return result;
+ }
+ }
+
+ if ((LONG)PTW32_ROBUST_NOTRECOVERABLE ==
+ PTW32_INTERLOCKED_EXCHANGE_ADD(
+ (LPLONG)statePtr,
+ 0L))
+ {
+ /* Unblock the next thread */
+ SetEvent(mx->event);
+ result = ENOTRECOVERABLE;
+ }
+ else if (0 == result || EOWNERDEAD == result)
+ {
+ mx->recursive_count = 1;
+#if 1
+ /*
+ * Add mutex to the per-thread robust mutex currently-held list.
+ * If the thread terminates, all mutexes in this list will be unlocked.
+ */
+ ptw32_robust_mutex_add(mutex, self);
+#else
+ mx->ownerThread = self;
+#endif
+ }
+ }
+ }
+ }
+ }
+ }
+
- return 0;
+ return result;
}
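A note on the encoding the new code relies on: mx->kind stays non-negative for the existing non-robust types, while a robust mutex stores its base type as a negative value, recovered in the patch with kind = -kind - 1. The helpers below are purely illustrative (hypothetical names, not library code) and only demonstrate that the mapping is reversible and never collides with the non-negative, non-robust range.

#include <assert.h>

/* Hypothetical helpers illustrating the sign-based kind encoding. */
static int robust_store_kind(int kind)    /* base kind 0 -> -1, 1 -> -2, ... */
{
  return -kind - 1;
}

static int robust_fetch_kind(int stored)  /* stored -1 -> 0, -2 -> 1, ... */
{
  return -stored - 1;
}

int main(void)
{
  int kind;

  for (kind = 0; kind < 3; kind++)
    {
      int stored = robust_store_kind(kind);

      assert(stored < 0);                        /* robust kinds are negative */
      assert(robust_fetch_kind(stored) == kind); /* mapping is its own inverse */
    }
  return 0;
}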