| author    | rpj <rpj>                                                            | 2011-05-06 02:11:50 +0000 |
|-----------|----------------------------------------------------------------------|---------------------------|
| committer | rpj <rpj>                                                            | 2011-05-06 02:11:50 +0000 |
| commit    | 2fe8aba6a8a4ce09f353f34881c77f93a9c01ca3 (patch)                     |                           |
| tree      | fd7f179b1abaa525ec55e34bef23b12f8fd89021 /pthread_mutex_timedlock.c |                           |
| parent    | 941d7cf87c60b55342b51e0b0fcd748589b76167 (diff)                      |                           |
Robust mutexes merged from devel branch (tag: post_merge_with_ROBUST_MUTEXES)
Diffstat (limited to 'pthread_mutex_timedlock.c')
| -rw-r--r-- | pthread_mutex_timedlock.c | 222 | 
1 file changed, 175 insertions, 47 deletions
diff --git a/pthread_mutex_timedlock.c b/pthread_mutex_timedlock.c
index a238552..3f759b5 100644
--- a/pthread_mutex_timedlock.c
+++ b/pthread_mutex_timedlock.c
@@ -109,8 +109,9 @@ int
 pthread_mutex_timedlock (pthread_mutex_t * mutex,
 			 const struct timespec *abstime)
 {
-  int result;
   pthread_mutex_t mx;
+  int kind;
+  int result = 0;
 
   /*
    * Let the system deal with invalid pointers.
@@ -131,66 +132,193 @@ pthread_mutex_timedlock (pthread_mutex_t * mutex,
     }
 
   mx = *mutex;
+  kind = mx->kind;
 
-  if (mx->kind == PTHREAD_MUTEX_NORMAL)
+  if (kind >= 0)
     {
-      if ((LONG) PTW32_INTERLOCKED_EXCHANGE(
-		   (LPLONG) &mx->lock_idx,
-		   (LONG) 1) != 0)
-	{
-          while ((LONG) PTW32_INTERLOCKED_EXCHANGE(
-                          (LPLONG) &mx->lock_idx,
-			  (LONG) -1) != 0)
-            {
-	      if (0 != (result = ptw32_timed_eventwait (mx->event, abstime)))
-		{
-		  return result;
-		}
-	    }
-	}
-    }
-  else
-    {
-      pthread_t self = pthread_self();
-
-      if ((PTW32_INTERLOCKED_LONG) PTW32_INTERLOCKED_COMPARE_EXCHANGE(
-                   (PTW32_INTERLOCKED_LPLONG) &mx->lock_idx,
-		   (PTW32_INTERLOCKED_LONG) 1,
-		   (PTW32_INTERLOCKED_LONG) 0) == 0)
-	{
-	  mx->recursive_count = 1;
-	  mx->ownerThread = self;
-	}
-      else
-	{
-	  if (pthread_equal (mx->ownerThread, self))
-	    {
-	      if (mx->kind == PTHREAD_MUTEX_RECURSIVE)
-		{
-		  mx->recursive_count++;
-		}
-	      else
-		{
-		  return EDEADLK;
-		}
-	    }
-	  else
+      if (mx->kind == PTHREAD_MUTEX_NORMAL)
+        {
+          if ((LONG) PTW32_INTERLOCKED_EXCHANGE(
+		       (LPLONG) &mx->lock_idx,
+		       (LONG) 1) != 0)
 	    {
               while ((LONG) PTW32_INTERLOCKED_EXCHANGE(
                               (LPLONG) &mx->lock_idx,
 			      (LONG) -1) != 0)
                 {
-		  if (0 != (result = ptw32_timed_eventwait (mx->event, abstime)))
+	          if (0 != (result = ptw32_timed_eventwait (mx->event, abstime)))
 		    {
 		      return result;
 		    }
-		}
+	        }
+	    }
+        }
+      else
+        {
+          pthread_t self = pthread_self();
+          if ((PTW32_INTERLOCKED_LONG) PTW32_INTERLOCKED_COMPARE_EXCHANGE(
+                       (PTW32_INTERLOCKED_LPLONG) &mx->lock_idx,
+		       (PTW32_INTERLOCKED_LONG) 1,
+		       (PTW32_INTERLOCKED_LONG) 0) == 0)
+	    {
 	      mx->recursive_count = 1;
 	      mx->ownerThread = self;
 	    }
-	}
+          else
+	    {
+	      if (pthread_equal (mx->ownerThread, self))
+	        {
+	          if (mx->kind == PTHREAD_MUTEX_RECURSIVE)
+		    {
+		      mx->recursive_count++;
+		    }
+	          else
+		    {
+		      return EDEADLK;
+		    }
+	        }
+	      else
+	        {
+                  while ((LONG) PTW32_INTERLOCKED_EXCHANGE(
+                                  (LPLONG) &mx->lock_idx,
+			          (LONG) -1) != 0)
+                    {
+		      if (0 != (result = ptw32_timed_eventwait (mx->event, abstime)))
+		        {
+		          return result;
+		        }
+		    }
+
+	          mx->recursive_count = 1;
+	          mx->ownerThread = self;
+	        }
+	    }
+        }
     }
+  else
+    {
+      /*
+       * Robust types
+       * All types record the current owner thread.
+       * The mutex is added to a per thread list when ownership is acquired.
+       */
+      ptw32_robust_state_t* statePtr = &mx->robustNode->stateInconsistent;
 
-  return 0;
+      if ((LONG)PTW32_ROBUST_NOTRECOVERABLE == PTW32_INTERLOCKED_EXCHANGE_ADD(
+                                                 (LPLONG)statePtr,
+                                                 0L))
+        {
+          result = ENOTRECOVERABLE;
+        }
+      else
+        {
+          pthread_t self = pthread_self();
+
+          kind = -kind - 1; /* Convert to non-robust range */
+
+          if (PTHREAD_MUTEX_NORMAL == kind)
+            {
+              if ((LONG) PTW32_INTERLOCKED_EXCHANGE(
+		           (LPLONG) &mx->lock_idx,
+		           (LONG) 1) != 0)
+	        {
+                  while (0 == (result = ptw32_robust_mutex_inherit(mutex))
+                           && (LONG) PTW32_INTERLOCKED_EXCHANGE(
+                                  (LPLONG) &mx->lock_idx,
+			          (LONG) -1) != 0)
+                    {
+	              if (0 != (result = ptw32_timed_eventwait (mx->event, abstime)))
+		        {
+		          return result;
+		        }
+                      if ((LONG)PTW32_ROBUST_NOTRECOVERABLE ==
+                                  PTW32_INTERLOCKED_EXCHANGE_ADD(
+                                    (LPLONG)statePtr,
+                                    0L))
+                        {
+                          /* Unblock the next thread */
+                          SetEvent(mx->event);
+                          result = ENOTRECOVERABLE;
+                          break;
+                        }
+	            }
+
+                  if (0 == result || EOWNERDEAD == result)
+                    {
+                      /*
+                       * Add mutex to the per-thread robust mutex currently-held list.
+                       * If the thread terminates, all mutexes in this list will be unlocked.
+                       */
+                      ptw32_robust_mutex_add(mutex, self);
+                    }
+	        }
+            }
+          else
+            {
+              pthread_t self = pthread_self();
+
+              if ((PTW32_INTERLOCKED_LONG) PTW32_INTERLOCKED_COMPARE_EXCHANGE(
+                           (PTW32_INTERLOCKED_LPLONG) &mx->lock_idx,
+		           (PTW32_INTERLOCKED_LONG) 1,
+		           (PTW32_INTERLOCKED_LONG) 0) == 0)
+	        {
+	          mx->recursive_count = 1;
+                  /*
+                   * Add mutex to the per-thread robust mutex currently-held list.
+                   * If the thread terminates, all mutexes in this list will be unlocked.
+                   */
+                  ptw32_robust_mutex_add(mutex, self);
+	        }
+              else
+	        {
+	          if (pthread_equal (mx->ownerThread, self))
+	            {
+	              if (PTHREAD_MUTEX_RECURSIVE == kind)
+		        {
+		          mx->recursive_count++;
+		        }
+	              else
+		        {
+		          return EDEADLK;
+		        }
+	            }
+	          else
+	            {
+                      while (0 == (result = ptw32_robust_mutex_inherit(mutex))
+                               && (LONG) PTW32_INTERLOCKED_EXCHANGE(
+                                          (LPLONG) &mx->lock_idx,
+			                  (LONG) -1) != 0)
+                        {
+		          if (0 != (result = ptw32_timed_eventwait (mx->event, abstime)))
+		            {
+		              return result;
+		            }
+		        }
+
+                      if ((LONG)PTW32_ROBUST_NOTRECOVERABLE ==
+                                  PTW32_INTERLOCKED_EXCHANGE_ADD(
+                                    (LPLONG)statePtr,
+                                    0L))
+                        {
+                          /* Unblock the next thread */
+                          SetEvent(mx->event);
+                          result = ENOTRECOVERABLE;
+                        }
+                      else if (0 == result || EOWNERDEAD == result)
+                        {
+                          mx->recursive_count = 1;
+                          /*
+                           * Add mutex to the per-thread robust mutex currently-held list.
+                           * If the thread terminates, all mutexes in this list will be unlocked.
+                           */
+                          ptw32_robust_mutex_add(mutex, self);
+                        }
+	            }
+	        }
+            }
+        }
+    }
+
+  return result;
 }
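The merged code above makes pthread_mutex_timedlock() return EOWNERDEAD when a robust mutex's previous owner died while holding it, and ENOTRECOVERABLE once the mutex has been marked permanently unusable. The sketch below is not part of the patch; it shows how a caller would consume that behaviour through the standard POSIX robust-mutex interface. The names `shared_lock` and `lock_with_timeout` are illustrative, and it assumes `pthread_mutexattr_setrobust()` and `pthread_mutex_consistent()` are available, as in pthreads-win32 releases that include this merge.

```c
#include <pthread.h>
#include <errno.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t shared_lock;          /* illustrative robust mutex */

/* Try to take the lock within ~2 seconds, repairing it if the owner died. */
static int lock_with_timeout(void)
{
  struct timespec abstime;
  int rc;

  abstime.tv_sec = time(NULL) + 2;           /* absolute deadline */
  abstime.tv_nsec = 0;

  rc = pthread_mutex_timedlock(&shared_lock, &abstime);
  switch (rc)
    {
    case 0:
      return 0;                              /* locked normally */
    case EOWNERDEAD:
      /* Previous owner died holding the lock: repair the shared state here,
         then mark the mutex consistent. This thread now owns it. */
      pthread_mutex_consistent(&shared_lock);
      return 0;
    case ETIMEDOUT:
      fprintf(stderr, "timed out waiting for lock\n");
      return -1;
    case ENOTRECOVERABLE:
      fprintf(stderr, "mutex is permanently unusable\n");
      return -1;
    default:
      return -1;
    }
}

int main(void)
{
  pthread_mutexattr_t attr;

  pthread_mutexattr_init(&attr);
  pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);
  pthread_mutex_init(&shared_lock, &attr);
  pthread_mutexattr_destroy(&attr);

  if (lock_with_timeout() == 0)
    pthread_mutex_unlock(&shared_lock);

  pthread_mutex_destroy(&shared_lock);
  return 0;
}
```

A thread that receives EOWNERDEAD owns the mutex but should only call pthread_mutex_consistent() after repairing the protected state; unlocking without marking it consistent drives the mutex into the not-recoverable state handled by the ENOTRECOVERABLE branches above.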

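A side note on the PTW32_INTERLOCKED_EXCHANGE_ADD(..., 0L) calls in the patch: an interlocked add returns the previous value, so adding zero is used as an atomic, fully fenced read of the robust state word without modifying it. A minimal standalone illustration of the same idiom using the plain Win32 API (the variable name is hypothetical):

```c
#include <windows.h>
#include <stdio.h>

static volatile LONG robust_state = 0;   /* stand-in for the patch's stateInconsistent word */

int main(void)
{
  /* Interlocked add of zero: returns the previous value, so this is an
     atomic read with a full memory barrier and no modification. */
  LONG observed = InterlockedExchangeAdd(&robust_state, 0L);

  printf("observed state = %ld\n", (long) observed);
  return 0;
}
```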