author    | rpj <rpj> | 2004-10-16 02:34:44 +0000
committer | rpj <rpj> | 2004-10-16 02:34:44 +0000
commit    | 45b1b8cb2a6588f9316f780d8cefe11c181a9a17 (patch)
tree      | 24753e298d9933d48d764177baf183ef97f04156 /tests/rwlock8.c
parent    | 9da8fdcb33373b4b2e1de2a8b7af3ed4b5811245 (diff)
Mutex speedups cont'd
Diffstat (limited to 'tests/rwlock8.c')
-rw-r--r-- | tests/rwlock8.c | 205
1 files changed, 205 insertions, 0 deletions
diff --git a/tests/rwlock8.c b/tests/rwlock8.c
new file mode 100644
index 0000000..c83a775
--- /dev/null
+++ b/tests/rwlock8.c
@@ -0,0 +1,205 @@
+/*
+ * rwlock8.c
+ *
+ * Hammer on a bunch of rwlocks to test robustness and fairness.
+ * Printed stats should be roughly even for each thread.
+ *
+ * Yield during each access to exercise lock contention code paths
+ * more than rwlock7.c does (particularly on uni-processor systems).
+ */
+
+#include "test.h"
+#include <sys/timeb.h>
+
+#ifdef __GNUC__
+#include <stdlib.h>
+#endif
+
+#define THREADS 5
+#define DATASIZE 7
+#define ITERATIONS 100000
+
+/*
+ * Keep statistics for each thread.
+ */
+typedef struct thread_tag {
+  int thread_num;
+  pthread_t thread_id;
+  int updates;
+  int reads;
+  int changed;
+  int seed;
+} thread_t;
+
+/*
+ * Read-write lock and shared data
+ */
+typedef struct data_tag {
+  pthread_rwlock_t lock;
+  int data;
+  int updates;
+} data_t;
+
+static thread_t threads[THREADS];
+static data_t data[DATASIZE];
+
+/*
+ * Thread start routine that uses read-write locks
+ */
+void *thread_routine (void *arg)
+{
+  thread_t *self = (thread_t*)arg;
+  int iteration;
+  int element = 0;
+  int seed = self->seed;
+  int interval = 1 + rand_r (&seed) % 71;
+
+  self->changed = 0;
+
+  for (iteration = 0; iteration < ITERATIONS; iteration++)
+    {
+      if (iteration % (ITERATIONS / 10) == 0)
+        {
+          putchar('.');
+          fflush(stdout);
+        }
+      /*
+       * Every "interval" iterations, perform an
+       * update operation (write lock instead of read
+       * lock).
+       */
+      if ((iteration % interval) == 0)
+        {
+          assert(pthread_rwlock_wrlock (&data[element].lock) == 0);
+          data[element].data = self->thread_num;
+          data[element].updates++;
+          self->updates++;
+          interval = 1 + rand_r (&seed) % 71;
+          sched_yield();
+          assert(pthread_rwlock_unlock (&data[element].lock) == 0);
+        } else {
+          /*
+           * Look at the current data element to see whether
+           * the current thread last updated it. Count the
+           * times, to report later.
+           */
+          assert(pthread_rwlock_rdlock (&data[element].lock) == 0);
+
+          self->reads++;
+
+          if (data[element].data != self->thread_num)
+            {
+              self->changed++;
+              interval = 1 + self->changed % 71;
+            }
+
+          sched_yield();
+
+          assert(pthread_rwlock_unlock (&data[element].lock) == 0);
+        }
+
+      element = (element + 1) % DATASIZE;
+
+    }
+
+  return NULL;
+}
+
+int
+main (int argc, char *argv[])
+{
+  int count;
+  int data_count;
+  int thread_updates = 0;
+  int data_updates = 0;
+  int seed = 1;
+
+  struct _timeb currSysTime1;
+  struct _timeb currSysTime2;
+
+  /*
+   * Initialize the shared data.
+   */
+  for (data_count = 0; data_count < DATASIZE; data_count++)
+    {
+      data[data_count].data = 0;
+      data[data_count].updates = 0;
+
+      assert(pthread_rwlock_init (&data[data_count].lock, NULL) == 0);
+    }
+
+  _ftime(&currSysTime1);
+
+  /*
+   * Create THREADS threads to access shared data.
+   */
+  for (count = 0; count < THREADS; count++)
+    {
+      threads[count].thread_num = count;
+      threads[count].updates = 0;
+      threads[count].reads = 0;
+      threads[count].seed = 1 + rand_r (&seed) % 71;
+
+      assert(pthread_create (&threads[count].thread_id,
+                             NULL, thread_routine, (void*)&threads[count]) == 0);
+    }
+
+  /*
+   * Wait for all threads to complete, and collect
+   * statistics.
+   */
+  for (count = 0; count < THREADS; count++)
+    {
+      assert(pthread_join (threads[count].thread_id, NULL) == 0);
+    }
+
+  putchar('\n');
+  fflush(stdout);
+
+  for (count = 0; count < THREADS; count++)
+    {
+      if (threads[count].changed > 0)
+        {
+          printf ("Thread %d found changed elements %d times\n",
+                  count, threads[count].changed);
+        }
+    }
+
+  putchar('\n');
+  fflush(stdout);
+
+  for (count = 0; count < THREADS; count++)
+    {
+      thread_updates += threads[count].updates;
+      printf ("%02d: seed %d, updates %d, reads %d\n",
+              count, threads[count].seed,
+              threads[count].updates, threads[count].reads);
+    }
+
+  putchar('\n');
+  fflush(stdout);
+
+  /*
+   * Collect statistics for the data.
+   */
+  for (data_count = 0; data_count < DATASIZE; data_count++)
+    {
+      data_updates += data[data_count].updates;
+      printf ("data %02d: value %d, %d updates\n",
+              data_count, data[data_count].data, data[data_count].updates);
+      assert(pthread_rwlock_destroy (&data[data_count].lock) == 0);
+    }
+
+  printf ("%d thread updates, %d data updates\n",
+          thread_updates, data_updates);
+
+  _ftime(&currSysTime2);
+
+  printf( "\nstart: %ld/%d, stop: %ld/%d, duration:%ld\n",
+          currSysTime1.time,currSysTime1.millitm,
+          currSysTime2.time,currSysTime2.millitm,
+          (currSysTime2.time*1000+currSysTime2.millitm) -
+          (currSysTime1.time*1000+currSysTime1.millitm));
+
+  return 0;
+}
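The pattern the test hammers on is simple: roughly every "interval" iterations a thread takes the write lock and updates the element, otherwise it takes the read lock and just looks, and in both cases it yields while the lock is held. The sketch below distils that pattern to a single lock and a single thread body. It is illustrative only and not part of this commit: it assumes a generic POSIX pthreads environment rather than the pthreads-win32 test harness, and the names shared_t, hammer, HAMMER_THREADS and HAMMER_ITERATIONS are hypothetical.

/*
 * Illustrative sketch only -- not part of this commit.  Several
 * threads contend on one rwlock, writing occasionally and reading
 * otherwise, yielding while the lock is held.
 */
#include <assert.h>
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

#define HAMMER_THREADS    4
#define HAMMER_ITERATIONS 10000

typedef struct {
  pthread_rwlock_t lock;
  int data;                                 /* id of the last writer */
} shared_t;

static shared_t shared;

static void *hammer (void *arg)
{
  int id = (int)(size_t)arg;
  unsigned int seed = (unsigned int)id + 1;
  int interval = 1 + rand_r (&seed) % 71;   /* iterations between writes */
  int i;

  for (i = 0; i < HAMMER_ITERATIONS; i++)
    {
      if (i % interval == 0)
        {
          /* Occasionally update the shared data under a write lock. */
          assert (pthread_rwlock_wrlock (&shared.lock) == 0);
          shared.data = id;
          interval = 1 + rand_r (&seed) % 71;
          sched_yield ();                   /* yield while holding the lock */
          assert (pthread_rwlock_unlock (&shared.lock) == 0);
        }
      else
        {
          /* Otherwise just observe it under a read lock. */
          assert (pthread_rwlock_rdlock (&shared.lock) == 0);
          sched_yield ();
          assert (pthread_rwlock_unlock (&shared.lock) == 0);
        }
    }
  return NULL;
}

int main (void)
{
  pthread_t t[HAMMER_THREADS];
  int n;

  assert (pthread_rwlock_init (&shared.lock, NULL) == 0);

  for (n = 0; n < HAMMER_THREADS; n++)
    assert (pthread_create (&t[n], NULL, hammer, (void *)(size_t)n) == 0);
  for (n = 0; n < HAMMER_THREADS; n++)
    assert (pthread_join (t[n], NULL) == 0);

  printf ("last writer: %d\n", shared.data);

  assert (pthread_rwlock_destroy (&shared.lock) == 0);
  return 0;
}

Yielding while the lock is held is the point of the exercise: it forces readers and writers to queue up on the rwlock so the contention paths get exercised even on a single CPU, which is what rwlock8.c adds over rwlock7.c.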