diff -ru ../../work.nc/linuxthreads-2.2.3_19/condvar.c ./condvar.c
--- ../../work.nc/linuxthreads-2.2.3_19/condvar.c	Thu Apr 12 23:02:02 2001
+++ ./condvar.c	Tue Jan 10 18:14:20 2006
@@ -55,6 +55,11 @@
   return did_remove;
 }
 
+extern int __pthread_mutex_condwait_completelock(pthread_mutex_t *mutex);
+
+#define CVA_AVAIL 1
+#define CVA_EXTRA_RESTART 2
+
 int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
 {
   volatile pthread_descr self = thread_self();
@@ -74,6 +79,7 @@
 
   /* Register extrication interface */
   THREAD_SETMEM(self, p_condvar_avail, 0);
+  THREAD_SETMEM(self, p_condwait_mutex, mutex);
   __pthread_set_own_extricate_if(self, &extr);
 
   /* Atomically enqueue thread for waiting, but only if it is not
@@ -102,10 +108,15 @@
   while (1)
     {
       suspend(self);
-      if (THREAD_GETMEM(self, p_condvar_avail) == 0
+      if ((THREAD_GETMEM(self, p_condvar_avail) & CVA_AVAIL) == 0
          && (THREAD_GETMEM(self, p_woken_by_cancel) == 0
              || THREAD_GETMEM(self, p_cancelstate) != PTHREAD_CANCEL_ENABLE))
        {
+         if ((THREAD_GETMEM(self, p_condvar_avail) &
+              CVA_EXTRA_RESTART) == 0 &&
+             !__compare_and_swap(&self->p_condvar_avail,
+                                 0, CVA_EXTRA_RESTART))
+           break; /* CVA_AVAIL set by other thread */
          /* Count resumes that don't belong to us. */
          spurious_wakeup_count++;
          continue;
@@ -121,15 +132,35 @@
   if (THREAD_GETMEM(self, p_woken_by_cancel)
       && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
     THREAD_SETMEM(self, p_woken_by_cancel, 0);
-    pthread_mutex_lock(mutex);
+    if (THREAD_GETMEM(self, p_condwait_mutex) == NULL) {
+      if ((THREAD_GETMEM(self, p_condvar_avail) & CVA_EXTRA_RESTART) != 0) {
+        if (spurious_wakeup_count > 0)
+          spurious_wakeup_count--;
+        else
+          suspend(self);
+      }
+      __pthread_mutex_condwait_completelock(mutex);
+    } else
+      pthread_mutex_lock(mutex);
     __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
   }
 
+  if (THREAD_GETMEM(self, p_condwait_mutex) == NULL &&
+      (THREAD_GETMEM(self, p_condvar_avail) & CVA_EXTRA_RESTART) != 0) {
+    if (spurious_wakeup_count > 0)
+      spurious_wakeup_count--;
+    else
+      suspend(self);
+  }
+
   /* Put back any resumes we caught that don't belong to us. */
   while (spurious_wakeup_count--)
     restart(self);
 
-  pthread_mutex_lock(mutex);
+  if (THREAD_GETMEM(self, p_condwait_mutex) == NULL)
+    __pthread_mutex_condwait_completelock(mutex);
+  else
+    pthread_mutex_lock(mutex);
 
   return 0;
 }
@@ -155,6 +186,7 @@
 
   /* Register extrication interface */
   THREAD_SETMEM(self, p_condvar_avail, 0);
+  THREAD_SETMEM(self, p_condwait_mutex, mutex);
   __pthread_set_own_extricate_if(self, &extr);
 
   /* Enqueue to wait on the condition and check for cancellation. */
@@ -196,10 +228,15 @@
          suspend(self);
        }
 
-      if (THREAD_GETMEM(self, p_condvar_avail) == 0
+      if ((THREAD_GETMEM(self, p_condvar_avail) & CVA_AVAIL) == 0
          && (THREAD_GETMEM(self, p_woken_by_cancel) == 0
              || THREAD_GETMEM(self, p_cancelstate) != PTHREAD_CANCEL_ENABLE))
        {
+         if ((THREAD_GETMEM(self, p_condvar_avail) &
+              CVA_EXTRA_RESTART) == 0 &&
+             !__compare_and_swap(&self->p_condvar_avail,
+                                 0, CVA_EXTRA_RESTART))
+           break; /* CVA_AVAIL set by other thread */
          /* Count resumes that don't belong to us. */
          spurious_wakeup_count++;
          continue;
@@ -215,15 +252,35 @@
   if (THREAD_GETMEM(self, p_woken_by_cancel)
       && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
     THREAD_SETMEM(self, p_woken_by_cancel, 0);
-    pthread_mutex_lock(mutex);
+    if (THREAD_GETMEM(self, p_condwait_mutex) == NULL) {
+      if ((THREAD_GETMEM(self, p_condvar_avail) & CVA_EXTRA_RESTART) != 0) {
+        if (spurious_wakeup_count > 0)
+          spurious_wakeup_count--;
+        else
+          suspend(self);
+      }
+      __pthread_mutex_condwait_completelock(mutex);
+    } else
+      pthread_mutex_lock(mutex);
     __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
   }
 
+  if (THREAD_GETMEM(self, p_condwait_mutex) == NULL &&
+      (THREAD_GETMEM(self, p_condvar_avail) & CVA_EXTRA_RESTART) != 0) {
+    if (spurious_wakeup_count > 0)
+      spurious_wakeup_count--;
+    else
+      suspend(self);
+  }
+
   /* Put back any resumes we caught that don't belong to us. */
   while (spurious_wakeup_count--)
     restart(self);
 
-  pthread_mutex_lock(mutex);
+  if (THREAD_GETMEM(self, p_condwait_mutex) == NULL)
+    __pthread_mutex_condwait_completelock(mutex);
+  else
+    pthread_mutex_lock(mutex);
 
   return 0;
 }
@@ -237,14 +294,34 @@
 int pthread_cond_signal(pthread_cond_t *cond)
 {
   pthread_descr th;
+  long oldcva;
 
   __pthread_lock(&cond->__c_lock, NULL);
   th = dequeue(&cond->__c_waiting);
   __pthread_unlock(&cond->__c_lock);
   if (th != NULL) {
-    th->p_condvar_avail = 1;
-    WRITE_MEMORY_BARRIER();
-    restart(th);
+    pthread_mutex_t *mutex = th->p_condwait_mutex;
+    if ((th->p_condvar_avail & CVA_AVAIL) == 0 &&
+        mutex != NULL &&
+        (mutex->__m_kind == PTHREAD_MUTEX_ERRORCHECK_NP ||
+         mutex->__m_kind == PTHREAD_MUTEX_TIMED_NP) &&
+        __pthread_alt_condwait_queuelock(&mutex->__m_lock, th) == 0) {
+      th->p_condwait_mutex = NULL;
+      WRITE_MEMORY_BARRIER();
+      do {
+        READ_MEMORY_BARRIER();
+        oldcva = th->p_condvar_avail;
+      } while (!__compare_and_swap(&th->p_condvar_avail,
+                                   oldcva,
+                                   oldcva | CVA_AVAIL));
+      WRITE_MEMORY_BARRIER();
+      if ((oldcva & CVA_EXTRA_RESTART) != 0)
+        restart(th);
+    } else {
+      th->p_condvar_avail = CVA_AVAIL;
+      WRITE_MEMORY_BARRIER();
+      restart(th);
+    }
   }
   return 0;
 }
@@ -252,6 +329,7 @@
 int pthread_cond_broadcast(pthread_cond_t *cond)
 {
   pthread_descr tosignal, th;
+  long oldcva;
 
   __pthread_lock(&cond->__c_lock, NULL);
   /* Copy the current state of the waiting queue and empty it */
@@ -260,9 +338,28 @@
   __pthread_unlock(&cond->__c_lock);
   /* Now signal each process in the queue */
   while ((th = dequeue(&tosignal)) != NULL) {
-    th->p_condvar_avail = 1;
-    WRITE_MEMORY_BARRIER();
-    restart(th);
+    pthread_mutex_t *mutex = th->p_condwait_mutex;
+    if ((th->p_condvar_avail & CVA_AVAIL) == 0 &&
+        mutex != NULL &&
+        (mutex->__m_kind == PTHREAD_MUTEX_ERRORCHECK_NP ||
+         mutex->__m_kind == PTHREAD_MUTEX_TIMED_NP) &&
+        __pthread_alt_condwait_queuelock(&mutex->__m_lock, th) == 0) {
+      th->p_condwait_mutex = NULL;
+      WRITE_MEMORY_BARRIER();
+      do {
+        READ_MEMORY_BARRIER();
+        oldcva = th->p_condvar_avail;
+      } while (!__compare_and_swap(&th->p_condvar_avail,
+                                   oldcva,
+                                   oldcva | CVA_AVAIL));
+      WRITE_MEMORY_BARRIER();
+      if ((oldcva & CVA_EXTRA_RESTART) != 0)
+        restart(th);
+    } else {
+      th->p_condvar_avail = CVA_AVAIL;
+      WRITE_MEMORY_BARRIER();
+      restart(th);
+    }
   }
   return 0;
 }
Only in .: condvar.c~
diff -ru ../../work.nc/linuxthreads-2.2.3_19/internals.h ./internals.h
--- ../../work.nc/linuxthreads-2.2.3_19/internals.h	Tue Jan 10 17:13:14 2006
+++ ./internals.h	Tue Jan 10 17:33:30 2006
@@ -125,6 +125,13 @@
   int pr_lock_count;
 } pthread_readlock_info;
 
+
+struct wait_node {
+  struct wait_node *next;	/* Next node in null terminated linked list */
+  pthread_descr thr;		/* The thread waiting with this node */
+  int abandoned;		/* Atomic flag */
+};
+
 struct _pthread_descr_struct {
   union {
     struct {
@@ -176,7 +183,7 @@
   struct pthread_atomic p_resume_count; /* number of times restart() was
                                            called on thread */
   char p_woken_by_cancel;	/* cancellation performed wakeup */
-  char p_condvar_avail;		/* flag if conditional variable became avail */
+  long p_condvar_avail;		/* flag if conditional variable became avail */
   char p_sem_avail;		/* flag if semaphore became available */
   pthread_extricate_if *p_extricate; /* See above */
   pthread_readlock_info *p_readlock_list; /* List of readlock info structs */
@@ -189,6 +196,8 @@
   hp_timing_t p_cpuclock_offset; /* Initial CPU clock for thread.  */
 #endif
   /* New elements must be added at the end.  */
+  pthread_mutex_t *p_condwait_mutex;
+  struct wait_node p_condwait_waitnode;
 } __attribute__ ((aligned(32))); /* We need to align the structure so that
                                     doubles are aligned properly.  This is 8
                                     bytes on MIPS and 16 bytes on MIPS64.
Only in .: internals.h~
diff -ru ../../work.nc/linuxthreads-2.2.3_19/mutex.c ./mutex.c
--- ../../work.nc/linuxthreads-2.2.3_19/mutex.c	Sun Jan  7 05:35:20 2001
+++ ./mutex.c	Tue Jan 10 17:13:46 2006
@@ -92,6 +92,24 @@
 }
 strong_alias (__pthread_mutex_trylock, pthread_mutex_trylock)
 
+int __pthread_mutex_condwait_completelock(pthread_mutex_t *mutex)
+{
+  pthread_descr self;
+
+  switch(mutex->__m_kind) {
+  case PTHREAD_MUTEX_ERRORCHECK_NP:
+    self = thread_self();
+    if (mutex->__m_owner == self) return EDEADLK;
+    mutex->__m_owner = self;
+    return 0;
+  case PTHREAD_MUTEX_TIMED_NP:
+    return 0;
+  default:
+    return EINVAL;
+  }
+}
+
+
 int __pthread_mutex_lock(pthread_mutex_t * mutex)
 {
   pthread_descr self;
diff -ru ../../work.nc/linuxthreads-2.2.3_19/spinlock.c ./spinlock.c
--- ../../work.nc/linuxthreads-2.2.3_19/spinlock.c	Tue Jan 10 17:13:14 2006
+++ ./spinlock.c	Tue Jan 10 17:13:46 2006
@@ -231,12 +231,6 @@
 
  */
 
-struct wait_node {
-  struct wait_node *next;	/* Next node in null terminated linked list */
-  pthread_descr thr;		/* The thread waiting with this node */
-  int abandoned;		/* Atomic flag */
-};
-
 static long wait_node_free_list;
 #if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
 static int wait_node_free_list_spinlock;
@@ -359,6 +353,55 @@
 }
 #endif
 
+
+int __pthread_alt_condwait_queuelock(struct _pthread_fastlock * lock,
+				     pthread_descr th)
+{
+#if defined HAS_COMPARE_AND_SWAP
+  long oldstatus, newstatus;
+#endif
+
+#if defined TEST_FOR_COMPARE_AND_SWAP
+  if (!__pthread_has_cas)
+#endif
+#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
+  {
+    __pthread_acquire(&lock->__spinlock);
+
+    if (lock->__status == 0) {
+      WRITE_MEMORY_BARRIER();
+      lock->__spinlock = __LT_SPINLOCK_INIT;
+      return 1;
+    }
+    th->p_condwait_waitnode.abandoned = 0;
+    th->p_condwait_waitnode.next = (struct wait_node *) lock->__status;
+    th->p_condwait_waitnode.thr = th;
+    lock->__status = (long) &th->p_condwait_waitnode;
+
+    WRITE_MEMORY_BARRIER();
+    lock->__spinlock = __LT_SPINLOCK_INIT;
+    return 0;
+  }
+#endif
+
+#if defined HAS_COMPARE_AND_SWAP
+  do {
+    oldstatus = lock->__status;
+    if (oldstatus == 0) {
+      return 1;
+    }
+    th->p_condwait_waitnode.thr = th;
+    newstatus = (long) &th->p_condwait_waitnode;
+    th->p_condwait_waitnode.abandoned = 0;
+    th->p_condwait_waitnode.next = (struct wait_node *) oldstatus;
+    /* Make sure the store in wait_node.next completes before performing
+       the compare-and-swap */
+    MEMORY_BARRIER();
+  } while(! __compare_and_swap(&lock->__status, oldstatus, newstatus));
+  return 0;
+#endif
+}
+
 
 void __pthread_alt_lock(struct _pthread_fastlock * lock,
 		       pthread_descr self)
diff -ru ../../work.nc/linuxthreads-2.2.3_19/spinlock.h ./spinlock.h
--- ../../work.nc/linuxthreads-2.2.3_19/spinlock.h	Tue Jan 10 17:13:14 2006
+++ ./spinlock.h	Tue Jan 10 17:13:46 2006
@@ -130,6 +130,9 @@
    timed-out waits.  Warning: do not mix these operations with the above ones
    over the same lock object! */
 
+extern int __pthread_alt_condwait_queuelock(struct _pthread_fastlock * lock,
+					    pthread_descr th);
+
 extern void __pthread_alt_lock(struct _pthread_fastlock * lock,
 			       pthread_descr self);
 