%patch
Index: 2.4.x-xfs/include/asm-i386/rwsem.h
===================================================================
--- 2.4.x-xfs.orig/include/asm-i386/rwsem.h	2004-03-11 20:33:14.000000000 +1100
+++ 2.4.x-xfs/include/asm-i386/rwsem.h	2004-04-19 19:58:47.000000000 +1000
@@ -4,8 +4,6 @@
  *
  * Derived from asm-i386/semaphore.h
  *
- * Trylock by Brian Watson (Brian.J.Watson@compaq.com).
- *
  *
  * The MSW of the count is the negated number of active writers and waiting
  * lockers, and the LSW is the total number of active locks
@@ -35,7 +33,7 @@
 #define _I386_RWSEM_H
 
 #ifndef _LINUX_RWSEM_H
-#error please dont include asm/rwsem.h directly, use linux/rwsem.h instead
+#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
 #endif
 
 #ifdef __KERNEL__
@@ -48,6 +46,7 @@
 extern struct rw_semaphore *FASTCALL(rwsem_down_read_failed(struct rw_semaphore *sem));
 extern struct rw_semaphore *FASTCALL(rwsem_down_write_failed(struct rw_semaphore *sem));
 extern struct rw_semaphore *FASTCALL(rwsem_wake(struct rw_semaphore *));
+extern struct rw_semaphore *FASTCALL(rwsem_downgrade_wake(struct rw_semaphore *sem));
 
 /*
  * the semaphore definition
@@ -113,8 +112,8 @@
 		"  jmp       1b\n"
 		LOCK_SECTION_END
 		"# ending down_read\n\t"
-		: "+m"(sem->count)
-		: "a"(sem)
+		: "=m"(sem->count)
+		: "a"(sem), "m"(sem->count)
 		: "memory", "cc");
 }
 
@@ -151,8 +150,8 @@
 	tmp = RWSEM_ACTIVE_WRITE_BIAS;
 	__asm__ __volatile__(
 		"# beginning down_write\n\t"
-LOCK_PREFIX	"  xadd      %0,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */
-		"  testl     %0,%0\n\t" /* was the count 0 before? */
+LOCK_PREFIX	"  xadd      %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */
+		"  testl     %%edx,%%edx\n\t" /* was the count 0 before? */
 		"  jnz       2f\n\t" /* jump if we weren't granted the lock */
 		"1:\n\t"
 		LOCK_SECTION_START("")
@@ -163,8 +162,8 @@
 		"  jmp       1b\n"
 		LOCK_SECTION_END
 		"# ending down_write"
-		: "+d"(tmp), "+m"(sem->count)
-		: "a"(sem)
+		: "=m"(sem->count), "=d"(tmp)
+		: "a"(sem), "1"(tmp), "m"(sem->count)
 		: "memory", "cc");
 }
 
@@ -202,8 +201,8 @@
 		"  jmp       1b\n"
 		LOCK_SECTION_END
 		"# ending __up_read\n"
-		: "+m"(sem->count), "+d"(tmp)
-		: "a"(sem)
+		: "=m"(sem->count), "=d"(tmp)
+		: "a"(sem), "1"(tmp), "m"(sem->count)
 		: "memory", "cc");
 }
 
@@ -228,20 +227,46 @@
 		"  jmp       1b\n"
 		LOCK_SECTION_END
 		"# ending __up_write\n"
-		: "+m"(sem->count)
-		: "a"(sem), "i"(-RWSEM_ACTIVE_WRITE_BIAS)
+		: "=m"(sem->count)
+		: "a"(sem), "i"(-RWSEM_ACTIVE_WRITE_BIAS), "m"(sem->count)
 		: "memory", "cc", "edx");
 }
 
 /*
+ * downgrade write lock to read lock
+ */
+static inline void __downgrade_write(struct rw_semaphore *sem)
+{
+	__asm__ __volatile__(
+		"# beginning __downgrade_write\n\t"
+LOCK_PREFIX	"  addl      %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
+		"  js        2f\n\t" /* jump if the lock is being waited upon */
+		"1:\n\t"
+		LOCK_SECTION_START("")
+		"2:\n\t"
+		"  pushl     %%ecx\n\t"
+		"  pushl     %%edx\n\t"
+		"  call      rwsem_downgrade_wake\n\t"
+		"  popl      %%edx\n\t"
+		"  popl      %%ecx\n\t"
+		"  jmp       1b\n"
+		LOCK_SECTION_END
+		"# ending __downgrade_write\n"
+		: "=m"(sem->count)
+		: "a"(sem), "i"(-RWSEM_WAITING_BIAS), "m"(sem->count)
+		: "memory", "cc");
+}
+#define HAVE_DOWNGRADE_WRITE
+
+/*
  * implement atomic add functionality
  */
 static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
 {
 	__asm__ __volatile__(
LOCK_PREFIX	"addl %1,%0"
-	:"=m"(sem->count)
-	:"ir"(delta), "m"(sem->count));
+	: "=m"(sem->count)
+	: "ir"(delta), "m"(sem->count));
 }
 
 /*
Index: 2.4.x-xfs/include/asm-ia64/rwsem.h
===================================================================
--- 2.4.x-xfs.orig/include/asm-ia64/rwsem.h	2003-08-26 09:46:48.000000000 +1000
+++ 2.4.x-xfs/include/asm-ia64/rwsem.h	2004-04-19 19:58:47.000000000 +1000
@@ -177,6 +177,7 @@
 	if (old < 0)
 		rwsem_downgrade_wake(sem);
 }
+#define HAVE_DOWNGRADE_WRITE
 
 /*
  * Implement atomic add functionality.  These used to be "inline" functions, but GCC v3.1
Index: 2.4.x-xfs/include/linux/rwsem.h
===================================================================
--- 2.4.x-xfs.orig/include/linux/rwsem.h	2004-03-11 20:33:14.000000000 +1100
+++ 2.4.x-xfs/include/linux/rwsem.h	2004-04-19 19:58:47.000000000 +1000
@@ -101,6 +101,17 @@
 	rwsemtrace(sem,"Leaving up_write");
 }
 
+#ifdef HAVE_DOWNGRADE_WRITE
+/*
+ * downgrade write lock to read lock
+ */
+static inline void downgrade_write(struct rw_semaphore *sem)
+{
+	rwsemtrace(sem,"Entering downgrade_write");
+	__downgrade_write(sem);
+	rwsemtrace(sem,"Leaving downgrade_write");
+}
+#endif
 
 #endif /* __KERNEL__ */
 #endif /* _LINUX_RWSEM_H */
Index: 2.4.x-xfs/lib/rwsem.c
===================================================================
--- 2.4.x-xfs.orig/lib/rwsem.c	2001-07-11 20:44:26.000000000 +1000
+++ 2.4.x-xfs/lib/rwsem.c	2004-04-19 19:58:47.000000000 +1000
@@ -34,8 +34,9 @@
  * - there must be someone on the queue
  * - the spinlock must be held by the caller
  * - woken process blocks are discarded from the list after having flags zeroised
+ * - writers are only woken if wakewrite is non-zero
  */
-static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
+static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 {
 	struct rwsem_waiter *waiter;
 	struct list_head *next;
@@ -44,6 +45,9 @@
 
 	rwsemtrace(sem,"Entering __rwsem_do_wake");
 
+	if (!wakewrite)
+		goto dont_wake_writers;
+
 	/* only wake someone up if we can transition the active part of the count from 0 -> 1 */
 try_again:
 	oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS,sem) - RWSEM_ACTIVE_BIAS;
@@ -64,6 +68,12 @@
 		wake_up_process(waiter->task);
 		goto out;
 
+	/* don't want to wake any writers */
+ dont_wake_writers:
+	waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
+	if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
+		goto out;
+
 	/* grant an infinite number of read locks to the readers at the front of the queue
 	 * - note we increment the 'active part' of the count by the number of readers (less one
 	 *   for the activity decrement we've already done) before waking any processes up
@@ -132,7 +142,7 @@
 	 * - it might even be this process, since the waker takes a more active part
 	 */
 	if (!(count & RWSEM_ACTIVE_MASK))
-		sem = __rwsem_do_wake(sem);
+		sem = __rwsem_do_wake(sem,1);
 
 	spin_unlock(&sem->wait_lock);
 
@@ -193,7 +203,7 @@
 
 	/* do nothing if list empty */
 	if (!list_empty(&sem->wait_list))
-		sem = __rwsem_do_wake(sem);
+		sem = __rwsem_do_wake(sem,1);
 
 	spin_unlock(&sem->wait_lock);
 
@@ -202,9 +212,31 @@
 	return sem;
 }
 
+/*
+ * downgrade a write lock into a read lock
+ * - caller incremented waiting part of count, and discovered it to be still negative
+ * - just wake up any readers at the front of the queue
+ */
+struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
+{
+	rwsemtrace(sem,"Entering rwsem_downgrade_wake");
+
+	spin_lock(&sem->wait_lock);
+
+	/* do nothing if list empty */
+	if (!list_empty(&sem->wait_list))
+		sem = __rwsem_do_wake(sem,0);
+
+	spin_unlock(&sem->wait_lock);
+
+	rwsemtrace(sem,"Leaving rwsem_downgrade_wake");
+	return sem;
+}
+
 EXPORT_SYMBOL_NOVERS(rwsem_down_read_failed);
 EXPORT_SYMBOL_NOVERS(rwsem_down_write_failed);
 EXPORT_SYMBOL_NOVERS(rwsem_wake);
+EXPORT_SYMBOL_NOVERS(rwsem_downgrade_wake);
 #if RWSEM_DEBUG
 EXPORT_SYMBOL(rwsemtrace);
 #endif

%diffstat
 include/asm-i386/rwsem.h |   55 ++++++++++++++++++++++++++++++++++-------------
 include/asm-ia64/rwsem.h |    1 
 include/linux/rwsem.h    |   11 +++++++++
 lib/rwsem.c              |   38 +++++++++++++++++++++++++++++---
 4 files changed, 87 insertions(+), 18 deletions(-)
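
For illustration, here is roughly the caller pattern the new primitive enables.
This is a made-up sketch, not part of the patch: cache_sem, cache_generation
and refresh_and_scan_cache() are hypothetical names; only init_rwsem/
down_write/downgrade_write/up_read are the real rwsem API. down_write() takes
the lock exclusively, downgrade_write() atomically converts that exclusive
hold into a shared one (waking any readers queued at the head of the wait
list via rwsem_downgrade_wake()), and up_read() releases it:

#include <linux/rwsem.h>

static DECLARE_RWSEM(cache_sem);	/* hypothetical example lock */
static int cache_generation;

static void refresh_and_scan_cache(void)
{
	down_write(&cache_sem);		/* exclusive: safe to modify */
	cache_generation++;

	/* become a reader without ever dropping the lock: writers
	 * queued behind us stay blocked, while readers at the front
	 * of the queue are woken to run concurrently with us */
	downgrade_write(&cache_sem);

	/* shared section: read-only work alongside other readers */

	up_read(&cache_sem);
}

The point of downgrading rather than doing up_write()/down_read() is that the
transition is atomic: no other writer can slip in between and invalidate what
the downgrading thread just wrote.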