
File: linux-2.4-xfs / split-patches / rwsem-backport

Revision 1.3, Mon Nov 22 13:43:01 2004 UTC by nathans.longdrop.melbourne.sgi.com
Branch: MAIN
Changes since 1.2: +14 -95 lines

Merge up to 2.4.28.
Merge of 2.4.x-xfs-melb:linux:20239a by kenmcd.
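
This patch backports the downgrade_write() primitive, which turns a
held write lock into a read lock without ever releasing it, from the
2.6 rwsem implementation into the 2.4 XFS tree.  A kernel-context
sketch of the intended use (setup_then_read, struct foo and its
helpers are hypothetical names, not part of this patch;
down_write/downgrade_write/up_read are the real API):

	void setup_then_read(struct rw_semaphore *sem, struct foo *f)
	{
		down_write(sem);	/* exclusive: populate the structure */
		init_foo(f);
		downgrade_write(sem);	/* atomically become a reader: queued
					 * readers may be woken, writers not */
		use_foo(f);		/* still protected against writers */
		up_read(sem);
	}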

%patch
Index: 2.4.x-xfs/include/asm-i386/rwsem.h
===================================================================
--- 2.4.x-xfs.orig/include/asm-i386/rwsem.h	Mon Nov 22 11:57:54 2004
+++ 2.4.x-xfs/include/asm-i386/rwsem.h	Mon Nov 22 12:15:45 2004
@@ -48,6 +48,7 @@
 extern struct rw_semaphore *FASTCALL(rwsem_down_read_failed(struct rw_semaphore *sem));
 extern struct rw_semaphore *FASTCALL(rwsem_down_write_failed(struct rw_semaphore *sem));
 extern struct rw_semaphore *FASTCALL(rwsem_wake(struct rw_semaphore *));
+extern struct rw_semaphore *FASTCALL(rwsem_downgrade_wake(struct rw_semaphore *sem));
 
 /*
  * the semaphore definition
@@ -260,5 +261,31 @@
 	return tmp+delta;
 }
 
+/*
+ * downgrade write lock to read lock
+ */
+static inline void __downgrade_write(struct rw_semaphore *sem)
+{
+	__asm__ __volatile__(
+		"# beginning __downgrade_write\n\t"
+LOCK_PREFIX	"  addl      %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
+		"  js        2f\n\t" /* jump if the lock is being waited upon */
+		"1:\n\t"
+		LOCK_SECTION_START("")
+		"2:\n\t"
+		"  pushl     %%ecx\n\t"
+		"  pushl     %%edx\n\t"
+		"  call      rwsem_downgrade_wake\n\t"
+		"  popl      %%edx\n\t"
+		"  popl      %%ecx\n\t"
+		"  jmp       1b\n"
+		LOCK_SECTION_END
+		"# ending __downgrade_write\n"
+		: "=m"(sem->count)
+		: "a"(sem), "i"(-RWSEM_WAITING_BIAS), "m"(sem->count)
+		: "memory", "cc");
+}
+#define HAVE_DOWNGRADE_WRITE
+
 #endif /* __KERNEL__ */
 #endif /* _I386_RWSEM_H */
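
The bias arithmetic behind the 0xZZZZ0001 -> 0xYYYY0001 comment: on
i386 the count packs active lockers into the low 16 bits, with
RWSEM_ACTIVE_BIAS 0x00000001 and RWSEM_WAITING_BIAS -0x00010000, so a
write holder sees 0xffff0001.  __downgrade_write() adds
-RWSEM_WAITING_BIAS; if nobody is queued that leaves 0x00000001 (one
active reader) and the `js` falls through, while any sleeper keeps the
count negative and sends us to rwsem_downgrade_wake().  A standalone
userspace check of both transitions, assuming only the bias constants
from this header:

	#include <stdint.h>
	#include <stdio.h>

	/* bias constants as in include/asm-i386/rwsem.h */
	#define RWSEM_ACTIVE_BIAS        0x00000001
	#define RWSEM_WAITING_BIAS       (-0x00010000)
	#define RWSEM_ACTIVE_WRITE_BIAS  (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

	int main(void)
	{
		/* writer holds the lock, nobody queued: 0xffff0001 */
		int32_t count = RWSEM_ACTIVE_WRITE_BIAS;
		count += -RWSEM_WAITING_BIAS;
		printf("no waiters: 0x%08x (%s)\n", (uint32_t)count,
		       count < 0 ? "slow path" : "fast path, one reader");

		/* writer holds the lock, one sleeper queued: 0xfffe0001 */
		count = RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS;
		count += -RWSEM_WAITING_BIAS;
		printf("one waiter: 0x%08x (%s)\n", (uint32_t)count,
		       count < 0 ? "slow path, wake readers" : "fast path");
		return 0;
	}

This prints 0x00000001 and then 0xffff0001, matching the transition
the asm comment describes.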
Index: 2.4.x-xfs/include/asm-ia64/rwsem.h
===================================================================
--- 2.4.x-xfs.orig/include/asm-ia64/rwsem.h	Mon Nov 22 11:56:28 2004
+++ 2.4.x-xfs/include/asm-ia64/rwsem.h	Mon Nov 22 12:04:08 2004
@@ -177,6 +177,7 @@
 	if (old < 0)
 		rwsem_downgrade_wake(sem);
 }
+#define HAVE_DOWNGRADE_WRITE
 
 /*
  * Implement atomic add functionality.  These used to be "inline" functions, but GCC v3.1
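
Only the tail of the ia64 __downgrade_write() is visible in this hunk;
ia64 has no single add-and-test instruction for this, so the
transition is built from a compare-and-swap loop over a 64-bit count
(RWSEM_WAITING_BIAS is -0x100000000 there).  A hedged standalone
sketch of that pattern, using C11 atomics in place of the kernel's
cmpxchg primitive (downgrade_write_cas and the wake stub are
hypothetical names):

	#include <stdatomic.h>

	#define RWSEM_WAITING_BIAS  (-0x100000000L)	/* 64-bit count */

	struct rw_semaphore { _Atomic long count; /* + wait list, lock */ };

	void rwsem_downgrade_wake_stub(struct rw_semaphore *sem);

	static inline void downgrade_write_cas(struct rw_semaphore *sem)
	{
		long old = atomic_load(&sem->count);
		long new;

		/* retry until (old - RWSEM_WAITING_BIAS) is swapped in */
		do {
			new = old - RWSEM_WAITING_BIAS;
		} while (!atomic_compare_exchange_weak(&sem->count, &old, new));

		/* a negative old count means sleepers are queued */
		if (old < 0)
			rwsem_downgrade_wake_stub(sem);
	}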
Index: 2.4.x-xfs/include/linux/rwsem.h
===================================================================
--- 2.4.x-xfs.orig/include/linux/rwsem.h	Mon Nov 22 11:56:28 2004
+++ 2.4.x-xfs/include/linux/rwsem.h	Mon Nov 22 12:04:08 2004
@@ -101,6 +101,17 @@
 	rwsemtrace(sem,"Leaving up_write");
 }
 
+#ifdef HAVE_DOWNGRADE_WRITE
+/*
+ * downgrade write lock to read lock
+ */
+static inline void downgrade_write(struct rw_semaphore *sem)
+{
+	rwsemtrace(sem,"Entering downgrade_write");
+	__downgrade_write(sem);
+	rwsemtrace(sem,"Leaving downgrade_write");
+}
+#endif
 
 #endif /* __KERNEL__ */
 #endif /* _LINUX_RWSEM_H */
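
The HAVE_DOWNGRADE_WRITE guard means downgrade_write() only exists
where the per-arch asm/rwsem.h supplies __downgrade_write() (i386 and
ia64 in this patch); generic callers must cope with its absence.  The
obvious fallback is release-and-reacquire, which is not atomic, and
that gap is exactly what the real primitive avoids.  A hypothetical
sketch, not part of this patch:

	#ifndef HAVE_DOWNGRADE_WRITE
	static inline void downgrade_write_fallback(struct rw_semaphore *sem)
	{
		up_write(sem);		/* lock is momentarily free here... */
		down_read(sem);		/* ...so another writer can slip in
					 * and invalidate the caller's view */
	}
	#endif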
Index: 2.4.x-xfs/lib/rwsem.c
===================================================================
--- 2.4.x-xfs.orig/lib/rwsem.c	Mon Nov 22 11:57:54 2004
+++ 2.4.x-xfs/lib/rwsem.c	Mon Nov 22 12:04:08 2004
@@ -35,8 +35,9 @@
  *   - there must be someone on the queue
  * - the spinlock must be held by the caller
  * - woken process blocks are discarded from the list after having flags zeroised
+ * - writers are only woken if wakewrite is non-zero
  */
-static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
+static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 {
 	struct rwsem_waiter *waiter;
 	struct task_struct *tsk;
@@ -45,6 +46,9 @@
 
 	rwsemtrace(sem,"Entering __rwsem_do_wake");
 
+	if (!wakewrite)
+		goto dont_wake_writers;
+
 	/* only wake someone up if we can transition the active part of the count from 0 -> 1 */
  try_again:
 	oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS,sem) - RWSEM_ACTIVE_BIAS;
@@ -68,6 +72,12 @@
 	free_task_struct(tsk);
 	goto out;
 
+	/* don't want to wake any writers */
+ dont_wake_writers:
+	waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
+	if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
+		goto out;
+
 	/* grant an infinite number of read locks to the readers at the front of the queue
 	 * - note we increment the 'active part' of the count by the number of readers (less one
 	 *   for the activity decrement we've already done) before waking any processes up
@@ -140,7 +150,7 @@
 	 * - it might even be this process, since the waker takes a more active part
 	 */
 	if (!(count & RWSEM_ACTIVE_MASK))
-		sem = __rwsem_do_wake(sem);
+		sem = __rwsem_do_wake(sem,1);
 
 	spin_unlock(&sem->wait_lock);
 
@@ -201,7 +211,7 @@
 
 	/* do nothing if list empty */
 	if (!list_empty(&sem->wait_list))
-		sem = __rwsem_do_wake(sem);
+		sem = __rwsem_do_wake(sem,1);
 
 	spin_unlock(&sem->wait_lock);
 
@@ -210,9 +220,31 @@
 	return sem;
 }
 
+/*
+ * downgrade a write lock into a read lock
+ * - caller incremented waiting part of count, and discovered it to be still negative
+ * - just wake up any readers at the front of the queue
+ */
+struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
+{
+	rwsemtrace(sem,"Entering rwsem_downgrade_wake");
+
+	spin_lock(&sem->wait_lock);
+
+	/* do nothing if list empty */
+	if (!list_empty(&sem->wait_list))
+		sem = __rwsem_do_wake(sem,0);
+
+	spin_unlock(&sem->wait_lock);
+
+	rwsemtrace(sem,"Leaving rwsem_downgrade_wake");
+	return sem;
+}
+
 EXPORT_SYMBOL_NOVERS(rwsem_down_read_failed);
 EXPORT_SYMBOL_NOVERS(rwsem_down_write_failed);
 EXPORT_SYMBOL_NOVERS(rwsem_wake);
+EXPORT_SYMBOL_NOVERS(rwsem_downgrade_wake);
 #if RWSEM_DEBUG
 EXPORT_SYMBOL(rwsemtrace);
 #endif
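
The wakewrite flag is what separates the two slow paths above:
rwsem_wake() (reached from up_read/up_write) passes 1, so a writer at
the head of the queue may be woken alone, while rwsem_downgrade_wake()
passes 0, because the downgrading task still holds the lock for
reading and only the contiguous readers at the front of the queue can
share it.  A toy standalone model of that head-of-queue decision
(wake_front and the queue array are hypothetical; the flag values
match lib/rwsem.c):

	#include <stdio.h>

	#define RWSEM_WAITING_FOR_READ   0x00000001
	#define RWSEM_WAITING_FOR_WRITE  0x00000002

	/* how many waiters at the front of the queue get woken */
	static int wake_front(const int *q, int n, int wakewrite)
	{
		int woken = 0;

		if (n && (q[0] & RWSEM_WAITING_FOR_WRITE))
			return wakewrite ? 1 : 0;  /* writer: alone or not at all */
		while (woken < n && (q[woken] & RWSEM_WAITING_FOR_READ))
			woken++;		   /* grant all leading readers */
		return woken;
	}

	int main(void)
	{
		int q[] = { RWSEM_WAITING_FOR_READ, RWSEM_WAITING_FOR_READ,
			    RWSEM_WAITING_FOR_WRITE, RWSEM_WAITING_FOR_READ };

		/* reader-led queue: both paths wake the two leading readers */
		printf("downgrade: %d woken\n", wake_front(q, 4, 0));
		printf("up_write:  %d woken\n", wake_front(q, 4, 1));

		/* writer-led queue: only the wakewrite path wakes anyone */
		printf("downgrade: %d woken\n", wake_front(q + 2, 2, 0));
		printf("up_write:  %d woken\n", wake_front(q + 2, 2, 1));
		return 0;
	}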