[parisc-linux-cvs] Improve rwlocks

Matthew Wilcox willy@ldl.fc.hp.com
Tue, 17 Apr 2001 17:21:59 -0600


This should make rwlocks more efficient.  Would someone mind checking
it over?  Clearly, for UP the complex part is a NOP anyway, but it
should remove some double-irq-disables.  Corrections and clarifications
to the comments are especially welcome; I had to stare very hard at the
code to understand it originally, and I hope the comment changes have
made it easier to follow.
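
In case it helps with review, here's a rough user-space model of the
scheme the new comment describes: readers take an inner spinlock just
long enough to bump a counter, and a writer keeps the inner lock for
itself once the counter has drained to zero.  It's only a sketch, not
the kernel code; it uses C11 atomics and plain busy-waiting instead of
ldcw, leaves out the irq handling entirely, and the model_* names are
made up for illustration.

#include <stdatomic.h>

typedef struct {
	atomic_flag lock;	/* inner spinlock; set means held */
	volatile int counter;	/* number of readers currently inside */
} model_rwlock_t;

#define MODEL_RWLOCK_UNLOCKED { ATOMIC_FLAG_INIT, 0 }

static void model_spin_lock(model_rwlock_t *rw)
{
	/* spin until we are the one to flip the flag from clear to set */
	while (atomic_flag_test_and_set_explicit(&rw->lock,
						 memory_order_acquire))
		;
}

static void model_spin_unlock(model_rwlock_t *rw)
{
	atomic_flag_clear_explicit(&rw->lock, memory_order_release);
}

static void model_read_lock(model_rwlock_t *rw)
{
	/* readers hold the inner lock only long enough to register */
	model_spin_lock(rw);
	rw->counter++;
	model_spin_unlock(rw);
}

static void model_read_unlock(model_rwlock_t *rw)
{
	model_spin_lock(rw);
	rw->counter--;
	model_spin_unlock(rw);
}

static void model_write_lock(model_rwlock_t *rw)
{
	for (;;) {
		model_spin_lock(rw);
		if (rw->counter == 0)
			return;	/* no readers: keep the inner lock held */
		/* surprised some readers; back off and let them drain */
		model_spin_unlock(rw);
		while (rw->counter != 0)
			;
	}
}

static void model_write_unlock(model_rwlock_t *rw)
{
	/* the writer still owns the inner lock, so just release it */
	model_spin_unlock(rw);
}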

Index: include/asm-parisc/spinlock.h
===================================================================
RCS file: /home/cvs/parisc/linux/include/asm-parisc/spinlock.h,v
retrieving revision 1.10
diff -u -p -r1.10 spinlock.h
--- spinlock.h	2001/02/16 05:39:24	1.10
+++ spinlock.h	2001/04/17 21:58:13
@@ -12,28 +12,24 @@
 
 #define spin_lock_init(x)	do { (x)->lock = 1; } while(0)
 
-#define spin_is_locked(x) ((x)->lock == 0)
+#define spin_is_locked(x)	((x)->lock == 0)
 
 #define spin_unlock_wait(x)	do { barrier(); } while(((volatile spinlock_t *)(x))->lock == 0)
 
-#if 1
 #define spin_lock(x) do { \
 	while (__ldcw (&(x)->lock) == 0) \
 		while (((x)->lock) == 0) ; } while (0)
 
-#else
-#define spin_lock(x) \
-	do { while(__ldcw(&(x)->lock) == 0); } while(0)
-#endif
-	
 #define spin_unlock(x) \
 	do { (x)->lock = 1; } while(0)
 
 #define spin_trylock(x) (__ldcw(&(x)->lock) != 0)
 
 /*
- * Read-write spinlocks, allowing multiple readers
- * but only one writer.
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ * Readers each take the spinlock, increment the counter and release the lock.
+ * A writer takes the spinlock and ensures the counter is zero.  Everyone
+ * blocks behind the writer until it releases the spinlock.
  */
 typedef struct {
 	spinlock_t lock;
@@ -42,16 +38,19 @@ typedef struct {
 
 #define RW_LOCK_UNLOCKED (rwlock_t) { SPIN_LOCK_UNLOCKED, 0 }
 
-/* read_lock, read_unlock are pretty straightforward.  Of course it somehow
- * sucks we end up saving/restoring flags twice for read_lock_irqsave aso. */
+/* We redefine the generic read_lock irq wrappers so that we don't
+ * end up saving flags and disabling interrupts twice per lock. */
 
+#undef read_lock_irqsave
+#undef read_unlock_irqrestore
+#undef read_lock_irq
+#undef read_unlock_irq
+
 static inline void read_lock(rwlock_t *rw)
 {
 	unsigned long flags;
 	spin_lock_irqsave(&rw->lock, flags);
-
 	rw->counter++;
-
 	spin_unlock_irqrestore(&rw->lock, flags);
 }
 
@@ -59,12 +58,38 @@ static inline void read_unlock(rwlock_t 
 {
 	unsigned long flags;
 	spin_lock_irqsave(&rw->lock, flags);
-
 	rw->counter--;
+	spin_unlock_irqrestore(&rw->lock, flags);
+}
 
+/* This has to be a macro, not an inline function, so that the flags
+ * saved by spin_lock_irqsave() actually reach the caller's variable. */
+#define read_lock_irqsave(rw, flags) \
+do {	spin_lock_irqsave(&(rw)->lock, flags); \
+	(rw)->counter++; \
+	spin_unlock(&(rw)->lock); /* leave irqs off */ } while (0)
+
+static inline void read_unlock_irqrestore(rwlock_t *rw, unsigned long flags)
+{
+	spin_lock(&rw->lock); /* irqs must already be off at this point */
+	rw->counter--;
 	spin_unlock_irqrestore(&rw->lock, flags);
 }
 
+static inline void read_lock_irq(rwlock_t *rw)
+{
+	spin_lock_irq(&rw->lock);
+	rw->counter++;
+	spin_unlock(&rw->lock); /* leave irqs off */
+}
+
+static inline void read_unlock_irq(rwlock_t *rw)
+{
+	spin_lock(&rw->lock); /* irqs must already be off at this point */
+	rw->counter--;
+	spin_unlock_irq(&rw->lock);
+}
+
 /* write_lock is less trivial.  We optimistically grab the lock and check
  * if we surprised any readers.  If so we release the lock and wait till
  * they're all gone before trying again
@@ -78,17 +103,14 @@ static inline void write_lock(rwlock_t *
 {
 retry:
 	spin_lock(&rw->lock);
-
-	if(rw->counter != 0) {
-		/* this basically never happens */
-		spin_unlock(&rw->lock);
-
-		while(rw->counter != 0);
+	if (rw->counter == 0)
+		return;
 
-		goto retry;
-	}
+	/* Contention with readers.  Wait for them to go away. */
+	spin_unlock(&rw->lock);
 
-	/* got it.  now leave without unlocking */
+	while (rw->counter != 0);
+	goto retry;
 }
 
 /* write_unlock is absolutely trivial - we don't have to wait for anything */
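
And for anyone who finds the diff markers hard to read, this is how
write_lock() ends up looking with the patch applied (same code as the
last hunk, nothing new):

static inline void write_lock(rwlock_t *rw)
{
retry:
	spin_lock(&rw->lock);
	if (rw->counter == 0)
		return;

	/* Contention with readers.  Wait for them to go away. */
	spin_unlock(&rw->lock);

	while (rw->counter != 0);
	goto retry;
}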