[parisc-linux-cvs] SMP Interrupt fixes

Matthew Wilcox <willy@ldl.fc.hp.com>
Tue, 20 Feb 2001 11:43:38 -0700


 * Remove use of global_irq_count; it's unnecessary

 * Add an explanation of the IRQ locking scheme

 * Fix __global_cli to check PSW_I instead of using in_interrupt()
   (see the sketch after this list)

 * Fix __global_save_flags to check PSW_I instead of `7', which probably
   came from some other port [Spotted by Grant]

 * Whitespace changes
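
As a companion to the two PSW_I items above, here is a minimal sketch of the
idea, not the committed code: on PA-RISC the I bit of the PSW gates external
interrupts, so testing PSW_I in the saved flags answers "are interrupts
currently enabled on this CPU?", which is the question __global_cli and
__global_save_flags actually need answered.  PSW_I and __save_flags() are the
kernel definitions the patch already uses; demo_local_irqs_enabled() is an
invented name for illustration only.

/*
 * Illustration only, not part of the patch.  in_interrupt() answers
 * "are we inside an interrupt or bottom-half handler?", which is a
 * different question from "are interrupts enabled right now?", and
 * the old `7' mask is the value the changelog says probably came
 * from another port.
 */
static inline int demo_local_irqs_enabled(void)
{
	unsigned long flags;

	__save_flags(flags);		/* read the current PSW */
	return (flags & PSW_I) != 0;	/* I bit set => interrupts enabled */
}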

Index: arch/parisc/kernel/irq_smp.c
===================================================================
RCS file: /home/cvs/parisc/linux/arch/parisc/kernel/irq_smp.c,v
retrieving revision 1.1
diff -u -p -r1.1 irq_smp.c
--- irq_smp.c	2001/01/18 18:25:19	1.1
+++ irq_smp.c	2001/02/20 18:26:47
@@ -20,9 +20,8 @@
 #include <asm/io.h>
 
 
-int global_irq_holder = NO_PROC_ID;     /* Who has global_irq_lock. */
+int global_irq_holder = NO_PROC_ID;	/* Who has global_irq_lock. */
 spinlock_t global_irq_lock = SPIN_LOCK_UNLOCKED; /* protects IRQ's. */
-atomic_t global_irq_count;              /* global count of IRQ active */
 
 
 /* Global IRQ locking depth. */
@@ -34,18 +33,18 @@ static void *previous_irqholder = NULL;
 static void
 show(char * str, void *where)
 {
-        int cpu = smp_processor_id();
+	int cpu = smp_processor_id();
 
-        printk("\n%s, CPU %d: %p\n", str, cpu, where);
-        printk("irq:  %d [%d %d]\n",
-	       irqs_running(),
-               local_irq_count(0),
-               local_irq_count(1));
-
-        printk("bh:   %d [%d %d]\n",
-	       spin_is_locked(&global_bh_lock) ? 1 : 0,
-	       local_bh_count(0),
-	       local_bh_count(1));
+	printk("\n%s, CPU %d: %p\n", str, cpu, where);
+	printk("irq:  %d [%d %d]\n",
+		irqs_running(),
+		local_irq_count(0),
+		local_irq_count(1));
+
+	printk("bh:   %d [%d %d]\n",
+		spin_is_locked(&global_bh_lock) ? 1 : 0,
+		local_bh_count(0),
+		local_bh_count(1));
 }
 
 static inline void
@@ -137,12 +136,12 @@ get_irqlock(int cpu, void* where)
 void
 __global_cli(void)
 {
-	if (in_interrupt())
-	{
-                int cpu = smp_processor_id();
+	unsigned int flags;
+	__save_flags(flags);
+	if (flags & PSW_I) {
+		int cpu = smp_processor_id();
 		__cli(); 
-		if (!local_irq_count(cpu))
-		{
+		if (!local_irq_count(cpu)) {
 			void *where = __builtin_return_address(0);
 			get_irqlock(cpu, where);
 		}
@@ -152,9 +151,9 @@ __global_cli(void)
 void
 __global_sti(void)
 {
-        int cpu = smp_processor_id();
+	int cpu = smp_processor_id();
 
-        if (!local_irq_count(cpu))
+	if (!local_irq_count(cpu))
 		release_irqlock(cpu);
 	__sti();
 }
@@ -169,22 +168,22 @@ __global_sti(void)
 unsigned long
 __global_save_flags(void)
 {
-        int retval;
-        int local_enabled;
-        unsigned long flags;
+	int retval;
+	int local_enabled;
+	unsigned long flags;
 	int cpu = smp_processor_id();
 
-        __save_flags(flags);
-        local_enabled = (!(flags & 7));
-        /* default to local */
-        retval = 2 + local_enabled;
-
-        /* Check for global flags if we're not in an interrupt.  */
-        if (!local_irq_count(cpu)) {
-                if (local_enabled)
-                        retval = 1;
-                if (global_irq_holder == cpu)
-                        retval = 0;
+	__save_flags(flags);
+	local_enabled = (flags & PSW_I) != 0;
+	/* default to local */
+	retval = 2 + local_enabled;
+
+	/* Check for global flags if we're not in an interrupt.  */
+	if (!local_irq_count(cpu)) {
+		if (local_enabled)
+			retval = 1;
+		if (global_irq_holder == cpu)
+			retval = 0;
 	}
 	return retval;
 }
@@ -192,23 +191,23 @@ __global_save_flags(void)
 void
 __global_restore_flags(unsigned long flags)
 {
-        switch (flags) {
-        case 0:
-                __global_cli();
-                break;
-        case 1:
-                __global_sti();
-                break;
-        case 2:
-                __cli();
-                break;
-        case 3:
-                __sti();
-                break;
-        default:
-                printk(KERN_ERR "global_restore_flags: %08lx (%p)\n",
-                        flags, __builtin_return_address(0));
-        }
+	switch (flags) {
+	case 0:
+		__global_cli();
+		break;
+	case 1:
+		__global_sti();
+		break;
+	case 2:
+		__cli();
+		break;
+	case 3:
+		__sti();
+		break;
+	default:
+		printk(KERN_ERR "global_restore_flags: %08lx (%p)\n",
+			flags, __builtin_return_address(0));
+	}
 }
 
 /*
Index: include/asm-parisc/hardirq.h
===================================================================
RCS file: /home/cvs/parisc/linux/include/asm-parisc/hardirq.h,v
retrieving revision 1.9
diff -u -p -r1.9 hardirq.h
--- hardirq.h	2001/01/18 18:25:22	1.9
+++ hardirq.h	2001/02/20 18:26:48
@@ -1,11 +1,19 @@
-/* hardirq.h: 32-bit Sparc hard IRQ support.
+/* hardirq.h: PA-RISC hard IRQ support.
  *
- * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1998-99 Anton Blanchard (anton@progsoc.uts.edu.au)
+ * Copyright (C) 2001 Matthew Wilcox <matthew@wil.cx>
+ *
+ * The locking is really quite interesting.  There's a cpu-local
+ * count of how many interrupts are being handled, and a global
+ * lock.  An interrupt can only be serviced if the global lock
+ * is free.  You can't be sure no more interrupts are being
+ * serviced until you've acquired the lock and then checked that
+ * all the per-cpu interrupt counts are zero.  It's a specialised
+ * br_lock, which is exactly what Sparc uses; we don't, because that
+ * means more locking for us.  This way the interrupt path is lock-free.
  */
 
-#ifndef __PARISC_HARDIRQ_H
-#define __PARISC_HARDIRQ_H
+#ifndef _PARISC_HARDIRQ_H
+#define _PARISC_HARDIRQ_H
 
 #include <linux/config.h>
 #include <linux/threads.h>
@@ -49,11 +57,10 @@ typedef struct {
 
 extern int global_irq_holder;
 extern spinlock_t global_irq_lock;
-extern atomic_t global_irq_count;
 
 static inline int irqs_running (void)
 {
-        int i;
+	int i;
 
 	for (i = 0; i < smp_num_cpus; i++)
 		if (local_irq_count(i))
@@ -74,19 +81,19 @@ static inline void release_irqlock(int c
 static inline void irq_enter(int cpu, int irq)
 {
 	++local_irq_count(cpu);
-	atomic_inc(&global_irq_count);
+
+	while (spin_is_locked(&global_irq_lock))
+		barrier();
 }
 
 static inline void irq_exit(int cpu, int irq)
 {
-	atomic_dec(&global_irq_count);
 	--local_irq_count(cpu);
 }
 
 static inline int hardirq_trylock(int cpu)
 {
-	return (! atomic_read(&global_irq_count) &&
-		! spin_is_locked (&global_irq_lock));
+	return !local_irq_count(cpu) && !spin_is_locked (&global_irq_lock);
 }
 
 #define hardirq_endlock(cpu)	do { } while (0)
@@ -95,4 +102,4 @@ extern void synchronize_irq(void);
 
 #endif /* CONFIG_SMP */
 
-#endif /* __PARISC_HARDIRQ_H */
+#endif /* _PARISC_HARDIRQ_H */
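
For readers who want the new hardirq.h comment spelled out as code, the sketch
below shows the global-lock side of the scheme under the same assumptions the
patch makes: global_irq_lock, local_irq_count(), smp_num_cpus, __cli()/__sti()
and barrier() are exactly the symbols used above, while demo_quiesce_irqs() is
an invented name, not a kernel function.  Interrupt entry itself takes no
lock: irq_enter() bumps the per-cpu count and then spins only while someone
holds global_irq_lock, so the holder of that lock must separately wait for
every per-cpu count to reach zero before it can assume no handler is still
running.

/*
 * Sketch only; roughly what a get_irqlock()-style caller has to do.
 */
static void demo_quiesce_irqs(void)
{
	int i;

	__cli();			/* an interrupt taken on this CPU would
					 * otherwise spin forever in irq_enter() */
	spin_lock(&global_irq_lock);	/* new interrupts on other CPUs now
					 * spin in irq_enter() */

	/* Handlers already past irq_enter() must drain before we go on. */
	for (i = 0; i < smp_num_cpus; i++)
		while (local_irq_count(i))
			barrier();

	/* ... no CPU is inside an interrupt handler at this point ... */

	spin_unlock(&global_irq_lock);
	__sti();			/* let interrupts flow again */
}

The trade-off the hardirq.h comment describes is visible here: all the waiting
happens on the rare global-cli side, so irq_enter()/irq_exit() never have to
acquire a lock.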