[parisc-linux-cvs] Re: DIFF 2.4.18-pa17 xtime_lock deadlock

Grant Grundler grundler@dsl2.external.hp.com
Fri, 12 Apr 2002 01:45:47 -0600


Index: Makefile
===================================================================
RCS file: /var/cvs/linux/Makefile,v
retrieving revision 1.287
diff -u -p -r1.287 Makefile
--- Makefile	2002/04/07 21:11:46	1.287
+++ Makefile	2002/04/12 07:38:32
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 4
 SUBLEVEL = 18
-EXTRAVERSION = -pa16
+EXTRAVERSION = -pa17
 
 KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
 
Index: arch/parisc/kernel/irq.c
===================================================================
RCS file: /var/cvs/linux/arch/parisc/kernel/irq.c,v
retrieving revision 1.51
diff -u -p -r1.51 irq.c
--- arch/parisc/kernel/irq.c	2001/11/13 23:48:39	1.51
+++ arch/parisc/kernel/irq.c	2002/04/12 07:38:32
@@ -391,14 +391,17 @@ void do_cpu_irq_mask(unsigned long mask,
 #endif
 
 	/*
-	 * do_cpu_irq_mask is called with the PSW_I bit off. we don't
-	 * enable it until we've masked (cleared the bits in the eiem)
-	 * the current set of interrupts we are processing.
+	 * do_cpu_irq_mask is called with the PSW_I bit off.
+	 * PSW_I can't be enabled until eiem bits are masked (cleared)
+	 * for the current set of interrupts to be processed.
+	 *
+	 * PSW_I cannot be enabled until after the interrupts are processed.
+	 * timer_interrupt() assumes it won't get interrupted when it
+	 * holds the xtime_lock...an unmasked interrupt source could
+	 * interrupt and deadlock by trying to grab xtime_lock too.
+	 * Keeping PSW_I disabled avoids this.
 	 */
 
-	orig_eiem = get_eiem();
-	set_eiem(orig_eiem & ~mask);
-	local_irq_enable();
 	for (bit = (1L<<MAX_CPU_IRQ), irq = 0; mask && bit; bit>>=1, irq++) {
 		int irq_num;
 		if (!(bit&mask))
@@ -408,9 +411,9 @@ void do_cpu_irq_mask(unsigned long mask,
 		irq_num = region->data.irqbase + irq;
 		do_irq(&region->action[irq], irq_num, regs);
 	}
-	set_eiem(orig_eiem);
 
-	/* Leaving with PSW_I bit set */
+	/* Leave with PSW_I bit set */
+	local_irq_enable();
 }
 
 
Index: arch/parisc/kernel/processor.c
===================================================================
RCS file: /var/cvs/linux/arch/parisc/kernel/processor.c,v
retrieving revision 1.10
diff -u -p -r1.10 processor.c
--- arch/parisc/kernel/processor.c	2002/04/05 08:02:02	1.10
+++ arch/parisc/kernel/processor.c	2002/04/12 07:38:32
@@ -85,6 +85,12 @@ static int __init processor_probe(struct
 	}
 #endif
 
+	/* logical CPU ID and update global counter
+	 * May get overwritten by PAT code.
+	 */
+	cpuid = boot_cpu_data.cpu_count;
+	txn_addr = dev->hpa;	/* for legacy PDC */
+
 #ifdef __LP64__
 	if (is_pdc_pat()) {
 		ulong status;
@@ -103,6 +109,13 @@ static int __init processor_probe(struct
 
 		txn_addr = pa_pdc_cell.mod[0];   /* id_eid for IO sapic */
 
+#undef USE_PAT_CPUID
+#ifdef USE_PAT_CPUID
+/* We need contiguous numbers for cpuid. Firmware's notion
+ * of cpuid is for physical CPUs and we just don't care yet.
+ * We'll care when we need to query PAT PDC about a CPU *after*
+ * boot time (ie shutdown a CPU from an OS perspective).
+ */
 		/* get the cpu number */
 		status = pdc_pat_cpu_get_number(&cpu_info, dev->hpa);
 
@@ -119,14 +132,9 @@ static int __init processor_probe(struct
 		} else {
 			cpuid = cpu_info.cpu_num;
 		}
-	} else
 #endif
-	{
-		txn_addr = dev->hpa;	/* for legacy PDC */
-
-		/* logical CPU ID and update global counter */
-		cpuid = boot_cpu_data.cpu_count;
 	}
+#endif
 
 	p = &cpu_data[cpuid];
 	boot_cpu_data.cpu_count++;
@@ -137,7 +145,7 @@ static int __init processor_probe(struct
 	p->dev = dev;		/* Save IODC data in case we need it */
 	p->hpa = dev->hpa;	/* save CPU hpa */
 	p->cpuid = cpuid;	/* save CPU id */
-	p->txn_addr = txn_addr;	/* save CPU hpa */
+	p->txn_addr = txn_addr;	/* save CPU IRQ address */
 #ifdef CONFIG_SMP
 	p->lock = SPIN_LOCK_UNLOCKED;
 #endif
Index: arch/parisc/kernel/smp.c
===================================================================
RCS file: /var/cvs/linux/arch/parisc/kernel/smp.c,v
retrieving revision 1.17
diff -u -p -r1.17 smp.c
--- arch/parisc/kernel/smp.c	2002/03/05 22:51:43	1.17
+++ arch/parisc/kernel/smp.c	2002/04/12 07:38:32
@@ -3,7 +3,7 @@
 **
 ** Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 ** Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
-** Copyright (C) 2001 Grant Grundler <grundler@puffin.external.hp.com>
+** Copyright (C) 2001 Grant Grundler <grundler@parisc-linux.org>
 ** 
 ** Lots of stuff stolen from arch/alpha/kernel/smp.c
 ** ...and then parisc stole from arch/ia64/kernel/smp.c. Thanks David! :^)
@@ -60,9 +60,6 @@ spinlock_t smp_lock = SPIN_LOCK_UNLOCKED
 volatile struct task_struct *smp_init_current_idle_task;
 spinlock_t kernel_flag = SPIN_LOCK_UNLOCKED;
 
-volatile int __cpu_number_map[NR_CPUS] = { -1, };  /* cpu # -> Logical cpu ID*/
-volatile int __cpu_logical_map[NR_CPUS] = { -1, }; /* logical ID -> cup # */
-
 static volatile int smp_commenced = 0;   /* Set when the idlers are all forked */
 static volatile int cpu_now_booting = 0;      /* track which CPU is booting */
 volatile unsigned long cpu_online_map = 0;   /* Bitmap of online CPUs */
@@ -290,9 +287,8 @@ send_IPI_allbutself(enum ipi_message_typ
 	int i;
 	
 	for (i = 0; i < smp_num_cpus; i++) {
-		int cpu_id = __cpu_logical_map[i];
-		if (cpu_id != smp_processor_id())
-			send_IPI_single(cpu_id, op);
+		if (i != smp_processor_id())
+			send_IPI_single(i, op);
 	}
 }
 
@@ -543,15 +539,11 @@ static int smp_boot_one_cpu(int cpuid, i
 	idle = init_task.prev_task;
 	if (!idle)
 		panic("SMP: No idle process for CPU:%d", cpuid);
-
-	__cpu_number_map[cpuid] = cpunum;
-	__cpu_logical_map[cpunum] = cpuid;
 
-	init_tasks[cpunum] = idle;
+	task_set_cpu(idle, cpunum);	/* manually schedule idle task */
 	del_from_runqueue(idle);
 	unhash_process(idle);
-
-	idle->processor = cpunum;
+	init_tasks[cpunum] = idle;
 
 	/* Let _start know what logical CPU we're booting
 	** (offset into init_tasks[],cpu_data[])
@@ -589,8 +581,6 @@ static int smp_boot_one_cpu(int cpuid, i
 		barrier();
 	}
 
-	__cpu_logical_map[cpunum] = NO_PROC_ID;
-	__cpu_number_map[cpuid] = NO_PROC_ID;
 	init_tasks[cpunum] = NULL;
 	free_task_struct(idle);
 
@@ -598,8 +588,6 @@ static int smp_boot_one_cpu(int cpuid, i
 	return -1;
 
 alive:
-	__cpu_logical_map[cpunum] = cpuid;
-	__cpu_number_map[cpuid] = cpunum;
 	/* Remember the Slave data */
 #if (kDEBUG>=100)
 	printk(KERN_DEBUG "SMP: CPU:%d (num %d) came alive after %ld _us\n",
@@ -631,25 +619,16 @@ void __init smp_boot_cpus(void)
 	/* REVISIT - assumes first CPU reported by PAT PDC is BSP */
 	int bootstrap_processor=cpu_data[0].cpuid;	/* CPU ID of BSP */
 
-	/* Take care of some initial bookkeeping.  */
-	for(i=0;i<NR_CPUS;i++) {
-		__cpu_number_map[i] = NO_PROC_ID;
-		__cpu_logical_map[i] = NO_PROC_ID;
-	}
-
 	/* Setup BSP mappings */
-	__cpu_number_map[bootstrap_processor] = 0;
-	__cpu_logical_map[0] = bootstrap_processor;
 	printk(KERN_DEBUG "SMP: bootstrap CPU ID is %d\n",bootstrap_processor);
-	init_task.processor = 0; 
-	current->processor = 0;	/*These are set already*/
+	init_task.processor = bootstrap_processor; 
+	current->processor = bootstrap_processor;
+	cpu_online_map = 1 << bootstrap_processor; /* Mark Bootstrap processor as present */
 	current->active_mm = &init_mm;
-	cpu_online_map = 1; /* Mark Boostrap processor as present */
 
 #ifdef ENTRY_SYS_CPUS
 	cpu_data[0].state = STATE_RUNNING;
 #endif
-
 
 	/* Nothing to do when told not to.  */
 	if (max_cpus == 0) {
Index: include/asm-parisc/smp.h
===================================================================
RCS file: /var/cvs/linux/include/asm-parisc/smp.h,v
retrieving revision 1.7
diff -u -p -r1.7 smp.h
--- include/asm-parisc/smp.h	2001/11/17 07:39:12	1.7
+++ include/asm-parisc/smp.h	2002/04/12 07:38:32
@@ -20,18 +20,13 @@ extern volatile unsigned long cpu_online
 
 /*
  *	Private routines/data
+ *
+ *	physical and logical are equivalent until we support CPU hotplug.
  */
-extern volatile int __cpu_number_map[NR_CPUS];
-static inline int cpu_number_map(int cpu)
-{
-	return __cpu_number_map[cpu];
-}
+#define cpu_number_map(cpu)	(cpu)
+#define cpu_logical_map(cpu)	(cpu)
+
 extern void smp_send_reschedule(int cpu);
-extern volatile int __cpu_logical_map[NR_CPUS];
-static inline int cpu_logical_map(int cpu)
-{
-	return __cpu_logical_map[cpu];
-}
 
 #endif /* !ASSEMBLY */