[parisc-linux-cvs] Re: DIFF 2.4.18-pa27
Grant Grundler
grundler@dsl2.external.hp.com
Mon, 27 May 2002 16:26:17 -0600
Grant Grundler wrote:
> Log message:
> 2.4.18-pa27
> o irq.c: clean up EIEM/EIRR support in irq.c
> o entry.S: code optimizations to interrupt path and changes to match
> do_cpu_irq_mask() interface.
> o irq.c: make /proc/interrupt header the same as IA64/x86
> o hardware.c: added missing model names
> o sba_iommu.c: remove #ifdef SBA_SELF_MOD_CODE crud
> o smp.c: ifdef 0 around interrupt stack support that isn't functional (yet)
> (corresponding code in entry.S is also #if 0'd out)
> o processor.c: whitespace
DIFF is appended.
The main change driving this was fixing EIEM and EIRR handling.
Code optimizations in entry.S...well, I couldn't help myself.
The new do_cpu_irq_mask() does all the EIRR bit twiddling and
*loops* a few times in case interrupts come in back-to-back.
This saves us entering/exiting the interrupt context.
I'm wondering if I should drop the count limiter since
we'll just re-enter this code when intr_return (entry.S)
re-enables interrupts (ssm is first thing there).
Tested on rp2470 - I was able to download 25MB for apt-get upgrade
and build a kernel at the same time - CONFIG_SMP=y! :^)
(also tested CONFIG_UP but just to see that it boots)
Note that enabling EARLY_PDC_DEBUG in pdc_cons.c is a guaranteed
HPMC when doing PCI bus walks since console is "busy" while the
console device gets disabled. At least I was able to reproduce it
reliably 5 or 6 times in a row until I realized that was
the problem.
enjoy!
grant
Index: Makefile
===================================================================
RCS file: /var/cvs/linux/Makefile,v
retrieving revision 1.297
diff -u -p -r1.297 Makefile
--- Makefile 2002/05/25 06:06:14 1.297
+++ Makefile 2002/05/27 22:08:54
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 4
SUBLEVEL = 18
-EXTRAVERSION = -pa26
+EXTRAVERSION = -pa27
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
Index: arch/parisc/kernel/entry.S
===================================================================
RCS file: /var/cvs/linux/arch/parisc/kernel/entry.S,v
retrieving revision 1.92
diff -u -p -r1.92 entry.S
--- arch/parisc/kernel/entry.S 2002/04/03 03:11:01 1.92
+++ arch/parisc/kernel/entry.S 2002/05/27 22:08:54
@@ -152,23 +152,6 @@
STREG %r29,PT_GR29(%r9)
STREG %r26,PT_GR26(%r9)
copy %r9,%r29
- copy %r8,%r26
- .endm
-
- .macro get_stack_use_cr31
-
- /* We put a struct pt_regs on the per processor interrupt stack
- * pointed to by %cr31, and save the registers there.
- * N.B: Caller puts value of cr31 in %r1!
- */
-
- tophys %r1,%r9
- STREG %r30, PT_GR30(%r9)
- ldo PT_SZ_ALGN(%r1),%r30
- STREG %r29,PT_GR29(%r9)
- STREG %r26,PT_GR26(%r9)
- copy %r9,%r29
- copy %r8,%r26
.endm
.macro get_stack_use_r30
@@ -181,7 +164,6 @@
STREG %r29,PT_GR29(%r9)
STREG %r26,PT_GR26(%r9)
copy %r9,%r29
- copy %r8,%r26
.endm
.macro rest_stack
@@ -201,11 +183,8 @@
/* Interrupt interruption handler
* (calls irq.c:do_cpu_irq_mask) */
.macro extint code
- mfctl %cr23, %r8
- mfctl %cr15, %r9
- and %r8,%r9,%r8 /* Only process non masked interrupts */
b intr_extint
- mtctl %r8, %cr23
+ mfsp %sr7,%r16
.align 32
.endm
@@ -714,7 +693,6 @@ syscall_exit_rfi:
STREG %r19,PT_SR7(%r16)
intr_return:
-
ssm PSW_SM_I, %r0
/* Check for software interrupts */
@@ -801,15 +779,14 @@ intr_do_resched:
CMPIB= 0,%r20,intr_restore /* backward */
nop
- bl schedule,%r2
#ifdef __LP64__
ldo -16(%r30),%r29 /* Reference param save area */
-#else
- nop
#endif
- b intr_return /* start over if we got a resched */
- nop
+ ldil L%intr_return, %r2
+ b schedule
+ ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */
+
.import do_signal,code
intr_do_signal:
@@ -834,27 +811,25 @@ intr_do_signal:
nop
/*
- * External interrupts. r8 contains argument for do_cpu_irq_mask.
- * "get_stack" macros move the value of r8 to r26.
+ * External interrupts.
*/
intr_extint:
- mfsp %sr7,%r16
CMPIB=,n 0,%r16,1f
get_stack_use_cr30
b,n 3f
1:
#if 0 /* Interrupt Stack support not working yet! */
- mfctl %cr31,%r1
- copy %r30,%r17
+ mfctl %cr31,%r1
+ copy %r30,%r17
/* FIXME! depi below has hardcoded idea of interrupt stack size (32k)*/
#ifdef __LP64__
- depdi 0,63,15,%r17
+ depdi 0,63,15,%r17
#else
- depi 0,31,15,%r17
+ depi 0,31,15,%r17
#endif
- CMPB=,n %r1,%r17,2f
+ CMPB=,n %r1,%r17,2f
get_stack_use_cr31
b,n 3f
#endif
@@ -866,33 +841,33 @@ intr_extint:
virt_map
save_general %r29
- ldo PT_FR0(%r29), %r24
- save_fp %r24
+ ldo PT_FR0(%r29), %r24
+ save_fp %r24
loadgp
- copy %r29, %r24 /* arg2 is pt_regs */
- copy %r29, %r16 /* save pt_regs */
+ copy %r29, %r25 /* arg1 is pt_regs */
+ copy %r29, %r16 /* save pt_regs */
#ifdef CONFIG_KWDB
- copy %r29, %r3 /* KWDB - update frame pointer (gr3) */
+ copy %r29, %r3 /* KWDB - update frame pointer (gr3) */
#endif
- /*
- * We need to either load the CPU's ID or IRQ region.
- * Until we have "per CPU" IRQ regions, this is easy.
- */
- ldil L%cpu_irq_region, %r25
- ldo R%cpu_irq_region(%r25), %r25
-
- bl do_cpu_irq_mask,%r2
#ifdef __LP64__
- ldo -16(%r30),%r29 /* Reference param save area */
+ ldo -16(%r30),%r29 /* Reference param save area */
#else
nop
#endif
-
- b intr_return
- nop
+
+ /*
+ * We need to either load the CPU's ID or IRQ region.
+ * Until we have "per CPU" IRQ regions, this is easy.
+ */
+ ldil L%cpu_irq_region, %r26
+ ldil L%intr_return, %r2
+ ldo R%cpu_irq_region(%r26), %r26
+
+ b do_cpu_irq_mask
+ ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */
/* Generic interruptions (illegal insn, unaligned, page fault, etc) */
@@ -902,10 +877,12 @@ intr_save:
mfsp %sr7,%r16
CMPIB=,n 0,%r16,1f
get_stack_use_cr30
- b,n 2f
+ b 2f
+ copy %r8,%r26
1:
get_stack_use_r30
+ copy %r8,%r26
2:
save_specials %r29
@@ -968,10 +945,13 @@ skip_save_ior:
#ifdef __LP64__
ldo -16(%r30),%r29 /* Reference param save area */
#endif
- bl handle_interruption,%r2
+
+ ldil L%intr_return, %r2
copy %r25, %r16 /* save pt_regs */
+
+ b handle_interruption
+ ldo R%intr_return(%r2), %r2 /* return to intr_return */
- b,n intr_return
/*
* Note for all tlb miss handlers:
Index: arch/parisc/kernel/hardware.c
===================================================================
RCS file: /var/cvs/linux/arch/parisc/kernel/hardware.c,v
retrieving revision 1.37
diff -u -p -r1.37 hardware.c
--- arch/parisc/kernel/hardware.c 2002/03/23 22:47:02 1.37
+++ arch/parisc/kernel/hardware.c 2002/05/27 22:08:54
@@ -218,9 +218,19 @@ static struct hp_hardware hp_hardware_li
{HPHW_NPROC,0x5D9,0x4,0x91,"Rhapsody wave 2 W+"},
{HPHW_NPROC,0x5DA,0x4,0x91,"Marcato W+ DC-"},
{HPHW_NPROC,0x5DB,0x4,0x91,"Marcato W+"},
+ {HPHW_NPROC,0x5DC,0x4,0x91,"Allegro W2"},
{HPHW_NPROC,0x5DD,0x4,0x81,"Duet W2"},
{HPHW_NPROC,0x5DE,0x4,0x81,"Piccolo W+"},
+ {HPHW_NPROC,0x5DF,0x4,0x81,"Cantata W2"},
+ {HPHW_NPROC,0x5E0,0x4,0x91,"Cantata DC- W2"},
+ {HPHW_NPROC,0x5E1,0x4,0x91,"Crescendo DC- W2"},
+ {HPHW_NPROC,0x5E2,0x4,0x91,"Crescendo 650 W2"},
+ {HPHW_NPROC,0x5E3,0x4,0x91,"Crescendo 750 W2"},
+ {HPHW_NPROC,0x5E4,0x4,0x91,"Keystone/Matterhorn W2 750"},
+ {HPHW_NPROC,0x5E5,0x4,0x91,"PowerBar W+"},
+ {HPHW_NPROC,0x5E6,0x4,0x91,"Keystone/Matterhorn W2 650"},
{HPHW_NPROC,0x5E7,0x4,0x91,"Caribe W2 800"},
+ {HPHW_NPROC,0x5E8,0x4,0x91,"Pikes Peak W2"},
{HPHW_NPROC,0x5FF,0x4,0x91,"Hitachi W"},
{HPHW_NPROC,0x600,0x4,0x81,"Gecko (712/60)"},
{HPHW_NPROC,0x601,0x4,0x81,"Gecko 80 (712/80)"},
Index: arch/parisc/kernel/irq.c
===================================================================
RCS file: /var/cvs/linux/arch/parisc/kernel/irq.c,v
retrieving revision 1.54
diff -u -p -r1.54 irq.c
--- arch/parisc/kernel/irq.c 2002/05/17 16:28:49 1.54
+++ arch/parisc/kernel/irq.c 2002/05/27 22:08:54
@@ -53,22 +53,42 @@ extern void ipi_interrupt(int, void *, s
#endif /* DEBUG_IRQ */
#define EIEM_MASK(irq) (1UL<<(MAX_CPU_IRQ-IRQ_OFFSET(irq)))
-#define CLEAR_EIEM_BIT(irq) set_eiem(get_eiem() & ~EIEM_MASK(irq))
-#define SET_EIEM_BIT(irq) set_eiem(get_eiem() | EIEM_MASK(irq))
+/* Bits in EIEM correlate with cpu_irq_action[].
+** Numbered *Big Endian*! (ie bit 0 is MSB)
+*/
+static unsigned long cpu_eiem = 0;
+
static spinlock_t irq_lock = SPIN_LOCK_UNLOCKED; /* protect IRQ regions */
+#ifdef CONFIG_SMP
+static cpu_set_eiem(void *info)
+{
+ set_eiem((unsigned long) info);
+}
+#endif
+
static inline void disable_cpu_irq(void *unused, int irq)
{
- CLEAR_EIEM_BIT(irq);
+ unsigned long eirr_bit = EIEM_MASK(irq);
+
+ cpu_eiem &= ~eirr_bit;
+ set_eiem(cpu_eiem);
+#ifdef CONFIG_SMP
+ smp_call_function(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
+#endif
}
static void enable_cpu_irq(void *unused, int irq)
{
- unsigned long mask = EIEM_MASK(irq);
+ unsigned long eirr_bit = EIEM_MASK(irq);
- mtctl(mask, 23);
- SET_EIEM_BIT(irq);
+ mtctl(eirr_bit, 23); /* clear EIRR bit before unmasking */
+ cpu_eiem |= eirr_bit;
+ set_eiem(cpu_eiem);
+#ifdef CONFIG_SMP
+ smp_call_function(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
+#endif
}
/* mask and disable are the same at the CPU level
@@ -78,10 +98,18 @@ static void enable_cpu_irq(void *unused,
static inline void unmask_cpu_irq(void *unused, int irq)
{
- SET_EIEM_BIT(irq);
+ unsigned long eirr_bit = EIEM_MASK(irq);
+ cpu_eiem |= eirr_bit;
+ set_eiem(cpu_eiem);
+#ifdef CONFIG_SMP
+ /* NOTE: sending an IPI will cause do_cpu_irq_mask() to
+ ** handle *any* unmasked pending interrupts.
+ ** ie We don't need to check for pending interrupts here.
+ */
+ smp_call_function(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
+#endif
}
-
static struct irqaction cpu_irq_actions[IRQ_PER_REGION] = {
[IRQ_OFFSET(TIMER_IRQ)] { handler: timer_interrupt, name: "timer", },
#ifdef CONFIG_SMP
@@ -105,17 +133,18 @@ struct irq_region *irq_region[NR_IRQ_REG
/*
** Generic interfaces that device drivers can use:
-** mask_irq() temporarily block IRQ
+** mask_irq() block IRQ
** unmask_irq() re-enable IRQ and trigger if IRQ is pending
-** disable_irq() turn IRQ off - ie ignore it
-** enable_irq() turn IRQ on - ie start using it
+** disable_irq() block IRQ
+** enable_irq() clear pending and re-enable IRQ
*/
void mask_irq(int irq)
{
struct irq_region *region;
- DBG_IRQ(irq, ("mask_irq(%d) %d+%d\n", irq, IRQ_REGION(irq), IRQ_OFFSET(irq)));
+ DBG_IRQ(irq, ("mask_irq(%d) %d+%d eiem 0x%lx\n", irq,
+ IRQ_REGION(irq), IRQ_OFFSET(irq), cpu_eiem));
irq = irq_cannonicalize(irq);
region = irq_region[IRQ_REGION(irq)];
if (region->ops.mask_irq)
@@ -126,7 +155,8 @@ void unmask_irq(int irq)
{
struct irq_region *region;
- DBG_IRQ(irq, ("unmask_irq(%d) %d+%d\n", irq, IRQ_REGION(irq), IRQ_OFFSET(irq)));
+ DBG_IRQ(irq, ("unmask_irq(%d) %d+%d eiem 0x%lx\n", irq,
+ IRQ_REGION(irq), IRQ_OFFSET(irq), cpu_eiem));
irq = irq_cannonicalize(irq);
region = irq_region[IRQ_REGION(irq)];
if (region->ops.unmask_irq)
@@ -137,7 +167,8 @@ void disable_irq(int irq)
{
struct irq_region *region;
- DBG_IRQ(irq, ("disable_irq(%d) %d+%d\n", irq, IRQ_REGION(irq), IRQ_OFFSET(irq)));
+ DBG_IRQ(irq, ("disable_irq(%d) %d+%d eiem 0x%lx\n", irq,
+ IRQ_REGION(irq), IRQ_OFFSET(irq), cpu_eiem));
irq = irq_cannonicalize(irq);
region = irq_region[IRQ_REGION(irq)];
if (region->ops.disable_irq)
@@ -150,7 +181,8 @@ void enable_irq(int irq)
{
struct irq_region *region;
- DBG_IRQ(irq, ("enable_irq(%d) %d+%d\n", irq, IRQ_REGION(irq), IRQ_OFFSET(irq)));
+ DBG_IRQ(irq, ("enable_irq(%d) %d+%d eiem 0x%lx\n", irq,
+ IRQ_REGION(irq), IRQ_OFFSET(irq), cpu_eiem));
irq = irq_cannonicalize(irq);
region = irq_region[IRQ_REGION(irq)];
@@ -164,9 +196,14 @@ int get_irq_list(char *buf)
{
#ifdef CONFIG_PROC_FS
char *p = buf;
- int regnr;
+ unsigned int regnr = 0;
- p += sprintf(p, " IRQ count Region ISR");
+ p += sprintf(p, " ");
+#ifdef CONFIG_SMP
+ for (; regnr < smp_num_cpus; regnr++)
+#endif
+ p += sprintf(p, " CPU%d ", regnr);
+
#ifdef PARISC_IRQ_CR16_COUNTS
p += sprintf(p, "[min/avg/max] (CPU cycle counts)");
#endif
@@ -177,7 +214,7 @@ int get_irq_list(char *buf)
*/
spin_lock(&irq_lock);
for (regnr = 0; regnr < NR_IRQ_REGS; regnr++) {
- int i;
+ unsigned int i;
struct irq_region *region = irq_region[regnr];
if (!region || !region->action)
@@ -187,7 +224,7 @@ int get_irq_list(char *buf)
struct irqaction *action = ®ion->action[i];
unsigned int irq_no = IRQ_FROM_REGION(regnr) + i;
#ifdef CONFIG_SMP
- int j;
+ unsigned int j;
#endif
if (!action->handler)
@@ -379,45 +416,53 @@ void do_irq(struct irqaction *action, in
/* ONLY called from entry.S:intr_extint() */
-void do_cpu_irq_mask(unsigned long mask, struct irq_region *region, struct pt_regs *regs)
+void do_cpu_irq_mask(struct irq_region *region, struct pt_regs *regs)
{
- unsigned long bit;
- unsigned long orig_eiem;
- int irq;
+ unsigned long eirr_val;
+ unsigned int i=3; /* limit time in interrupt context */
-#ifdef DEBUG_IRQ
- if (mask != (1L << MAX_CPU_IRQ))
- printk(KERN_DEBUG "do_irq_mask %08lx %p %p\n", mask, region, regs);
-#endif
-
/*
- * do_cpu_irq_mask is called with the PSW_I bit off.
- * PSW_I can't be enabled until eiem bits are masked (cleared)
- * for the current set of interrupts to be processed.
- *
- * PSW_I cannot be enabled until after the interrupts are processed.
+ * PSW_I or EIEM bits cannot be enabled until after the
+ * interrupts are processed.
* timer_interrupt() assumes it won't get interrupted when it
* holds the xtime_lock...an unmasked interrupt source could
* interrupt and deadlock by trying to grab xtime_lock too.
- * Keeping PSW_I disabled avoids this.
+ * Keeping PSW_I and EIEM disabled avoids this.
*/
+ set_eiem(0UL); /* disable all extr interrupt for now */
- orig_eiem = get_eiem();
- set_eiem(orig_eiem & ~mask);
+ /* 1) only process IRQs that are enabled/unmasked (cpu_eiem)
+ * 2) We loop here on EIRR contents in order to avoid
+ * nested interrupts or having to take another interupt
+ * when we could have just handled it right away.
+ * 3) Limit the number of times we loop to make sure other
+ * processing can occur.
+ */
+ while ((eirr_val = (mfctl(23) & cpu_eiem)) && --i) {
+ unsigned long bit = (1UL<<MAX_CPU_IRQ);
+ unsigned int irq = 0;
- for (bit = (1L<<MAX_CPU_IRQ), irq = 0; mask && bit; bit>>=1, irq++) {
- int irq_num;
- if (!(bit&mask))
- continue;
+ mtctl(eirr_val, 23); /* reset bits we are going to process */
- mask &= ~bit; /* clear bit in mask - can exit loop sooner */
- irq_num = region->data.irqbase + irq;
- do_irq(®ion->action[irq], irq_num, regs);
- }
- set_eiem(orig_eiem);
+#ifdef DEBUG_IRQ
+ if (eirr_val != (1UL << MAX_CPU_IRQ))
+ printk(KERN_DEBUG "do_cpu_irq_mask %x\n", eirr_val);
+#endif
+
+ for (; eirr_val && bit; bit>>=1, irq++)
+ {
+ unsigned int irq_num;
+ if (!(bit&eirr_val))
+ continue;
- /* Leave with PSW_I bit set */
- local_irq_enable();
+ /* clear bit in mask - can exit loop sooner */
+ eirr_val &= ~bit;
+
+ irq_num = region->data.irqbase + irq;
+ do_irq(®ion->action[irq], irq_num, regs);
+ }
+ }
+ set_eiem(cpu_eiem);
}
@@ -425,7 +470,7 @@ void do_cpu_irq_mask(unsigned long mask,
void do_irq_mask(unsigned long mask, struct irq_region *region, struct pt_regs *regs)
{
unsigned long bit;
- int irq;
+ unsigned int irq;
#ifdef DEBUG_IRQ
if (mask != (1L<<MAX_CPU_IRQ))
@@ -433,7 +478,7 @@ void do_irq_mask(unsigned long mask, str
#endif
for (bit = (1L<<MAX_CPU_IRQ), irq = 0; mask && bit; bit>>=1, irq++) {
- int irq_num;
+ unsigned int irq_num;
if (!(bit&mask))
continue;
@@ -785,8 +830,15 @@ int probe_irq_off(unsigned long val)
void __init init_IRQ(void)
{
local_irq_disable(); /* PARANOID - should already be disabled */
- mtctl(-1L, 23); /* EIRR : clear all pending interrupts */
- set_eiem(-1L); /* enable all EIR bits */
+ mtctl(-1L, 23); /* EIRR : clear all pending external intr */
+#ifdef CONFIG_SMP
+ if (!cpu_eiem)
+ cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
+#else
+ cpu_eiem = EIEM_MASK(TIMER_IRQ);
+#endif
+ set_eiem(cpu_eiem); /* EIEM : enable all external intr */
+
}
#ifdef CONFIG_PROC_FS
Index: arch/parisc/kernel/processor.c
===================================================================
RCS file: /var/cvs/linux/arch/parisc/kernel/processor.c,v
retrieving revision 1.11
diff -u -p -r1.11 processor.c
--- arch/parisc/kernel/processor.c 2002/04/12 07:43:34 1.11
+++ arch/parisc/kernel/processor.c 2002/05/27 22:08:55
@@ -216,7 +216,7 @@ void __init collect_boot_cpu_data(void)
/**
* init_cpu_profiler - enable/setup per cpu profiling hooks.
- * @cpuid: The processor instance.
+ * @cpunum: The processor instance.
*
* FIXME: doesn't do much yet...
*/
@@ -286,6 +286,7 @@ int __init init_per_cpu(int cpunum)
/* FUTURE: Enable Performance Monitor : ccr bit 0x20 */
init_percpu_prof(cpunum);
+
return ret;
}
Index: arch/parisc/kernel/sba_iommu.c
===================================================================
RCS file: /var/cvs/linux/arch/parisc/kernel/sba_iommu.c,v
retrieving revision 1.67
diff -u -p -r1.67 sba_iommu.c
--- arch/parisc/kernel/sba_iommu.c 2002/05/25 06:06:14 1.67
+++ arch/parisc/kernel/sba_iommu.c 2002/05/27 22:08:55
@@ -57,14 +57,6 @@
#undef DEBUG_LARGE_SG_ENTRIES
#undef DEBUG_DMB_TRAP
-/* XXX Need to determine if it's worth using self modifying code or not
-** in sba_dump_pdir_entry(). Avoids test/branch in critical code path.
-** But we have to eat a function call instead since sba_dump_pdir_entry()
-** is normally inline but can't be unless I figure out how to modify
-** three invocations. Right now, I think not.
-*/
-#undef SBA_SELF_MOD_CODE
-
#define SBA_INLINE __inline__
#ifdef DEBUG_SBA_INIT
@@ -720,10 +712,7 @@ sba_io_pdir_entry(u64 *pdir_ptr, space_t
pa |= 0x8000000000000000ULL; /* set "valid" bit */
*pdir_ptr = cpu_to_le64(pa); /* swap and store into I/O Pdir */
-#ifdef SBA_SELF_MOD_CODE
- asm("\nsba_iopdir_fdc:");
- asm volatile("fdc 0(%%sr1,%0)\n\tsync" : : "r" (pdir_ptr));
-#else
+
/*
* If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set
* (bit #61, big endian), we have to flush and sync every time
@@ -732,7 +721,6 @@ sba_io_pdir_entry(u64 *pdir_ptr, space_t
if (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC) {
asm volatile("fdc 0(%%sr1,%0)\n\tsync" : : "r" (pdir_ptr));
}
-#endif
}
@@ -1818,20 +1806,16 @@ sba_common_init(struct sba_device *sba_d
sba_dev->sba_lock = SPIN_LOCK_UNLOCKED;
-#ifdef SBA_SELF_MOD_CODE
+#ifdef DEBUG_SBA_INIT
/*
* If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set
* (bit #61, big endian), we have to flush and sync every time
* IO-PDIR is changed in Ike/Astro.
*/
if (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC) {
- printk(KERN_INFO MODULE_NAME " FDC/SYNC needed\n");
+ printk(KERN_INFO MODULE_NAME " FDC/SYNC required.\n");
} else {
- extern unsigned int sba_iopdir_fdc;
- printk(KERN_INFO MODULE_NAME " FDC/SYNC removed\n");
-
- ((unsigned int *) &sba_iopdir_fdc)[0] = 0x08000240; /* NOP */
- ((unsigned int *) &sba_iopdir_fdc)[1] = 0x08000240; /* NOP */
+ printk(KERN_INFO MODULE_NAME " IOC has cache coherent PDIR.\n");
}
#endif
}
Index: arch/parisc/kernel/smp.c
===================================================================
RCS file: /var/cvs/linux/arch/parisc/kernel/smp.c,v
retrieving revision 1.18
diff -u -p -r1.18 smp.c
--- arch/parisc/kernel/smp.c 2002/04/12 07:43:34 1.18
+++ arch/parisc/kernel/smp.c 2002/05/27 22:08:55
@@ -311,7 +311,7 @@ smp_send_reschedule(int cpu) { send_IPI_
* [RETURNS] 0 on success, else a negative status code.
*
* Does not return until remote CPUs are nearly ready to execute <func>
- * or are or have executed.
+ * or have executed.
*/
int
@@ -470,17 +470,20 @@ void __init smp_callin(void)
{
extern void cpu_idle(void); /* arch/parisc/kernel/process.c */
int slave_id = cpu_now_booting;
+#if 0
void *istack;
+#endif
smp_cpu_init(slave_id);
+#if 0 /* NOT WORKING YET - see entry.S */
istack = (void *)__get_free_pages(GFP_KERNEL,ISTACK_ORDER);
if (istack == NULL) {
printk(KERN_CRIT "Failed to allocate interrupt stack for cpu %d\n",slave_id);
BUG();
}
-
mtctl(istack,31);
+#endif
flush_cache_all_local(); /* start with known state */
flush_tlb_all_local();
@@ -493,8 +496,9 @@ void __init smp_callin(void)
mb(); /* PARANOID */
cpu_idle(); /* Wait for timer to schedule some work */
+
/* NOTREACHED */
-panic("smp_callin() AAAAaaaaahhhh....\n");
+ panic("smp_callin() AAAAaaaaahhhh....\n");
}
/*