[parisc-linux-cvs] Large Memory Patch
John Marvin
jsm@udlkern.fc.hp.com
Thu, 22 Mar 2001 09:39:24 -0700 (MST)
This is a somewhat large patch to add support for large memory. I first
implemented it assuming that we would use the CONFIG_DISCONTIGMEM option;
however, the support for that in Linux is broken (you can wind up swapping
heavily even when you have many gigabytes of free memory). So I also
implemented support by merging memory ranges and reserving the holes, as
long as the ranges are not too far apart.
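For reference, the merge logic boils down to the following (a simplified,
self-contained sketch of what the setup_bootmem() changes in the init.c hunk
below do; physmem_range_t, pmem_ranges[], npmem_ranges, pmem_holes[] and
MAX_GAP follow the patch, while the function name and the scaffolding
definitions here are only illustrative):

	/*
	 * Sketch of the merge-and-reserve approach. Ranges are assumed
	 * already sorted by start_pfn (the real patch insertion-sorts
	 * them first).
	 */
	#define PAGE_SHIFT 12
	#define MAX_GAP (0x40000000UL >> PAGE_SHIFT) /* 1 Gb, in pages */
	#define MAX_PHYSMEM_RANGES 8                 /* illustrative value */

	typedef struct {
		unsigned long start_pfn;
		unsigned long pages;
	} physmem_range_t;

	physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES];
	int npmem_ranges;

	static physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
	static int npmem_holes;

	static void merge_pmem_ranges(void)	/* hypothetical helper name */
	{
		unsigned long end_pfn, hole_pages;
		int i;

		/* Throw out any range that starts more than MAX_GAP pages
		 * past the end of its predecessor; covering a hole that big
		 * with mem_map would waste too much memory. */
		for (i = 1; i < npmem_ranges; i++) {
			if (pmem_ranges[i].start_pfn -
			    (pmem_ranges[i-1].start_pfn +
			     pmem_ranges[i-1].pages) > MAX_GAP) {
				npmem_ranges = i;
				break;
			}
		}

		/* Merge what remains into a single range, remembering each
		 * hole so it can be handed to reserve_bootmem_node() later. */
		npmem_holes = 0;
		end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
		for (i = 1; i < npmem_ranges; i++) {
			hole_pages = pmem_ranges[i].start_pfn - end_pfn;
			if (hole_pages) {
				pmem_holes[npmem_holes].start_pfn = end_pfn;
				pmem_holes[npmem_holes++].pages = hole_pages;
				end_pfn += hole_pages;
			}
			end_pfn += pmem_ranges[i].pages;
		}
		pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
		npmem_ranges = 1;
	}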
This patch also makes significant changes to entry.S to fix a number of
64-bit cleanliness problems (which weren't seen until we tried to use
64-bit addresses with bits set in the high word).
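A contrived user-space illustration of the failure mode (none of this is
code from the patch): on an LP64 kernel a pointer no longer fits in a
32-bit type, so any cast through __u32, or a 32-bit printk format applied
to an address, silently drops the high word.

	#include <stdio.h>

	int main(void)
	{
		/* An address with bits set in the high word, as on a
		 * large-memory 64-bit kernel. */
		unsigned long addr = 0x40000000a0001000UL;

		unsigned int narrow = (unsigned int)addr; /* high word lost */

		printf("full:      %016lx\n", addr);   /* 40000000a0001000 */
		printf("truncated: %08x\n", narrow);   /* a0001000: wrong */
		return 0;
	}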
Hey, this is also a "low memory" patch. I managed to free up an additional
megabyte of memory, which will help machines that don't have much to begin
with.
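The savings mostly come from setup_bootmem(): rather than reserving
everything from 0 up to the end of the bootmaps in one call (the old HACK,
which the removed comment in the init.c hunk below admits was wasting
memory), the patch reserves only what is actually in use. Condensed from
that hunk (the formerly reserved gaps are presumably where most of the
reclaimed megabyte comes from):

	reserve_bootmem_node(NODE_DATA(0), 0UL,
		(unsigned long)(PAGE0->mem_free + PDC_CONSOLE_IO_IODC_SIZE));
	reserve_bootmem_node(NODE_DATA(0), __pa((unsigned long)&_text),
		(unsigned long)(&_end - &_text));
	reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
		((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT));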
I also fixed the fault/interrupt paths to use shadowed general registers
instead of temporary control registers.
John
--- Documentation/parisc/registers.old Thu Mar 22 05:19:44 2001
+++ Documentation/parisc/registers Thu Mar 22 08:32:27 2001
@@ -21,12 +21,12 @@ CR17-CR22 interruption parameters
CR23 (EIRR) read for pending interrupts/write clears bits
CR24 (TR 0) Kernel Space Page Directory Pointer
CR25 (TR 1) User Space Page Directory Pointer
-CR26 (TR 2)
-CR27 (TR 3)
-CR28 (TR 4) used by interruption handlers
-CR29 (TR 5) used by interruption handlers
+CR26 (TR 2) not used
+CR27 (TR 3) Reserved for libpthread support
+CR28 (TR 4) not used
+CR29 (TR 5) not used
CR30 (TR 6) current / 0
-CR31 (TR 7) used by interruption handlers
+CR31 (TR 7) not used
Space Registers (kernel mode)
--- arch/parisc/kernel/traps.c.old Sat Mar 17 02:33:09 2001
+++ arch/parisc/kernel/traps.c Thu Mar 22 07:57:33 2001
@@ -64,15 +64,16 @@ static void printbinary(unsigned long x,
}
}
-void show_regs(struct pt_regs *regs)
-{
- int i;
#ifdef __LP64__
-#define RFMT " %016lx"
+#define RFMT "%016lx"
#else
-#define RFMT " %08lx"
+#define RFMT "%08lx"
#endif
+void show_regs(struct pt_regs *regs)
+{
+ int i;
+
printk("\n"); /* don't want to have that pretty register dump messed up */
printk(" YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\nPSW: ");
@@ -83,7 +84,7 @@ void show_regs(struct pt_regs *regs)
int j;
printk("r%d-%d\t", i, i + 3);
for (j = 0; j < 4; j++) {
- printk(RFMT, i + j == 0 ? 0 : regs->gr[i + j]);
+ printk(" " RFMT, i + j == 0 ? 0 : regs->gr[i + j]);
}
printk("\n");
}
@@ -92,7 +93,7 @@ void show_regs(struct pt_regs *regs)
int j;
printk("sr%d-%d\t", i, i + 3);
for (j = 0; j < 4; j++) {
- printk(RFMT, regs->sr[i + j]);
+ printk(" " RFMT, regs->sr[i + j]);
}
printk("\n");
}
@@ -105,9 +106,9 @@ void show_regs(struct pt_regs *regs)
}
#endif
- printk("\nIASQ:" RFMT RFMT " IAOQ:" RFMT RFMT "\n",
+ printk("\nIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
- printk(" IIR: %08lx ISR:" RFMT " IOR:" RFMT "\nORIG_R28:" RFMT
+ printk(" IIR: %08lx ISR: " RFMT " IOR: " RFMT "\nORIG_R28: " RFMT
"\n", regs->iir, regs->isr, regs->ior, regs->orig_r28);
}
@@ -268,19 +269,6 @@ void handle_interruption(int code, struc
else
sti();
-#ifdef __LP64__
-
- /*
- * FIXME:
- * For 32 bit processes we don't want the b bits (bits 0 & 1)
- * in the ior. This is more appropriately handled in the tlb
- * miss handlers. Changes need to be made to support addresses
- * >32 bits for 64 bit processes.
- */
-
- regs->ior &= 0x3FFFFFFFFFFFFFFFUL;
-#endif
-
#if 0
printk("interrupted with code %d, regs %p\n", code, regs);
show_regs(regs);
@@ -458,17 +446,17 @@ void show_stack(struct pt_regs *regs)
if (regs->cr30 == 0) {
unsigned long sp = regs->gr[30];
- __u32 *stackptr;
- __u32 *dumpptr;
+ unsigned int *stackptr;
+ unsigned int *dumpptr;
/* Stack Dump! */
- stackptr = (__u32 *)sp;
- dumpptr = (__u32 *)(sp & ~(INIT_TASK_SIZE - 1));
+ stackptr = (unsigned int *)sp;
+ dumpptr = (unsigned int *)(sp & ~(INIT_TASK_SIZE - 1));
printk("\nDumping Stack from %p to %p:\n",dumpptr,stackptr);
while (dumpptr < stackptr) {
- printk("%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
- ((__u32)dumpptr) & 0xffff,
+ printk("%04lx %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ ((unsigned long)dumpptr) & 0xffff,
dumpptr[0], dumpptr[1], dumpptr[2], dumpptr[3],
dumpptr[4], dumpptr[5], dumpptr[6], dumpptr[7]);
dumpptr += 8;
@@ -490,7 +478,7 @@ void parisc_terminate(char *msg, struct
show_stack(regs);
#endif
- printk("\n%s: Code=%d regs=%p (Addr=%08lx)\n",msg,code,regs,offset);
+ printk("\n%s: Code=%d regs=%p (Addr=" RFMT ")\n",msg,code,regs,offset);
show_regs(regs);
for(;;)
--- arch/parisc/kernel/entry.S.old Sat Mar 17 02:33:09 2001
+++ arch/parisc/kernel/entry.S Thu Mar 22 07:57:33 2001
@@ -34,13 +34,6 @@
* - handle in assembly and use shadowed registers only
* - save registers to kernel stack and handle in assembly or C */
- .text
-
-#ifdef __LP64__
- .level 2.0w
-#else
- .level 2.0
-#endif
#include <asm/assembly.h> /* for LDREG/STREG defines */
#include <asm/pgtable.h>
@@ -48,15 +41,30 @@
#include <asm/signal.h>
#include <asm/unistd.h>
+/* FIXME! asm_get_current macro has hardcoded dependency on kernel stack size */
+
#ifdef __LP64__
#define FRAME_SIZE 128
+#define CMPIB cmpib,*
+
+ .macro asm_get_current reg
+ depdi 0,63,14,\reg
+ .endm
+
+ .level 2.0w
#else
#define FRAME_SIZE 64
+#define CMPIB cmpib,
+ .macro asm_get_current reg
+ depi 0,31,14,\reg
+ .endm
+
+ .level 2.0
#endif
/* Switch to virtual mapping, trashing only %r1 */
- .macro virt_map rfi_type
- mtsm %r0
+ .macro virt_map
+ rsm PSW_SM_Q,%r0
tovirt_r1 %r29
mfsp %sr7, %r1
mtsp %r1, %sr3
@@ -74,48 +82,72 @@
mtctl %r1, %cr18
ldo 4(%r1), %r1
mtctl %r1, %cr18
- \rfi_type
+ rfir
nop
4:
.endm
+ /*
+ * The get_stack macro is responsible for determining the
+ * kernel stack value. If cr30 is zero then we are already
+ * on the kernel stack, so we just use the existing sp.
+ * Otherwise, a non-zero value in cr30 indicates we just
+ * faulted in userland, and cr30 contains the value to use
+ * for the kernel stack pointer.
+ *
+ * Note that we use shadowed registers for temps until
+ * we can save %r26 and %r29. %r26 is used to preserve
+ * %r8 (a shadowed register) which temporarily contained
+ * either the fault type ("code") or the eirr. We need
+ * to use a non-shadowed register to carry the value over
+ * the rfir in virt_map. We use %r26 since this value winds
+ * up being passed as the argument to either do_irq_mask
+ * or handle_interruption. %r29 is used to hold a pointer to
+ * the register save area, and once again, it needs to
+ * be a non-shadowed register so that it survives the rfir.
+ */
+
.macro get_stack
/* TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame */
mfctl %cr30, %r1
- comib,=,n 0, %r1, 0f /* forward so predicted not taken */
+ CMPIB=,n 0, %r1, 0f /* forward so predicted not taken */
/* we save the registers in the task struct */
- tophys %r1,%r29
- ldo TASK_REGS(%r29),%r29
- STREG %r30, PT_GR30(%r29)
- STREG %r1, PT_CR30(%r29)
+ tophys %r1,%r9
+ ldo TASK_REGS(%r9),%r9
+ STREG %r30, PT_GR30(%r9)
+ STREG %r1, PT_CR30(%r9)
ldo TASK_SZ_ALGN(%r1), %r30
b 1f /* unconditional so predicted taken */
mtctl %r0,%cr30
0:
/* we put a struct pt_regs on the stack and save the registers there */
- tophys %r30,%r29
+ tophys %r30,%r9
ldo PT_SZ_ALGN(%r30),%r30
- STREG %r30,PT_GR30(%r29)
- STREG %r0,PT_CR30(%r29)
+ STREG %r30,PT_GR30(%r9)
+ STREG %r0,PT_CR30(%r9)
1:
+ STREG %r29,PT_GR29(%r9)
+ STREG %r26,PT_GR26(%r9)
+ copy %r9,%r29
+ copy %r8,%r26
.endm
- .macro rest_stack regs
- LDREG PT_CR30(\regs), %r1
- comib,=,n 0, %r1, 2f/* forward so predicted not taken */
+ .macro rest_stack
+ LDREG PT_CR30(%r29), %r1
+ CMPIB=,n 0, %r1, 2f/* forward so predicted not taken */
/* we restore the registers out of the task struct */
mtctl %r1, %cr30
- LDREG PT_GR1(\regs), %r1
- LDREG PT_GR30(\regs),%r30
+ LDREG PT_GR1(%r29), %r1
+ LDREG PT_GR30(%r29),%r30
b 3f
- LDREG PT_GR29(\regs),%r29
+ LDREG PT_GR29(%r29),%r29
2:
/* we take a struct pt_regs off the stack */
- LDREG PT_GR1(\regs), %r1
- LDREG PT_GR29(\regs), %r29
+ LDREG PT_GR1(%r29), %r1
+ LDREG PT_GR29(%r29), %r29
ldo -PT_SZ_ALGN(%r30), %r30
3:
.endm
@@ -123,23 +155,17 @@
/* default interruption handler
* (calls traps.c:handle_interruption) */
.macro def code
- mtctl %r29, %cr31
- mtctl %r1, %cr28
- ldi \code, %r1
b intr_save
- mtctl %r1, %cr29
+ ldi \code, %r8
.align 32
.endm
/* Interrupt interruption handler
* (calls irq.c:do_irq_mask) */
.macro extint code
- mtctl %r29, %cr31
- mtctl %r1, %cr28
- mfctl %cr23, %r1
- mtctl %r1, %cr23
+ mfctl %cr23, %r8
b intr_extint
- mtctl %r1, %cr29
+ mtctl %r8, %cr23
.align 32
.endm
@@ -355,6 +381,8 @@
.export fault_vector_20
+ .text
+
.align 4096
fault_vector_20:
@@ -537,7 +565,7 @@ __execve:
bl sys_execve, %r2
copy %r16, %r26
- comib,<>,n 0,%r28,__execve_failed
+ cmpib,<>,n 0,%r28,__execve_failed
b intr_return
STREG %r17, PT_CR30(%r16)
@@ -595,8 +623,7 @@ _switch_to_ret:
.export syscall_exit_rfi
syscall_exit_rfi:
copy %r30,%r16
- /* FIXME! depi below has hardcoded dependency on kernel stack size */
- depi 0,31,14,%r16 /* get task pointer */
+ asm_get_current %r16
ldo TASK_REGS(%r16),%r16
/* Force iaoq to userspace, as the user has had access to our current
* context via sigcontext.
@@ -620,8 +647,7 @@ intr_return:
#ifdef CONFIG_SMP
copy %r30,%r1
- /* FIXME! depi below has hardcoded dependency on kernel stack size */
- depi 0,31,14,%r1 /* get task pointer */
+ asm_get_current %r1
ldw TASK_PROCESSOR(%r1),%r1 /* get cpu # - int */
/* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) amount
** irq_stat[] is defined using ____cacheline_aligned.
@@ -637,29 +663,28 @@ intr_return:
ldw IRQSTAT_SI_ACTIVE(%r19),%r20 /* hardirq.h: unsigned int */
ldw IRQSTAT_SI_MASK(%r19),%r19 /* hardirq.h: unsigned int */
and %r19,%r20,%r20
- comib,<>,n 0,%r20,intr_do_softirq /* forward */
+ cmpib,<>,n 0,%r20,intr_do_softirq /* forward */
intr_check_resched:
/* check for reschedule */
copy %r30,%r1
- /* FIXME! depi below has hardcoded dependency on kernel stack size */
- depi 0,31,14,%r1 /* get task pointer */
+ asm_get_current %r1
LDREG TASK_NEED_RESCHED(%r1),%r19 /* sched.h: long need_resched */
- comib,<>,n 0,%r19,intr_do_resched /* forward */
+ CMPIB<>,n 0,%r19,intr_do_resched /* forward */
intr_check_sig:
/* As above */
copy %r30,%r1
- depi 0,31,14,%r1 /* get task pointer */
+ asm_get_current %r1
ldw TASK_SIGPENDING(%r1),%r19 /* sched.h: int sigpending */
- comib,<>,n 0,%r19,intr_do_signal /* forward */
+ cmpib,<>,n 0,%r19,intr_do_signal /* forward */
intr_restore:
- copy %r16, %r1
- ldo PT_FR31(%r1), %r29
- rest_fp %r29
- rest_general %r1
+ copy %r16,%r29
+ ldo PT_FR31(%r29),%r1
+ rest_fp %r1
+ rest_general %r29
ssm 0,%r0
nop
nop
@@ -668,10 +693,10 @@ intr_restore:
nop
nop
nop
- tophys %r1,%r29
- mtsm %r0
+ tophys_r1 %r29
+ rsm (PSW_SM_Q|PSW_SM_P|PSW_SM_D|PSW_SM_I),%r0
rest_specials %r29
- rest_stack %r29
+ rest_stack
rfi
nop
nop
@@ -697,7 +722,7 @@ intr_do_softirq:
intr_do_resched:
/* Only do reschedule if we are returning to user space */
LDREG PT_SR7(%r16), %r20
- comib,= 0,%r20,intr_restore /* backward */
+ CMPIB= 0,%r20,intr_restore /* backward */
nop
#ifdef __LP64__
@@ -714,7 +739,7 @@ intr_do_resched:
intr_do_signal:
/* Only do signals if we are returning to user space */
LDREG PT_SR7(%r16), %r20
- comib,= 0,%r20,intr_restore /* backward */
+ CMPIB= 0,%r20,intr_restore /* backward */
nop
copy %r0, %r24 /* unsigned long in_syscall */
@@ -729,14 +754,15 @@ intr_do_signal:
b intr_restore
nop
- /* CR28 - saved GR1
- * CR29 - argument for do_irq_mask */
+ /*
+ * External interrupts. r8 contains argument for do_irq_mask.
+ * get_stack moves value of r8 to r26.
+ */
- /* External interrupts */
intr_extint:
get_stack
save_specials %r29
- virt_map rfi
+ virt_map
save_general %r29
ldo PT_FR0(%r29), %r24
@@ -761,7 +787,7 @@ intr_extint:
nop
#endif
- b intr_return
+ b intr_return
nop
/* Generic interruptions (illegal insn, unaligned, page fault, etc) */
@@ -772,12 +798,48 @@ intr_save:
get_stack
save_specials %r29
- mfctl %cr20, %r1
- STREG %r1, PT_ISR(%r29)
- mfctl %cr21, %r1
- STREG %r1, PT_IOR(%r29)
+ /* If this trap is an itlb miss, skip saving/adjusting isr/ior */
+
+ /*
+ * FIXME: 1) Use a #define for the hardwired "6" below (and in
+ * traps.c).
+ * 2) Once we start executing code above 4 Gb, we need
+ * to adjust iasq/iaoq here in the same way we
+ * adjust isr/ior below.
+ */
+
+ CMPIB=,n 6,%r26,skip_save_ior
+
+ /* save_specials left ipsw value in r8 for us to test */
- virt_map rfi
+ mfctl %cr20, %r16 /* isr */
+ mfctl %cr21, %r17 /* ior */
+
+#ifdef __LP64__
+ /*
+ * If the interrupted code was running with W bit off (32 bit),
+ * clear the b bits (bits 0 & 1) in the ior.
+ */
+ extrd,u,*<> %r8,PSW_W_BIT,1,%r0
+ depdi 0,1,2,%r17
+
+ /*
+ * FIXME: This code has hardwired assumptions about the split
+ * between space bits and offset bits. This will change
+ * when we allow alternate page sizes.
+ */
+
+ /* adjust isr/ior. */
+
+ extrd,u %r16,63,7,%r1 /* get high bits from isr for ior */
+ depd %r1,31,7,%r17 /* deposit them into ior */
+ depdi 0,63,7,%r16 /* clear them from isr */
+#endif
+ STREG %r16, PT_ISR(%r29)
+ STREG %r17, PT_IOR(%r29)
+
+skip_save_ior:
+ virt_map
save_general %r29
ldo PT_FR0(%r29), %r25
@@ -830,9 +892,9 @@ intr_save:
dtlb_miss_20w:
- extrd,u spc,31,7,t1 /* adjust va */
+ extrd,u spc,63,7,t1 /* adjust va */
depd t1,31,7,va /* adjust va */
- depdi 0,31,7,spc /* adjust space */
+ depdi 0,63,7,spc /* adjust space */
mfctl %cr25,ptp /* Assume user space miss */
or,*<> %r0,spc,%r0 /* If it is user space, nullify */
mfctl %cr24,ptp /* Load kernel pgd instead */
@@ -840,7 +902,7 @@ dtlb_miss_20w:
mfsp %sr7,t0 /* Get current space */
or,*= %r0,t0,%r0 /* If kernel, nullify following test */
- comb,<>,n t0,spc,dtlb_fault /* forward */
+ cmpb,*<>,n t0,spc,dtlb_fault /* forward */
/* First level page table lookup */
@@ -890,7 +952,7 @@ dtlb_check_alias_20w:
/* Check to see if fault is in the temporary alias region */
- cmpib,<>,n 0,spc,dtlb_fault /* forward */
+ cmpib,*<>,n 0,spc,dtlb_fault /* forward */
ldil L%(TMPALIAS_MAP_START),t0
copy va,t1
depdi 0,63,23,t1
@@ -914,9 +976,9 @@ dtlb_check_alias_20w:
nadtlb_miss_20w:
- extrd,u spc,31,7,t1 /* adjust va */
+ extrd,u spc,63,7,t1 /* adjust va */
depd t1,31,7,va /* adjust va */
- depdi 0,31,7,spc /* adjust space */
+ depdi 0,63,7,spc /* adjust space */
mfctl %cr25,ptp /* Assume user space miss */
or,*<> %r0,spc,%r0 /* If it is user space, nullify */
mfctl %cr24,ptp /* Load kernel pgd instead */
@@ -924,7 +986,7 @@ nadtlb_miss_20w:
mfsp %sr7,t0 /* Get current space */
or,*= %r0,t0,%r0 /* If kernel, nullify following test */
- comb,<>,n t0,spc,nadtlb_fault /* forward */
+ cmpb,*<>,n t0,spc,nadtlb_fault /* forward */
/* First level page table lookup */
@@ -980,7 +1042,7 @@ dtlb_miss_11:
mfsp %sr7,t0 /* Get current space */
or,= %r0,t0,%r0 /* If kernel, nullify following test */
- comb,<>,n t0,spc,dtlb_fault /* forward */
+ cmpb,<>,n t0,spc,dtlb_fault /* forward */
/* First level page table lookup */
@@ -1063,7 +1125,7 @@ nadtlb_miss_11:
mfsp %sr7,t0 /* Get current space */
or,= %r0,t0,%r0 /* If kernel, nullify following test */
- comb,<>,n t0,spc,nadtlb_fault /* forward */
+ cmpb,<>,n t0,spc,nadtlb_fault /* forward */
/* First level page table lookup */
@@ -1119,7 +1181,7 @@ dtlb_miss_20:
mfsp %sr7,t0 /* Get current space */
or,= %r0,t0,%r0 /* If kernel, nullify following test */
- comb,<>,n t0,spc,dtlb_fault /* forward */
+ cmpb,<>,n t0,spc,dtlb_fault /* forward */
/* First level page table lookup */
@@ -1192,7 +1254,7 @@ nadtlb_miss_20:
mfsp %sr7,t0 /* Get current space */
or,= %r0,t0,%r0 /* If kernel, nullify following test */
- comb,<>,n t0,spc,nadtlb_fault /* forward */
+ cmpb,<>,n t0,spc,nadtlb_fault /* forward */
/* First level page table lookup */
@@ -1256,11 +1318,11 @@ nadtlb_emulate:
bb,>=,n %r9,26,nadtlb_nullify /* m bit not set, just nullify */
b,l get_register,%r25
extrw,u %r9,15,5,%r8 /* Get index register # */
- cmpib,=,n -1,%r1,nadtlb_fault /* have to use slow path */
+ CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
copy %r1,%r24
b,l get_register,%r25
extrw,u %r9,10,5,%r8 /* Get base register # */
- cmpib,=,n -1,%r1,nadtlb_fault /* have to use slow path */
+ CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
b,l set_register,%r25
add,l %r1,%r24,%r1 /* doesn't affect c/b bits */
@@ -1281,9 +1343,9 @@ itlb_miss_20w:
* on the gateway page which is in the kernel address space.
*/
- extrd,u spc,31,7,t1 /* adjust va */
+ extrd,u spc,63,7,t1 /* adjust va */
depd t1,31,7,va /* adjust va */
- depdi 0,31,7,spc /* adjust space */
+ depdi 0,63,7,spc /* adjust space */
cmpib,*= 0,spc,itlb_miss_kernel_20w
extrd,u va,33,9,t1 /* Get pgd index */
@@ -1350,14 +1412,14 @@ itlb_miss_11:
* on the gateway page which is in the kernel address space.
*/
- comib,= 0,spc,itlb_miss_kernel_11
+ cmpib,= 0,spc,itlb_miss_kernel_11
extru va,9,10,t1 /* Get pgd index */
mfctl %cr25,ptp /* load user pgd */
mfsp %sr7,t0 /* Get current space */
or,= %r0,t0,%r0 /* If kernel, nullify following test */
- comb,<>,n t0,spc,itlb_fault /* forward */
+ cmpb,<>,n t0,spc,itlb_fault /* forward */
/* First level page table lookup */
@@ -1417,14 +1479,14 @@ itlb_miss_20:
* on the gateway page which is in the kernel address space.
*/
- comib,= 0,spc,itlb_miss_kernel_20
+ cmpib,= 0,spc,itlb_miss_kernel_20
extru va,9,10,t1 /* Get pgd index */
mfctl %cr25,ptp /* load user pgd */
mfsp %sr7,t0 /* Get current space */
or,= %r0,t0,%r0 /* If kernel, nullify following test */
- comb,<>,n t0,spc,itlb_fault /* forward */
+ cmpb,<>,n t0,spc,itlb_fault /* forward */
/* First level page table lookup */
@@ -1474,10 +1536,10 @@ itlb_miss_kernel_20:
dbit_trap_20w:
- extrd,u spc,31,7,t1 /* adjust va */
+ extrd,u spc,63,7,t1 /* adjust va */
depd t1,31,7,va /* adjust va */
depdi 0,1,2,va /* adjust va */
- depdi 0,31,7,spc /* adjust space */
+ depdi 0,63,7,spc /* adjust space */
mfctl %cr25,ptp /* Assume user space miss */
or,*<> %r0,spc,%r0 /* If it is user space, nullify */
mfctl %cr24,ptp /* Load kernel pgd instead */
@@ -1485,7 +1547,7 @@ dbit_trap_20w:
mfsp %sr7,t0 /* Get current space */
or,*= %r0,t0,%r0 /* If kernel, nullify following test */
- comb,<>,n t0,spc,dbit_fault /* forward */
+ cmpb,*<>,n t0,spc,dbit_fault /* forward */
/* First level page table lookup */
@@ -1539,7 +1601,7 @@ dbit_trap_11:
mfsp %sr7,t0 /* Get current space */
or,= %r0,t0,%r0 /* If kernel, nullify following test */
- comb,<>,n t0,spc,dbit_fault /* forward */
+ cmpb,<>,n t0,spc,dbit_fault /* forward */
/* First level page table lookup */
@@ -1594,7 +1656,7 @@ dbit_trap_20:
mfsp %sr7,t0 /* Get current space */
or,= %r0,t0,%r0 /* If kernel, nullify following test */
- comb,<>,n t0,spc,dbit_fault /* forward */
+ cmpb,<>,n t0,spc,dbit_fault /* forward */
/* First level page table lookup */
@@ -1636,57 +1698,24 @@ dbit_trap_20:
.import handle_interruption,code
kernel_bad_space:
- b tlb_fault
- ldi 31,%r1 /* Use an unused code */
+ b intr_save
+ ldi 31,%r8 /* Use an unused code */
dbit_fault:
- b tlb_fault
- ldi 20,%r1
+ b intr_save
+ ldi 20,%r8
itlb_fault:
- b tlb_fault
- ldi 6,%r1
+ b intr_save
+ ldi 6,%r8
nadtlb_fault:
- b tlb_fault
- ldi 17,%r1
+ b intr_save
+ ldi 17,%r8
dtlb_fault:
- ldi 15,%r1
-
- /* Fall Through */
-
-tlb_fault:
- mtctl %r1,%cr29
- mtctl %r29,%cr31
-
- get_stack
- save_specials %r29 /* Note this saves a trashed r1 */
-
- SAVE_CR (%cr20, PT_ISR(%r29))
- SAVE_CR (%cr21, PT_IOR(%r29))
-
- virt_map rfir
-
- STREG %r1,PT_GR1(%r29) /* save good value after rfir */
-
- save_general %r29
-
- ldo PT_FR0(%r29), %r25
- save_fp %r25
-
- loadgp
-
- copy %r29, %r25
-
-#ifdef __LP64__
- ldo -16(%r30),%r29 /* Reference param save area */
-#endif
- bl handle_interruption, %r2
- copy %r25, %r16
-
- b intr_return
- nop
+ b intr_save
+ ldi 15,%r8
/* Register saving semantics for system calls:
@@ -1858,7 +1887,7 @@ sys_vfork_wrapper:
/* If exec succeeded we need to load the args */
ldo -1024(%r0),%r1
- comb,>>= %r28,%r1,error_\execve
+ cmpb,>>= %r28,%r1,error_\execve
copy %r2,%r19
error_\execve:
@@ -2012,20 +2041,20 @@ syscall_check_bh:
ldw IRQSTAT_SI_ACTIVE(%r19),%r20 /* hardirq.h: unsigned int */
ldw IRQSTAT_SI_MASK(%r19),%r19 /* hardirq.h: unsigned int */
and %r19,%r20,%r20
- comib,<>,n 0,%r20,syscall_do_softirq /* forward */
+ cmpib,<>,n 0,%r20,syscall_do_softirq /* forward */
syscall_check_resched:
/* check for reschedule */
LDREG TASK_NEED_RESCHED-TASK_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* long */
- comib,<>,n 0,%r19,syscall_do_resched /* forward */
+ CMPIB<>,n 0,%r19,syscall_do_resched /* forward */
syscall_check_sig:
ldo -TASK_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
/* check for pending signals */
ldw TASK_SIGPENDING(%r1),%r19
- comib,<>,n 0,%r19,syscall_do_signal /* forward */
+ cmpib,<>,n 0,%r19,syscall_do_signal /* forward */
syscall_restore:
/* disable interrupts while dicking with the kernel stack, */
--- arch/parisc/kernel/head.S.old Sat Mar 17 02:33:09 2001
+++ arch/parisc/kernel/head.S Sat Mar 17 04:06:28 2001
@@ -199,8 +199,8 @@ $is_pa20:
$install_iva:
mtctl %r10,%cr14
- /* Disable (most) interruptions */
- mtsm %r0
+ /* Disable Q bit so we can load the iia queue */
+ rsm PSW_SM_Q,%r0
/* kernel PSW:
* - no interruptions except HPMC and TOC (which are handled by PDC)
--- arch/parisc/kernel/real2.S.old Sat Mar 17 02:33:09 2001
+++ arch/parisc/kernel/real2.S Sat Mar 17 04:03:37 2001
@@ -153,7 +153,7 @@ rfi_virt2real:
nop
nop
- mtsm 0 /* disable interruptions */
+ rsm (PSW_SM_Q|PSW_SM_I),%r0 /* disable Q & I bits to load iia queue */
mtctl 0, %cr17 /* space 0 */
mtctl 0, %cr17
load32 PA(rfi_v2r_1), %r1
@@ -190,7 +190,7 @@ rfi_real2virt:
nop
nop
- mtsm 0 /* disable interruptions */
+ rsm PSW_SM_Q,%r0 /* disable Q bit to load iia queue */
mtctl 0, %cr17 /* space 0 */
mtctl 0, %cr17
load32 (rfi_r2v_1), %r1
--- arch/parisc/kernel/pacache.S.old Sat Mar 17 02:33:09 2001
+++ arch/parisc/kernel/pacache.S Sat Mar 17 04:01:40 2001
@@ -66,7 +66,7 @@ __flush_tlb_all:
* consolidated.
*/
- rsm PSW_I,%r19 /* relied upon translation! */
+ rsm PSW_SM_I,%r19 /* relied upon translation! */
nop
nop
nop
@@ -75,7 +75,7 @@ __flush_tlb_all:
nop
nop
- mtsm %r0 /* Turn off Q bit */
+ rsm PSW_SM_Q,%r0 /* Turn off Q bit to load iia queue */
ldil L%REAL_MODE_PSW, %r1
ldo R%REAL_MODE_PSW(%r1), %r1
mtctl %r1, %cr22
@@ -180,7 +180,7 @@ fdtdone:
/* Switch back to virtual mode */
- mtsm 0 /* clear Q bit */
+ rsm PSW_SM_Q,%r0 /* clear Q bit to load iia queue */
ldil L%KERNEL_PSW, %r1
ldo R%KERNEL_PSW(%r1), %r1
or %r1,%r19,%r1 /* Set I bit if set on entry */
@@ -259,7 +259,7 @@ flush_data_cache:
LDREG DCACHE_STRIDE(%r1),%arg1
LDREG DCACHE_COUNT(%r1),%arg2
LDREG DCACHE_LOOP(%r1),%arg3
- rsm PSW_I,%r22
+ rsm PSW_SM_I,%r22
ADDIB= -1,%arg3,fdoneloop /* Preadjust and test */
movb,<,n %arg3,%r31,fdsync /* If loop < 0, do sync */
@@ -789,7 +789,7 @@ disable_sr_hashing_asm:
nop
nop
- mtsm %r0 /* disable interruptions */
+ rsm (PSW_SM_Q|PSW_SM_I),%r0 /* disable Q&I to load the iia queue */
ldil L%REAL_MODE_PSW, %r1
ldo R%REAL_MODE_PSW(%r1), %r1
mtctl %r1, %cr22
@@ -841,7 +841,7 @@ srdis_done:
/* Switch back to virtual mode */
- mtsm 0 /* clear Q bit */
+ rsm PSW_SM_Q,%r0 /* clear Q bit to load iia queue */
ldil L%KERNEL_PSW, %r1
ldo R%KERNEL_PSW(%r1), %r1
mtctl %r1, %cr22
--- arch/parisc/lib/lusercopy.S.old Sat Mar 17 02:33:09 2001
+++ arch/parisc/lib/lusercopy.S Sat Mar 17 04:13:38 2001
@@ -46,7 +46,11 @@
.macro get_sr
copy %r30,%r1 ;! Get task structure
+#ifdef __LP64__
+ depdi 0,63,14,%r1 ;! into r1
+#else
depi 0,31,14,%r1 ;! into r1
+#endif
ldw TASK_SEGMENT(%r1),%r22
mfsp %sr3,%r1
or,<> %r22,%r0,%r0
--- arch/parisc/mm/init.c.old Sat Mar 17 02:33:09 2001
+++ arch/parisc/mm/init.c Thu Mar 22 09:06:22 2001
@@ -25,6 +25,13 @@ extern char _text; /* start of kernel co
extern int data_start;
extern char _end; /* end of BSS, defined by linker */
+#ifdef CONFIG_DISCONTIGMEM
+struct node_map_data node_data[MAX_PHYSMEM_RANGES];
+bootmem_data_t bmem_data[MAX_PHYSMEM_RANGES];
+unsigned char *chunkmap;
+unsigned int maxchunkmap;
+#endif
+
/*
** KLUGE ALERT!
**
@@ -79,6 +86,38 @@ int npmem_ranges;
#define MAX_MEM (3584U*1024U*1024U)
#endif /* !__LP64__ */
+static unsigned long mem_limit = MAX_MEM;
+
+static void __init mem_limit_func(void)
+{
+ char *cp, *end;
+ unsigned long limit;
+ extern char saved_command_line[];
+
+ /* We need this before __setup() functions are called */
+
+ limit = MAX_MEM;
+ for (cp = saved_command_line; *cp; ) {
+ if (memcmp(cp, "mem=", 4) == 0) {
+ cp += 4;
+ limit = memparse(cp, &end);
+ if (end != cp)
+ break;
+ cp = end;
+ } else {
+ while (*cp != ' ' && *cp)
+ ++cp;
+ while (*cp == ' ')
+ ++cp;
+ }
+ }
+
+ if (limit < mem_limit)
+ mem_limit = limit;
+}
+
+#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)
+
static void __init setup_bootmem(void)
{
unsigned long bootmap_size;
@@ -86,15 +125,75 @@ static void __init setup_bootmem(void)
unsigned long bootmap_pages;
unsigned long bootmap_start_pfn;
unsigned long bootmap_pfn;
+#ifndef CONFIG_DISCONTIGMEM
+ physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
+ int npmem_holes;
+#endif
int i;
disable_sr_hashing(); /* Turn off space register hashing */
+#ifdef CONFIG_DISCONTIGMEM
+ /*
+ * The below is still true as of 2.4.2. If this is ever fixed,
+ * we can remove this warning!
+ */
+
+ printk("\n\nCONFIG_DISCONTIGMEM is enabled, which is probably a mistake. This\n");
+ printk("option can lead to heavy swapping, even when there are gigabytes\n");
+ printk("of free memory.\n\n");
+#endif
+
#ifdef __LP64__
- /* Print the memory ranges, even if we are not going to use them */
+#ifndef CONFIG_DISCONTIGMEM
+ /*
+ * Sort the ranges. Since the number of ranges is typically
+ * small, and performance is not an issue here, just do
+ * a simple insertion sort.
+ */
+
+ for (i = 1; i < npmem_ranges; i++) {
+ int j;
+
+ for (j = i; j > 0; j--) {
+ unsigned long tmp;
+
+ if (pmem_ranges[j-1].start_pfn <
+ pmem_ranges[j].start_pfn) {
+
+ break;
+ }
+ tmp = pmem_ranges[j-1].start_pfn;
+ pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
+ pmem_ranges[j].start_pfn = tmp;
+ tmp = pmem_ranges[j-1].pages;
+ pmem_ranges[j-1].pages = pmem_ranges[j].pages;
+ pmem_ranges[j].pages = tmp;
+ }
+ }
+
+ /*
+ * Throw out ranges that are too far apart (controlled by
+ * MAX_GAP). If CONFIG_DISCONTIGMEM wasn't implemented so
+ * poorly, we would recommend enabling that option, but,
+ * until it is fixed, this is the best way to go.
+ */
+
+ for (i = 1; i < npmem_ranges; i++) {
+ if (pmem_ranges[i].start_pfn -
+ (pmem_ranges[i-1].start_pfn +
+ pmem_ranges[i-1].pages) > MAX_GAP) {
+ npmem_ranges = i;
+ break;
+ }
+ }
+#endif
if (npmem_ranges > 1) {
+
+ /* Print the memory ranges */
+
printk("Memory Ranges:\n");
for (i = 0; i < npmem_ranges; i++) {
@@ -108,27 +207,7 @@ static void __init setup_bootmem(void)
}
}
-#ifndef CONFIG_DISCONTIGMEM
- if (npmem_ranges > 1) {
- printk("\n\n\n\n");
- printk("WARNING! This machine has additional memory in discontiguous\n");
- printk(" ranges, however CONFIG_DISCONTIGMEM needs to be enabled\n");
- printk(" in order to access it.\n\n");
- printk(" Memory will be limited to the first range reported above.\n\n\n\n");
-
- npmem_ranges = 1;
- }
-#endif
-
-#else /* !__LP64__ */
-
-#ifdef CONFIG_DISCONTIGMEM
- printk("\n\n\n\n");
- printk("32 bit kernels do not support discontiguous memory, so there is\n");
- printk("no good reason to enable CONFIG_DISCONTIGMEM. There is a slight\n");
- printk("performance penalty for doing so.\n\n\n\n");
-#endif
-#endif /* !__LP64__ */
+#endif /* __LP64__ */
/*
* For 32 bit kernels we limit the amount of memory we can
@@ -139,34 +218,66 @@ static void __init setup_bootmem(void)
* to work with multiple memory ranges).
*/
- mem_max = 0;
+ mem_limit_func(); /* check for "mem=" argument */
- for (i = 0; (i < npmem_ranges) && (mem_max < MAX_MEM); i++) {
+ mem_max = 0;
+ for (i = 0; i < npmem_ranges; i++) {
unsigned long rsize;
rsize = pmem_ranges[i].pages << PAGE_SHIFT;
- if ((mem_max + rsize) > MAX_MEM) {
- printk("Memory truncated to %ld Mb\n", MAX_MEM >> 20);
- pmem_ranges[i].pages = (MAX_MEM >> PAGE_SHIFT)
- - (mem_max >> PAGE_SHIFT);
- mem_max = MAX_MEM;
- npmem_ranges = i + 1;
+ if ((mem_max + rsize) > mem_limit) {
+ printk("Memory truncated to %ld Mb\n", mem_limit >> 20);
+ if (mem_max == mem_limit)
+ npmem_ranges = i;
+ else {
+ pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT)
+ - (mem_max >> PAGE_SHIFT);
+ npmem_ranges = i + 1;
+ mem_max = mem_limit;
+ }
break;
}
mem_max += rsize;
}
- printk("\nTotal Memory: %ld Mb\n",mem_max >> 20);
+ printk("Total Memory: %ld Mb\n",mem_max >> 20);
+
+#ifndef CONFIG_DISCONTIGMEM
+
+ /* Merge the ranges, keeping track of the holes */
+
+ {
+ unsigned long end_pfn;
+ unsigned long hole_pages;
+
+ npmem_holes = 0;
+ end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
+ for (i = 1; i < npmem_ranges; i++) {
+
+ hole_pages = pmem_ranges[i].start_pfn - end_pfn;
+ if (hole_pages) {
+ pmem_holes[npmem_holes].start_pfn = end_pfn;
+ pmem_holes[npmem_holes++].pages = hole_pages;
+ end_pfn += hole_pages;
+ }
+ end_pfn += pmem_ranges[i].pages;
+ }
+
+ pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
+ npmem_ranges = 1;
+ }
+#endif
bootmap_pages = 0;
for (i = 0; i < npmem_ranges; i++)
bootmap_pages += bootmem_bootmap_pages(pmem_ranges[i].pages);
+ bootmap_start_pfn = PAGE_ALIGN(__pa((unsigned long) &_end)) >> PAGE_SHIFT;
- bootmap_start_pfn = __pa((unsigned long) &_end);
- bootmap_start_pfn = (bootmap_start_pfn + PAGE_SIZE) & PAGE_MASK;
- bootmap_start_pfn = bootmap_start_pfn >> PAGE_SHIFT;
-
+#ifdef CONFIG_DISCONTIGMEM
+ for (i = 0; i < npmem_ranges; i++)
+ node_data[i].pg_data.bdata = &bmem_data[i];
+#endif
/*
* Initialize and free the full range of memory in each range.
* Note that the only writing these routines do are to the bootmap,
@@ -187,15 +298,12 @@ static void __init setup_bootmem(void)
bootmap_pfn,
start_pfn,
(start_pfn + npages) );
-
free_bootmem_node(NODE_DATA(i),
(start_pfn << PAGE_SHIFT),
(npages << PAGE_SHIFT) );
-
bootmap_pfn += (bootmap_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
if ((start_pfn + npages) > max_pfn)
max_pfn = start_pfn + npages;
-
}
if ((bootmap_pfn - bootmap_start_pfn) != bootmap_pages) {
@@ -203,14 +311,27 @@ static void __init setup_bootmem(void)
BUG();
}
- /* Now, reserve bootmap, kernel text/data/bss, and pdc memory */
+ /* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */
- /* HACK! reserve 0 to end of bootmaps in one call
- * This will be fixed in a future checkin, but is equivalent
- * to what we were doing previously (wasting memory).
- */
+#define PDC_CONSOLE_IO_IODC_SIZE 32768
+
+ reserve_bootmem_node(NODE_DATA(0), 0UL,
+ (unsigned long)(PAGE0->mem_free + PDC_CONSOLE_IO_IODC_SIZE));
+ reserve_bootmem_node(NODE_DATA(0),__pa((unsigned long)&_text),
+ (unsigned long)(&_end - &_text));
+ reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
+ ((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT));
+
+#ifndef CONFIG_DISCONTIGMEM
+
+ /* reserve the holes */
- reserve_bootmem_node(NODE_DATA(0), 0UL, (bootmap_pfn << PAGE_SHIFT));
+ for (i = 0; i < npmem_holes; i++) {
+ reserve_bootmem_node(NODE_DATA(0),
+ (pmem_holes[i].start_pfn << PAGE_SHIFT),
+ (pmem_holes[i].pages << PAGE_SHIFT));
+ }
+#endif
#ifdef CONFIG_BLK_DEV_INITRD
printk("initrd: %08x-%08x\n", (int) initrd_start, (int) initrd_end);
@@ -255,10 +376,15 @@ unsigned long pcxl_dma_start;
void __init mem_init(void)
{
- max_mapnr = max_pfn;
+ int i;
+
high_memory = __va((max_pfn << PAGE_SHIFT));
+ max_mapnr = (virt_to_page(high_memory - 1) - mem_map) + 1;
+
+ num_physpages = 0;
+ for (i = 0; i < npmem_ranges; i++)
+ num_physpages += free_all_bootmem_node(NODE_DATA(i));
- num_physpages = free_all_bootmem_node(NODE_DATA(0));
printk("Memory: %luk available\n", num_physpages << (PAGE_SHIFT-10));
if (hppa_dma_ops == &pcxl_dma_ops) {
@@ -543,19 +669,69 @@ static void __init gateway_init(void)
void __init paging_init(void)
{
+ int i;
+
setup_bootmem();
pagetable_init();
gateway_init();
flush_all_caches(); /* start with a known state */
- /* Need to fix this for each node ... */
+ for (i = 0; i < npmem_ranges; i++) {
+ unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0, };
- {
- unsigned long zones_size[MAX_NR_ZONES] = { max_pfn, 0, 0, };
- unsigned long zholes_size[MAX_NR_ZONES] = { 0, 0, 0, };
+ zones_size[ZONE_DMA] = pmem_ranges[i].pages;
+ free_area_init_node(i,NODE_DATA(i),NULL,zones_size,
+ (pmem_ranges[i].start_pfn << PAGE_SHIFT),0);
+ }
+
+#ifdef CONFIG_DISCONTIGMEM
+ /*
+ * Initialize support for virt_to_page() macro.
+ *
+ * Note that MAX_ADDRESS is the largest virtual address that
+ * we can map. However, since we map all physical memory into
+ * the kernel address space, it also has an effect on the maximum
+ * physical address we can map (MAX_ADDRESS - PAGE_OFFSET).
+ */
+
+ maxchunkmap = MAX_ADDRESS >> CHUNKSHIFT;
+ chunkmap = (unsigned char *)alloc_bootmem(maxchunkmap);
+
+ for (i = 0; i < maxchunkmap; i++)
+ chunkmap[i] = BADCHUNK;
+
+ for (i = 0; i < npmem_ranges; i++) {
- free_area_init_node(0,NULL,NULL,zones_size,0UL,zholes_size);
+ ADJ_NODE_MEM_MAP(i) = NODE_MEM_MAP(i) - pmem_ranges[i].start_pfn;
+ {
+ unsigned long chunk_paddr;
+ unsigned long end_paddr;
+ int chunknum;
+
+ chunk_paddr = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
+ end_paddr = chunk_paddr + (pmem_ranges[i].pages << PAGE_SHIFT);
+ chunk_paddr &= CHUNKMASK;
+
+ chunknum = (int)CHUNKNUM(chunk_paddr);
+ while (chunk_paddr < end_paddr) {
+ if (chunknum >= maxchunkmap)
+ goto badchunkmap1;
+ if (chunkmap[chunknum] != BADCHUNK)
+ goto badchunkmap2;
+ chunkmap[chunknum] = (unsigned char)i;
+ chunk_paddr += CHUNKSZ;
+ chunknum++;
+ }
+ }
}
+
+ return;
+
+badchunkmap1:
+ panic("paging_init: Physical address exceeds maximum address space!\n");
+badchunkmap2:
+ panic("paging_init: Collision in chunk map array. CHUNKSZ needs to be smaller\n");
+#endif
}
/*
--- include/asm-parisc/page.h.old Sat Mar 17 02:33:09 2001
+++ include/asm-parisc/page.h Thu Mar 22 08:06:39 2001
@@ -109,8 +109,10 @@ extern int npmem_ranges;
#endif
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
-#define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT))
+#ifndef CONFIG_DISCONTIGMEM
+#define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT))
#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
+#endif /* !CONFIG_DISCONTIGMEM */
#endif /* __KERNEL__ */
--- include/asm-parisc/pgtable.h.old Sat Mar 17 02:33:09 2001
+++ include/asm-parisc/pgtable.h Thu Mar 22 08:49:51 2001
@@ -165,7 +165,6 @@ extern pte_t * __bad_pagetable(void);
#define pte_none(x) (!pte_val(x))
#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
#define pte_clear(xp) do { pte_val(*(xp)) = 0; } while (0)
-#define pte_pagenr(x) ((unsigned long)((pte_val(x) >> PAGE_SHIFT)))
#define pmd_none(x) (!pmd_val(x))
#define pmd_bad(x) ((pmd_val(x) & ~PAGE_MASK) != _PAGE_TABLE)
@@ -226,12 +225,21 @@ extern inline pte_t pte_mkwrite(pte_t pt
__pte; \
})
-#define mk_pte(page,pgprot) \
+#ifdef CONFIG_DISCONTIGMEM
+#define PAGE_TO_PA(page) \
+ ((((page)-(page)->zone->zone_mem_map) << PAGE_SHIFT) \
+ + ((page)->zone->zone_start_paddr))
+#else
+#define PAGE_TO_PA(page) ((page - mem_map) << PAGE_SHIFT)
+#endif
+
+#define mk_pte(page, pgprot) \
({ \
- pte_t __pte; \
+ pte_t __pte; \
+ \
+ pte_val(__pte) = ((unsigned long)(PAGE_TO_PA(page))) | \
+ pgprot_val(pgprot); \
\
- pte_val(__pte) = ((page)-mem_map)*PAGE_SIZE + \
- pgprot_val(pgprot); \
__pte; \
})
@@ -242,12 +250,16 @@ extern inline pte_t pte_mkwrite(pte_t pt
extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }
-/*
- * Permanent address of a page. Obviously must never be
- * called on a highmem page.
- */
+/* Permanent address of a page. On parisc we don't have highmem. */
+
#define page_address(page) ((page)->virtual)
-#define pte_page(x) (mem_map+pte_pagenr(x))
+
+#ifdef CONFIG_DISCONTIGMEM
+#define pte_page(x) (phys_to_page(pte_val(x)))
+#else
+#define pte_page(x) (mem_map+(pte_val(x) >> PAGE_SHIFT))
+#endif
+
#define pmd_page(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
--- include/asm-parisc/mmzone.h.old Sat Mar 17 21:55:12 2001
+++ include/asm-parisc/mmzone.h Wed Mar 21 06:59:28 2001
@@ -1,14 +1,31 @@
#ifndef _PARISC_MMZONE_H
#define _PARISC_MMZONE_H
-#define NODE_DATA(nid) ((pg_data_t *)printk("NODE_DATA not implemented!\n"))
-#define NODE_MEM_MAP(nid) printk("NODE_MEM_MAP not implemented!\n")
-#define virt_to_page(kaddr) ((struct page *)printk("virt to page not implemented!\n"))
-#define VALID_PAGE(page) (printk("VALID_PAGE not implemented!\n") == 0)
+struct node_map_data {
+ pg_data_t pg_data;
+ struct page *adj_node_mem_map;
+};
-#if 0
-#define virt_to_page(kaddr) (mem_map + ((__pa(kaddr)-vtop_start_paddr) >> PAGE_SHIFT))
+extern struct node_map_data node_data[];
+extern unsigned char *chunkmap;
+
+#define BADCHUNK ((unsigned char)0xff)
+#define CHUNKSZ (256*1024*1024)
+#define CHUNKSHIFT 28
+#define CHUNKMASK (~(CHUNKSZ - 1))
+#define CHUNKNUM(paddr) ((paddr) >> CHUNKSHIFT)
+
+#define NODE_DATA(nid) (&node_data[nid].pg_data)
+#define NODE_MEM_MAP(nid) (NODE_DATA(nid)->node_mem_map)
+#define ADJ_NODE_MEM_MAP(nid) (node_data[nid].adj_node_mem_map)
+
+#define phys_to_page(paddr) \
+ (ADJ_NODE_MEM_MAP(chunkmap[CHUNKNUM((paddr))]) \
+ + ((paddr) >> PAGE_SHIFT))
+
+#define virt_to_page(kvaddr) phys_to_page(__pa(kvaddr))
+
+/* This is kind of bogus, need to investigate performance of doing it right */
#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
-#endif
#endif /* !_PARISC_MMZONE_H */
--- include/asm-parisc/irq.h.old Mon Mar 19 08:47:01 2001
+++ include/asm-parisc/irq.h Thu Mar 22 08:49:51 2001
@@ -34,7 +34,7 @@
#endif
#define IRQ_PER_REGION (1 << IRQ_REGION_SHIFT)
-#define NR_IRQ_REGS 8
+#define NR_IRQ_REGS 16
#define NR_IRQS (NR_IRQ_REGS * IRQ_PER_REGION)
#define IRQ_REGION(irq) ((irq) >> IRQ_REGION_SHIFT)
--- include/asm-parisc/assembly.h.old Thu Mar 22 03:40:01 2001
+++ include/asm-parisc/assembly.h Thu Mar 22 08:11:58 2001
@@ -127,6 +127,7 @@
#define REST_CR(r, where) LDREG where, %r1 ! mtctl %r1, r
.macro save_general regs
+ STREG %r1, PT_GR1 (\regs)
STREG %r2, PT_GR2 (\regs)
STREG %r3, PT_GR3 (\regs)
STREG %r4, PT_GR4 (\regs)
@@ -151,15 +152,18 @@
STREG %r23, PT_GR23(\regs)
STREG %r24, PT_GR24(\regs)
STREG %r25, PT_GR25(\regs)
- /* r26 is clobbered by cr19 and assumed to be saved before hand */
+ /* r26 is saved in get_stack and used to preserve a value across virt_map */
STREG %r27, PT_GR27(\regs)
STREG %r28, PT_GR28(\regs)
- /* r29 is already saved and points to PT_xxx struct */
+ /* r29 is saved in get_stack and used to point to saved registers */
/* r30 stack pointer saved in get_stack */
STREG %r31, PT_GR31(\regs)
+ SAVE_CR (%cr27, PT_CR27(\regs))
.endm
.macro rest_general regs
+ REST_CR (%cr27, PT_CR27(\regs))
+ /* r1 used as a temp in rest_stack and is restored there */
LDREG PT_GR2 (\regs), %r2
LDREG PT_GR3 (\regs), %r3
LDREG PT_GR4 (\regs), %r4
@@ -187,6 +191,7 @@
LDREG PT_GR26(\regs), %r26
LDREG PT_GR27(\regs), %r27
LDREG PT_GR28(\regs), %r28
+ /* r29 points to register save area, and is restored in rest_stack */
/* r30 stack pointer restored in rest_stack */
LDREG PT_GR31(\regs), %r31
.endm
@@ -371,13 +376,14 @@
#else
SAVE_CR (%cr11, PT_SAR (\regs))
#endif
- SAVE_CR (%cr22, PT_PSW (\regs))
SAVE_CR (%cr19, PT_IIR (\regs))
- SAVE_CR (%cr28, PT_GR1 (\regs))
- SAVE_CR (%cr31, PT_GR29 (\regs))
- STREG %r26, PT_GR26 (\regs)
- mfctl %cr29, %r26
+ /*
+ * Code immediately following this macro (in intr_save) relies
+ * on r8 containing ipsw.
+ */
+ mfctl %cr22, %r8
+ STREG %r8, PT_PSW(\regs)
.endm
.macro rest_specials regs
--- include/asm-parisc/ptrace.h.old Thu Mar 22 05:01:13 2001
+++ include/asm-parisc/ptrace.h Thu Mar 22 08:49:51 2001
@@ -16,9 +16,6 @@ struct pt_regs {
unsigned long sr[ 8];
unsigned long iasq[2];
unsigned long iaoq[2];
- unsigned long cr24;
- unsigned long cr25;
- unsigned long cr26;
unsigned long cr27;
unsigned long cr30;
unsigned long orig_r28;
@@ -29,7 +26,6 @@ struct pt_regs {
unsigned long isr; /* CR20 */
unsigned long ior; /* CR21 */
unsigned long ipsw; /* CR22 */
- unsigned long cr_pid[4]; /* CR8,9,12,13 */
};
#define task_regs(task) ((struct pt_regs *) ((char *)(task) + TASK_REGS))
--- include/asm-parisc/psw.h.old Thu Mar 22 07:12:33 2001
+++ include/asm-parisc/psw.h Thu Mar 22 07:14:28 2001
@@ -25,6 +25,7 @@
#define PSW_S 0x02000000
#define PSW_E 0x04000000
#define PSW_W 0x08000000 /* PA2.0 only */
+#define PSW_W_BIT 36 /* PA2.0 only */
#define PSW_Z 0x40000000 /* PA1.x only */
#define PSW_Y 0x80000000 /* PA1.x only */
--- include/asm-parisc/pdc.h.old Thu Mar 22 08:27:15 2001
+++ include/asm-parisc/pdc.h Thu Mar 22 08:49:51 2001
@@ -603,6 +603,7 @@ extern void pdc_console_init(void);
extern int pdc_getc(void); /* wait for char */
extern void pdc_putc(unsigned char); /* print char */
+extern void setup_pdc(void); /* in inventory.c */
/* wrapper-functions from pdc.c */
--- include/asm-parisc/mmu.h.old Thu Mar 22 08:47:05 2001
+++ include/asm-parisc/mmu.h Thu Mar 22 08:48:36 2001
@@ -1,68 +1,7 @@
-/*
- * parisc mmu structures
- */
-
#ifndef _PARISC_MMU_H_
#define _PARISC_MMU_H_
-#ifndef __ASSEMBLY__
-
-/* Default "unsigned long" context */
+/* On parisc, we store the space id here */
typedef unsigned long mm_context_t;
-
-/* Hardware Page Table Entry */
-typedef struct _PTE {
- unsigned long v:1; /* Entry is valid */
- unsigned long tag:31; /* Unique Tag */
-
- unsigned long r:1; /* referenced */
- unsigned long os_1:1; /* */
- unsigned long t:1; /* page reference trap */
- unsigned long d:1; /* dirty */
- unsigned long b:1; /* break */
- unsigned long type:3; /* access type */
- unsigned long pl1:2; /* PL1 (execute) */
- unsigned long pl2:2; /* PL2 (write) */
- unsigned long u:1; /* uncacheable */
- unsigned long id:1; /* access id */
- unsigned long os_2:1; /* */
-
- unsigned long os_3:3; /* */
- unsigned long res_1:4; /* */
- unsigned long phys:20; /* physical page number */
- unsigned long os_4:2; /* */
- unsigned long res_2:3; /* */
-
- unsigned long next; /* pointer to next page */
-} PTE;
-
-/*
- * Simulated two-level MMU. This structure is used by the kernel
- * to keep track of MMU mappings and is used to update/maintain
- * the hardware HASH table which is really a cache of mappings.
- *
- * The simulated structures mimic the hardware available on other
- * platforms, notably the 80x86 and 680x0.
- */
-
-typedef struct _pte {
- unsigned long page_num:20;
- unsigned long flags:12; /* Page flags (some unused bits) */
-} pte;
-
-#define PD_SHIFT (10+12) /* Page directory */
-#define PD_MASK 0x02FF
-#define PT_SHIFT (12) /* Page Table */
-#define PT_MASK 0x02FF
-#define PG_SHIFT (12) /* Page Entry */
-
-/* MMU context */
-
-typedef struct _MMU_context {
- long pid[4];
- pte **pmap; /* Two-level page-map structure */
-} MMU_context;
-
-#endif /* __ASSEMBLY__ */
#endif /* _PARISC_MMU_H_ */