[parisc-linux-cvs] Patch 3 of 3 (boot cleanup/large memory preparation)
John Marvin
jsm@udlkern.fc.hp.com
Fri, 2 Mar 2001 03:34:27 -0700 (MST)
--- arch/parisc/mm/init.c.old Thu Mar 1 23:06:24 2001
+++ arch/parisc/mm/init.c Fri Mar 2 02:36:05 2001
@@ -15,13 +15,221 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h> /* for hppa_dma_ops and pcxl_dma_ops */
+#include <linux/blk.h> /* for initrd_start and initrd_end */
#include <linux/swap.h>
#include <linux/unistd.h>
#include <asm/pgalloc.h>
-static unsigned long totalram_pages;
-extern unsigned long max_pfn, mem_max;
+extern char _text; /* start of kernel code, defined by linker */
+extern int data_start;
+extern char _end; /* end of BSS, defined by linker */
+
+/*
+** KLUGE ALERT!
+**
+** We *really* should be using a combination of request_resource()
+** and request_region()! But request_region() requires kmalloc since it
+** returns a new struct resource. And kmalloc just isn't available
+** until after mem_init() is called from start_kernel().
+**
+** FIXME: assume contiguous memory initially.
+** Additional chunks of memory might be added to sysram_resource.sibling.
+*/
+static struct resource sysrom_resource = {
+ name: "System ROM", start: 0x0f0000000UL, end: 0x0f00fffffUL,
+ flags: IORESOURCE_BUSY | IORESOURCE_MEM,
+ parent: &iomem_resource, sibling: NULL, child: NULL };
+
+static struct resource pdcdata_resource;
+
+static struct resource sysram_resource = {
+ name: "System RAM", start: 0UL, end: ~0UL /* bogus */,
+ flags: IORESOURCE_MEM,
+ parent: &iomem_resource, sibling: &sysrom_resource, child: &pdcdata_resource};
+
+static struct resource data_resource = {
+ name: "kernel Data", start: virt_to_phys(&data_start), end: virt_to_phys(&_end)-1,
+ flags: IORESOURCE_BUSY | IORESOURCE_MEM,
+ parent: &sysram_resource, sibling: NULL, child: NULL};
+
+static struct resource code_resource = {
+ name: "Kernel Code", start: virt_to_phys(&_text), end: virt_to_phys(&data_start)-1,
+ flags: IORESOURCE_BUSY | IORESOURCE_MEM,
+ parent: &sysram_resource, sibling: &data_resource, child: NULL};
+
+static struct resource pdcdata_resource = {
+ name: "PDC data (Page Zero)", start: 0, end: 0x9ff,
+ flags: IORESOURCE_BUSY | IORESOURCE_MEM,
+ parent: &sysram_resource, sibling: &code_resource, child: NULL};
+
+
+static unsigned long max_pfn;
+
+/* The following array is initialized from the firmware-specific
+ * information retrieved in kernel/inventory.c.
+ */
+
+physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES];
+int npmem_ranges;
+
+#ifdef __LP64__
+#define MAX_MEM (~0UL)
+#else /* !__LP64__ */
+#define MAX_MEM (3584UL*1024UL*1024UL)
+#endif /* !__LP64__ */
+
+static void __init setup_bootmem(void)
+{
+ unsigned long bootmap_size;
+ unsigned long mem_max;
+ unsigned long bootmap_pages;
+ unsigned long bootmap_start_pfn;
+ unsigned long bootmap_pfn;
+ int i;
+
+ disable_sr_hashing(); /* Turn off space register hashing */
+
+#ifdef __LP64__
+
+ /* Print the memory ranges, even if we are not going to use them */
+
+ if (npmem_ranges > 1) {
+ printk("Memory Ranges:\n");
+
+ for (i = 0; i < npmem_ranges; i++) {
+ unsigned long start;
+ unsigned long size;
+
+ size = (pmem_ranges[i].pages << PAGE_SHIFT);
+ start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
+ printk("%2d) Start 0x%016lx End 0x%016lx Size %6ld Mb\n",
+ i, start, start + (size - 1), size >> 20);
+ }
+ }
+
+#ifndef CONFIG_DISCONTIGMEM
+ if (npmem_ranges > 1) {
+ printk("\n\n\n\n");
+ printk("WARNING! This machine has additional memory in discontiguous\n");
+ printk(" ranges, however CONFIG_DISCONTIGMEM needs to be enabled\n");
+ printk(" in order to access it.\n\n");
+ printk(" Memory will be limited to the first range reported above.\n\n\n\n");
+
+ npmem_ranges = 1;
+ }
+#endif
+
+#else /* !__LP64__ */
+
+#ifdef CONFIG_DISCONTIGMEM
+ printk("\n\n\n\n");
+ printk("32 bit kernels do not support discontiguous memory, so there is\n");
+ printk("no good reason to enable CONFIG_DISCONTIGMEM. There is a slight\n");
+ printk("performance penalty for doing so.\n\n\n\n");
+#endif
+#endif /* !__LP64__ */
+
+ /*
+ * For 32 bit kernels we limit the amount of memory we can
+ * support, in order to preserve enough kernel address space
+ * for other purposes. For 64 bit kernels we don't normally
+ * limit the memory, but this mechanism can be used to
+ * artificially limit the amount of memory (and it is written
+ * to work with multiple memory ranges).
+ */
+
+ mem_max = 0;
+
+ for (i = 0; (i < npmem_ranges) && (mem_max < MAX_MEM); i++) {
+ unsigned long rsize;
+
+ rsize = pmem_ranges[i].pages << PAGE_SHIFT;
+ if ((mem_max + rsize) > MAX_MEM) {
+ printk("Memory truncated to %ld Mb\n", MAX_MEM >> 20);
+ pmem_ranges[i].pages = (MAX_MEM >> PAGE_SHIFT)
+ - (mem_max >> PAGE_SHIFT);
+ mem_max = MAX_MEM;
+ npmem_ranges = i + 1;
+ break;
+ }
+ mem_max += rsize;
+ }
+
+ printk("\nTotal Memory: %ld Mb\n",mem_max >> 20);
+
+ bootmap_pages = 0;
+ for (i = 0; i < npmem_ranges; i++)
+ bootmap_pages += bootmem_bootmap_pages(pmem_ranges[i].pages);
+
+
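+ /* Start the bootmap at the first page boundary at or above _end */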
+ bootmap_start_pfn = __pa((unsigned long) &_end);
+ bootmap_start_pfn = (bootmap_start_pfn + PAGE_SIZE - 1) & PAGE_MASK;
+ bootmap_start_pfn = bootmap_start_pfn >> PAGE_SHIFT;
+
+ /*
+ * Initialize and free the full span of memory in each range.
+ * Note that the only writing these routines do is to the bootmap,
+ * and we've made sure to locate the bootmap properly so that they
+ * won't be writing over anything important.
+ */
+
+ bootmap_pfn = bootmap_start_pfn;
+ max_pfn = 0;
+ for (i = 0; i < npmem_ranges; i++) {
+ unsigned long start_pfn;
+ unsigned long npages;
+
+ start_pfn = pmem_ranges[i].start_pfn;
+ npages = pmem_ranges[i].pages;
+
+ bootmap_size = init_bootmem_node(NODE_DATA(i),
+ bootmap_pfn,
+ start_pfn,
+ (start_pfn + npages) );
+
+ free_bootmem_node(NODE_DATA(i),
+ (start_pfn << PAGE_SHIFT),
+ (npages << PAGE_SHIFT) );
+
+ bootmap_pfn += (bootmap_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ if ((start_pfn + npages) > max_pfn)
+ max_pfn = start_pfn + npages;
+
+ }
+
+ if ((bootmap_pfn - bootmap_start_pfn) != bootmap_pages) {
+ printk("WARNING! bootmap sizing is messed up!\n");
+ BUG();
+ }
+
+ /* Now, reserve bootmap, kernel text/data/bss, and pdc memory */
+
+ /* HACK! reserve 0 to end of bootmaps in one call
+ * This will be fixed in a future checkin, but is equivalent
+ * to what we were doing previously (wasting memory).
+ */
+
+ reserve_bootmem_node(NODE_DATA(0), 0UL, (bootmap_pfn << PAGE_SHIFT));
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ printk("initrd: %08x-%08x\n", (int) initrd_start, (int) initrd_end);
+
+ if (initrd_end != 0) {
+ initrd_below_start_ok = 1;
+ reserve_bootmem_node(NODE_DATA(0), __pa(initrd_start), initrd_end - initrd_start);
+ }
+#endif
+
+#if 1
+ /* KLUGE! this really belongs in kernel/resource.c! */
+ iomem_resource.end = ~0UL;
+#endif
+
+ /* HACK! just use range 0 for now */
+
+ sysram_resource.end = ((pmem_ranges[0].start_pfn + pmem_ranges[0].pages) << PAGE_SHIFT) - 1;
+}
void free_initmem(void) {
}
@@ -47,11 +255,11 @@ unsigned long pcxl_dma_start;
void __init mem_init(void)
{
- max_mapnr = num_physpages = max_low_pfn;
- high_memory = __va(max_low_pfn * PAGE_SIZE);
+ max_mapnr = max_pfn;
+ high_memory = __va((max_pfn << PAGE_SHIFT));
- totalram_pages += free_all_bootmem();
- printk("Memory: %luk available\n", totalram_pages << (PAGE_SHIFT-10));
+ num_physpages = free_all_bootmem_node(NODE_DATA(0));
+ printk("Memory: %luk available\n", num_physpages << (PAGE_SHIFT-10));
if (hppa_dma_ops == &pcxl_dma_ops) {
pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
@@ -163,7 +371,6 @@ void set_pte_phys (unsigned long vaddr,
{
}
-
/*
* pagetable_init() sets up the page tables
*
@@ -184,6 +391,7 @@ static void __init pagetable_init(void)
unsigned long ro_start;
unsigned long ro_end;
unsigned long fv_addr;
+ int range;
extern const int stext;
extern int data_start;
extern const unsigned long fault_vector_20;
@@ -194,62 +402,67 @@ static void __init pagetable_init(void)
printk("pagetable_init\n");
- /* Map whole memory from PAGE_OFFSET */
+ /* Map each physical memory range to its kernel vaddr */
+
+ for (range = 0; range < npmem_ranges; range++) {
+ unsigned long start_paddr;
+ unsigned long end_paddr;
- pg_dir = pgd_offset_k(PAGE_OFFSET);
+ start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
+ end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
+
+ pg_dir = pgd_offset_k(start_paddr + PAGE_OFFSET);
#if PTRS_PER_PMD == 1
- start_pmd = 0;
+ start_pmd = 0;
#else
- start_pmd = (((PAGE_OFFSET) >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
+ start_pmd = (((start_paddr + PAGE_OFFSET) >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
- address = 0;
- while (address < mem_max) {
- /* XXX: BTLB should be done here */
-
+ address = start_paddr;
+ while (address < end_paddr) {
#if PTRS_PER_PMD == 1
- pmd = (pmd_t *)__pa(pg_dir);
+ pmd = (pmd_t *)__pa(pg_dir);
#else
- pmd = (pmd_t *) (PAGE_MASK & pgd_val(*pg_dir));
+ pmd = (pmd_t *) (PAGE_MASK & pgd_val(*pg_dir));
- /*
- * pmd is physical at this point
- */
-
- if (!pmd) {
- pmd = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
- pmd = (pmd_t *) __pa(pmd);
- }
+ /*
+ * pmd is physical at this point
+ */
- pgd_val(*pg_dir) = _PAGE_TABLE | (unsigned long) pmd;
+ if (!pmd) {
+ pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
+ pmd = (pmd_t *) __pa(pmd);
+ }
+
+ pgd_val(*pg_dir) = _PAGE_TABLE | (unsigned long) pmd;
#endif
- pg_dir++;
+ pg_dir++;
- /* now change pmd to kernel virtual addresses */
+ /* now change pmd to kernel virtual addresses */
- pmd = (pmd_t *)__va(pmd) + start_pmd;
- for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++,pmd++) {
+ pmd = (pmd_t *)__va(pmd) + start_pmd;
+ for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++,pmd++) {
- /*
- * pg_table is physical at this point
- */
+ /*
+ * pg_table is physical at this point
+ */
- pg_table = (pte_t *) (PAGE_MASK & pmd_val(*pmd));
- if (!pg_table) {
- pg_table = (pte_t *)
- alloc_bootmem_low_pages(PAGE_SIZE);
- pg_table = (pte_t *) __pa(pg_table);
- }
+ pg_table = (pte_t *) (PAGE_MASK & pmd_val(*pmd));
+ if (!pg_table) {
+ pg_table = (pte_t *)
+ alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
+ pg_table = (pte_t *) __pa(pg_table);
+ }
- pmd_val(*pmd) = _PAGE_TABLE |
- (unsigned long) pg_table;
+ pmd_val(*pmd) = _PAGE_TABLE |
+ (unsigned long) pg_table;
- /* now change pg_table to kernel virtual addresses */
+ /* now change pg_table to kernel virtual addresses */
- pg_table = (pte_t *) __va(pg_table);
- for (tmp2=0; tmp2 < PTRS_PER_PTE; tmp2++,pg_table++) {
- pte_t pte;
+ pg_table = (pte_t *) __va(pg_table);
+ for (tmp2=0; tmp2 < PTRS_PER_PTE; tmp2++,pg_table++) {
+ pte_t pte;
#if !defined(CONFIG_KWDB) && !defined(CONFIG_STI_CONSOLE)
#warning STI console should explicitly allocate executable pages but does not
@@ -258,25 +471,26 @@ static void __init pagetable_init(void)
** The right thing to do seems like KWDB modify only the pte which
** has a break point on it...otherwise we might mask worse bugs.
*/
- if (address >= ro_start && address < ro_end
- && address != fv_addr)
- pte = __mk_pte(address, PAGE_KERNEL_RO);
- else
+ if (address >= ro_start && address < ro_end
+ && address != fv_addr)
+ pte = __mk_pte(address, PAGE_KERNEL_RO);
+ else
#endif
- pte = __mk_pte(address, PAGE_KERNEL);
+ pte = __mk_pte(address, PAGE_KERNEL);
- if (address >= mem_max)
- pte_val(pte) = 0;
+ if (address >= end_paddr)
+ pte_val(pte) = 0;
- set_pte(pg_table, pte);
+ set_pte(pg_table, pte);
- address += PAGE_SIZE;
- }
+ address += PAGE_SIZE;
+ }
- if (address >= mem_max)
- break;
+ if (address >= end_paddr)
+ break;
+ }
+ start_pmd = 0;
}
- start_pmd = 0;
}
empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
@@ -329,14 +543,18 @@ static void __init gateway_init(void)
void __init paging_init(void)
{
+ setup_bootmem();
pagetable_init();
gateway_init();
flush_all_caches(); /* start with a known state */
+ /* Need to fix this for each node ... */
+
{
- unsigned long zones_size[MAX_NR_ZONES] = { max_pfn/2, max_pfn/2, };
+ unsigned long zones_size[MAX_NR_ZONES] = { max_pfn, 0, 0, };
+ unsigned long zholes_size[MAX_NR_ZONES] = { 0, 0, 0, };
- free_area_init(zones_size);
+ free_area_init_node(0, NULL, NULL, zones_size, 0UL, zholes_size);
}
}
@@ -448,7 +666,7 @@ void free_initrd_mem(unsigned long start
ClearPageReserved(mem_map + MAP_NR(start));
set_page_count(mem_map+MAP_NR(start), 1);
free_page(start);
- totalram_pages++;
+ num_physpages++;
}
printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
#endif
@@ -457,26 +675,12 @@ void free_initrd_mem(unsigned long start
void si_meminfo(struct sysinfo *val)
{
- int i;
-
- i = max_mapnr;
- val->totalram = totalram_pages;
+ val->totalram = num_physpages;
val->sharedram = 0;
val->freeram = nr_free_pages();
val->bufferram = atomic_read(&buffermem_pages);
-#if 0
- while (i-- > 0) {
- if (PageReserved(mem_map+i))
- continue;
- val->totalram++;
- if (!atomic_read(&mem_map[i].count))
- continue;
- val->sharedram += atomic_read(&mem_map[i].count) - 1;
- }
- val->totalram <<= PAGE_SHIFT;
- val->sharedram <<= PAGE_SHIFT;
-#endif
val->totalhigh = 0;
val->freehigh = 0;
+ val->mem_unit = PAGE_SIZE;
return;
}
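
A note on the resource KLUGE near the top of init.c: once additional
ranges are wired into the resource tree, they can be chained in without
kmalloc by keeping a static array. Roughly like the following sketch
(the array and function names here are made up, not part of this patch):

    static struct resource sysram_resources[MAX_PHYSMEM_RANGES];

    static void __init request_ram_resources(void)
    {
            int i;

            for (i = 0; i < npmem_ranges; i++) {
                    struct resource *res = &sysram_resources[i];

                    res->name  = "System RAM";
                    res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT;
                    res->end   = res->start +
                                 (pmem_ranges[i].pages << PAGE_SHIFT) - 1;
                    res->flags = IORESOURCE_MEM;
                    request_resource(&iomem_resource, res);
            }
    }
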
--- include/asm-parisc/hardware.h.old Thu Mar 1 23:06:24 2001
+++ include/asm-parisc/hardware.h Fri Mar 2 00:08:06 2001
@@ -116,6 +116,7 @@ extern int register_driver(struct pa_iod
extern int unregister_driver(struct pa_iodc_driver *driver);
/* inventory.c: */
-extern void do_inventory(void);
+extern void do_memory_inventory(void);
+extern void do_device_inventory(void);
#endif
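
The do_inventory() split is deliberate: memory inventory has to run
before the bootmem allocator exists, while device inventory can wait
until allocations work. The intended ordering, sketched (this is not
the literal setup_arch() body):

    /* Sketch of the boot ordering; setup_arch() details elided. */
    static void __init inventory_order_sketch(void)
    {
            do_memory_inventory();  /* fills pmem_ranges[]/npmem_ranges */
            paging_init();          /* paging_init() -> setup_bootmem() */
            do_device_inventory();  /* may now allocate from bootmem */
    }
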
--- include/asm-parisc/page.h.old Thu Mar 1 23:06:24 2001
+++ include/asm-parisc/page.h Fri Mar 2 00:19:58 2001
@@ -64,6 +64,20 @@ extern __inline__ int get_order(unsigned
return order;
}
+#ifdef __LP64__
+#define MAX_PHYSMEM_RANGES 8 /* Fix the size for now (current known max is 3) */
+#else
+#define MAX_PHYSMEM_RANGES 1 /* The first range is the only one that fits in 32 bits */
+#endif
+
+typedef struct __physmem_range {
+ unsigned long start_pfn;
+ unsigned long pages; /* PAGE_SIZE pages */
+} physmem_range_t;
+
+extern physmem_range_t pmem_ranges[];
+extern int npmem_ranges;
+
#endif /* !__ASSEMBLY__ */
/* to align the pointer to the (next) page boundary */
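
Since the ranges are pfn-based, converting to bytes is just a shift.
For example (illustration only, not part of the patch):

    /* Illustration: total memory described by pmem_ranges[]. */
    static unsigned long __init pmem_total_bytes(void)
    {
            unsigned long total = 0;
            int i;

            for (i = 0; i < npmem_ranges; i++)
                    total += pmem_ranges[i].pages << PAGE_SHIFT;
            return total;
    }
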
--- include/asm-parisc/pci.h.old Thu Mar 1 23:06:24 2001
+++ include/asm-parisc/pci.h Fri Mar 2 00:08:06 2001
@@ -206,12 +206,7 @@ extern void pcibios_assign_unassigned_re
** To date, only alpha sets this to one. We'll need to set this
** to zero for legacy platforms and one for PAT platforms.
*/
-#ifdef __LP64__
-extern int pdc_pat; /* arch/parisc/kernel/inventory.c */
-#define pcibios_assign_all_busses() pdc_pat
-#else
-#define pcibios_assign_all_busses() 0
-#endif
+#define pcibios_assign_all_busses() (pdc_type == PDC_TYPE_PAT)
#define PCIBIOS_MIN_IO 0x10
#define PCIBIOS_MIN_MEM 0x1000 /* NBPG - but pci/setup-res.c dies */
--- include/asm-parisc/pdc.h.old Thu Mar 1 23:06:24 2001
+++ include/asm-parisc/pdc.h Fri Mar 2 02:08:49 2001
@@ -85,6 +85,9 @@
#define PDC_TLB_INFO 0 /* returns parameter */
#define PDC_TLB_SETUP 1 /* set up miss handling */
+#define PDC_MEM 20 /* Manage memory */
+#define PDC_MEM_TABLE 128 /* Non contig mem map (sprockets) */
+
#define PDC_PSW 21 /* Get/Set default System Mask */
#define PDC_PSW_MASK 0 /* Return mask */
#define PDC_PSW_GET_DEFAULTS 1 /* Return defaults */
@@ -179,6 +182,17 @@ compatibility */
#include <linux/types.h>
+extern int pdc_type;
+
+/* Values for pdc_type */
+
+#define PDC_TYPE_ILLEGAL -1
+#define PDC_TYPE_PAT 0 /* Newer PAT PDC box (64 bit only) */
+#define PDC_TYPE_SYSTEM_MAP 1 /* Legacy box that supports PDC_SYSTEM_MAP */
+#define PDC_TYPE_LEGACY 2 /* Older Legacy box */
+
+#define is_pdc_pat() (pdc_type == PDC_TYPE_PAT)
+
struct pdc_model { /* for PDC_MODEL */
unsigned long hversion;
unsigned long sversion;
@@ -329,6 +343,20 @@ struct pdc_tlb { /* for PDC_TLB */
unsigned long pad[32-2];
} __attribute__((aligned(8))) ;
+#ifdef __LP64__
+struct pdc_memory_table_raddr { /* PDC_MEM/PDC_MEM_TABLE (return info) */
+ unsigned long entries_returned;
+ unsigned long entries_total;
+ unsigned long pad[32-2];
+};
+
+struct pdc_memory_table { /* PDC_MEM/PDC_MEM_TABLE (arguments) */
+ unsigned long paddr;
+ unsigned int pages;
+ unsigned int reserved;
+};
+#endif
+
struct pdc_system_map_mod_info { /* PDC_SYSTEM_MAP/FIND_MODULE */
void * mod_addr;
unsigned long mod_pgs;
@@ -577,13 +605,7 @@ struct zeropage {
#ifndef __ASSEMBLY__
-struct pdc_pat_io_num {
- unsigned long num;
- unsigned long reserved[31];
-};
-
-
-
+extern void setup_pdc(void);
extern void pdc_console_init(void);
/* pdc_get/put are NOT SMP safe - use at your own risk! */
extern int pdc_getc(void); /* wait for char */
@@ -619,19 +641,15 @@ int pdc_mem_map_hpa(void *r_addr, void *
extern int pdc_chassis_disp(unsigned long disp);
extern int pdc_chassis_info(void *pdc_result, void *chassis_info, unsigned long len);
-#ifdef __LP64__
-int pdc_pat_get_irt_size(void *r_addr, unsigned long cell_num);
-int pdc_pat_get_irt(void *r_addr, unsigned long cell_num);
-#else
-/* No PAT support for 32-bit kernels...sorry */
-#define pdc_pat_get_irt_size(r_addr, cell_numn) PDC_RET_NE_PROC
-#define pdc_pat_get_irt(r_addr, cell_num) PDC_RET_NE_PROC
-#endif
int pdc_pci_irt_size(void *r_addr, void *hpa);
int pdc_pci_irt(void *r_addr, void *hpa, void *tbl);
int pdc_tod_read(struct pdc_tod *tod);
int pdc_tod_set(unsigned long sec, unsigned long usec);
+
+#ifdef __LP64__
+int pdc_mem_mem_table(void *r_addr, void *tbl, unsigned long entries);
+#endif
/* on all currently-supported platforms, IODC I/O calls are always
* 32-bit calls, and MEM_PDC calls are always the same width as the OS.
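
For reference, a caller of the new PDC_MEM_TABLE wrapper would look
roughly like this (sketch only; I'm assuming the usual PDC_OK return
convention and that tbl is an array of struct pdc_memory_table):

    #ifdef __LP64__
    /* Sketch: reading the non-contiguous memory map from firmware. */
    static void __init mem_table_sketch(void)
    {
            struct pdc_memory_table_raddr r_addr;
            struct pdc_memory_table table[MAX_PHYSMEM_RANGES];
            unsigned long i;

            if (pdc_mem_mem_table(&r_addr, table,
                                  MAX_PHYSMEM_RANGES) != PDC_OK)
                    return;

            for (i = 0; i < r_addr.entries_returned; i++)
                    printk("range %lu: paddr 0x%lx pages %u\n",
                           i, table[i].paddr, table[i].pages);
    }
    #endif
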
--- include/asm-parisc/pdcpat.h.old Thu Mar 1 23:06:24 2001
+++ include/asm-parisc/pdcpat.h Thu Mar 1 23:22:08 2001
@@ -10,7 +10,6 @@
* Copyright 2000 (c) Grant Grundler <grundler@puffin.external.hp.com>
*/
-
/* PDC PAT CELL */
#define PDC_PAT_CELL 64L /* Interface for gaining and
* manipulating cell state within PD */
@@ -168,13 +167,33 @@
#define PDC_PAT_NVOLATILE_VERIFY 3L /* Verify contents of NVM */
#define PDC_PAT_NVOLATILE_INIT 4L /* Initialize NVM */
+/* PDC PAT PD */
+
+#define PDC_PAT_PD 74L /* Protection Domain Info */
+#define PDC_PAT_PD_GET_ADDR_MAP 0L /* Get Address Map */
+
+/* PDC_PAT_PD_GET_ADDR_MAP entry types */
+
+#define PAT_MEMORY_DESCRIPTOR 1
+
+/* PDC_PAT_PD_GET_ADDR_MAP memory types */
+
+#define PAT_MEMTYPE_MEMORY 0
+#define PAT_MEMTYPE_FIRMWARE 4
+
+/* PDC_PAT_PD_GET_ADDR_MAP memory usage */
+
+#define PAT_MEMUSE_GENERAL 0
+#define PAT_MEMUSE_GI 128
+#define PAT_MEMUSE_GNI 129
+
#ifndef __ASSEMBLY__
#include <linux/types.h>
/*
** PDC_PAT_CELL_GET_INFO return block
*/
-typedef struct pdc_pat_cell_info_rtn_block {
+struct pdc_pat_cell_info_rtn_block {
unsigned long cpu_info;
unsigned long cell_info;
unsigned long cell_location;
@@ -187,8 +206,9 @@ typedef struct pdc_pat_cell_info_rtn_blo
unsigned long fabric_info2;
unsigned long fabric_info3;
unsigned long reserved[21];
-} pdc_pat_cell_info_rtn_block_t;
+} __attribute__((aligned(8))) ;
+typedef struct pdc_pat_cell_info_rtn_block pdc_pat_cell_info_rtn_block_t;
/* FIXME: mod[508] should really be a union of the various mod components */
struct pdc_pat_cell_mod_maddr_block { /* PDC_PAT_CELL_MODULE */
@@ -201,19 +221,42 @@ struct pdc_pat_cell_mod_maddr_block { /*
typedef struct pdc_pat_cell_mod_maddr_block pdc_pat_cell_mod_maddr_block_t;
+struct pdc_pat_io_num {
+ unsigned long num;
+ unsigned long reserved[31];
+};
+
+
+struct pdc_pat_pd_addr_map_rtn {
+ unsigned long actual_len; /* actual # bytes in address map */
+ unsigned long reserved[31];
+} __attribute__((aligned(8))) ;
+
+struct pdc_pat_pd_addr_map_entry {
+ unsigned char entry_type; /* 1 = Memory Descriptor Entry Type */
+ unsigned char reserve1[5];
+ unsigned char memory_type;
+ unsigned char memory_usage;
+ unsigned long paddr;
+ unsigned int pages; /* Length in 4K pages */
+ unsigned int reserve2;
+ unsigned long cell_map;
+} __attribute__((aligned(8))) ;
extern int pdc_pat_cell_get_number(void *);
extern int pdc_pat_cell_module(void *, unsigned long, unsigned long, unsigned long, void *);
extern int pdc_pat_cell_num_to_loc(void *, unsigned long);
-/* Flag to indicate this is a PAT box...don't use this unless you
-** really have to...it might go away some day.
-*/
#ifdef __LP64__
-extern int pdc_pat; /* arch/parisc/kernel/inventory.c */
+extern int pdc_pat_get_irt_size(void *r_addr, unsigned long cell_num);
+extern int pdc_pat_get_irt(void *r_addr, unsigned long cell_num);
#else
-#define pdc_pat 0
+/* No PAT support for 32-bit kernels...sorry */
+#define pdc_pat_get_irt_size(r_addr, cell_num) PDC_RET_NE_PROC
+#define pdc_pat_get_irt(r_addr, cell_num) PDC_RET_NE_PROC
#endif
+
+extern int pdc_pat_pd_get_addr_map(void *, void *, unsigned long, unsigned long);
/********************************************************************
* PDC_PAT_CELL[Return Cell Module] memaddr[0] conf_base_addr
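
On PAT boxes the equivalent memory information comes from
PDC_PAT_PD_GET_ADDR_MAP. A sketch of walking the returned map
(assumptions: the arguments are (rtn, buf, len, offset), PDC_OK on
success, and the 4K buffer size here is arbitrary):

    #ifdef __LP64__
    /* Sketch: picking memory descriptors out of the PAT address map. */
    static void __init pat_addr_map_sketch(void)
    {
            static unsigned char map[4096] __attribute__((aligned(8)));
            struct pdc_pat_pd_addr_map_rtn rtn;
            struct pdc_pat_pd_addr_map_entry *e;
            unsigned long bytes;

            if (pdc_pat_pd_get_addr_map(&rtn, map, sizeof(map), 0UL)
                != PDC_OK)
                    return;

            e = (struct pdc_pat_pd_addr_map_entry *) map;
            for (bytes = 0; bytes < rtn.actual_len;
                 bytes += sizeof(*e), e++) {
                    if (e->entry_type != PAT_MEMORY_DESCRIPTOR ||
                        e->memory_type != PAT_MEMTYPE_MEMORY)
                            continue;
                    printk("mem: paddr 0x%lx pages %u usage %d\n",
                           e->paddr, e->pages, e->memory_usage);
            }
    }
    #endif
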
--- include/asm-parisc/pgtable.h.old Thu Mar 1 23:06:24 2001
+++ include/asm-parisc/pgtable.h Thu Mar 1 23:22:08 2001
@@ -246,9 +246,7 @@ extern inline pte_t pte_modify(pte_t pte
* Permanent address of a page. Obviously must never be
* called on a highmem page.
*/
-#define page_address(page) ({ if (!(page)->virtual) BUG(); (page)->virtual; })
-#define __page_address(page) ({ if (PageHighMem(page)) BUG(); PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT); })
-#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
+#define page_address(page) ((page)->virtual)
#define pte_page(x) (mem_map+pte_pagenr(x))
#define pmd_page(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))