[parisc-linux-cvs] Patch 1 of 3 (boot cleanup/large memory preparation)
John Marvin
jsm@udlkern.fc.hp.com
Fri, 2 Mar 2001 03:49:00 -0700 (MST)
Hmm,
I sent 3 patches out, but for some reason the 1st didn't seem to make it
(with my luck it's sitting somewhere and will be delivered about the time
I finish typing this ...)
Anyway, I had hoped to finish the large memory support before vacation,
but a few things got in the way, so I am checking in the changes to
prepare for large memory support, which include some cleanup/redesign
of the early boot code (a short standalone sketch of the new memory
range table follows below, just before the diff).
I've tested these changes on: A500, J5000/32 bit, J5000/64 bit, A180 & 712.
I've broken up the patch into three parts due to the size of the change.
I will be reading e-mail while on vacation (I'll be back Tuesday 3/12) in
case there are any questions.
John
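For anyone skimming the diff, here is a minimal standalone sketch (plain
userspace C, not kernel code) of the firmware-independent memory range
table the patch introduces. The physmem_range_t fields and the
set_pmem_entry() logic mirror the inventory.c changes below; PAGE_SHIFT,
the demo values and main() are made up for illustration.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12                        /* assume 4k kernel pages for the demo */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PDC_PAGE_ADJ_SHIFT (PAGE_SHIFT - 12) /* pdc pages are always 4k */

typedef struct {
    unsigned long start_pfn;   /* first page frame of the range */
    unsigned long pages;       /* length of the range in PAGE_SIZE pages */
} physmem_range_t;

static void set_pmem_entry(physmem_range_t *p, unsigned long start,
                           unsigned long pages4k)
{
    /* As in the patch: ranges must already be page aligned. */
    if ((start & (PAGE_SIZE - 1)) != 0 ||
        (pages4k & ((1UL << PDC_PAGE_ADJ_SHIFT) - 1)) != 0) {
        fprintf(stderr, "Memory range doesn't align with page size!\n");
        exit(1);
    }
    p->start_pfn = start >> PAGE_SHIFT;
    p->pages = pages4k >> PDC_PAGE_ADJ_SHIFT;
}

int main(void)
{
    /* Pretend firmware reported 256MB of ram starting at physical 0. */
    physmem_range_t r;
    set_pmem_entry(&r, 0UL, (256UL << 20) >> 12);

    /* Total physical memory, the way ccio-dma.c now derives it
     * (num_physpages << PAGE_SHIFT in the real kernel). */
    unsigned long physmem = r.pages << PAGE_SHIFT;
    printf("start_pfn=%lu pages=%lu physmem=%luMB\n",
           r.start_pfn, r.pages, physmem >> 20);
    return 0;
}

Compiled and run, it prints "start_pfn=0 pages=65536 physmem=256MB",
i.e. the 256MB the fake firmware reported.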
--- arch/parisc/kernel/ccio-dma.c.old Thu Mar 1 23:06:24 2001
+++ arch/parisc/kernel/ccio-dma.c Thu Mar 1 23:45:04 2001
@@ -1350,12 +1350,11 @@ static void
ccio_ioc_init(struct ioc *ioc)
{
int i, iov_order;
- extern unsigned long mem_max; /* arch.../setup.c */
u32 iova_space_size;
+ unsigned long physmem;
/*
** Determine IOVA Space size from memory size.
- ** Using "mem_max" is a kluge.
**
** Ideally, PCI drivers would register the maximum number
** of DMA they can have outstanding for each device they
@@ -1366,14 +1365,16 @@ ccio_ioc_init(struct ioc *ioc)
*/
/* limit IOVA space size to 1MB-1GB */
- if(mem_max < (ccio_mem_ratio * 1024 * 1024)) {
+
+ physmem = num_physpages << PAGE_SHIFT;
+ if(physmem < (ccio_mem_ratio * 1024 * 1024)) {
iova_space_size = 1024 * 1024;
#ifdef __LP64__
- } else if(mem_max > (ccio_mem_ratio * 512 * 1024 * 1024)) {
+ } else if(physmem > (ccio_mem_ratio * 512 * 1024 * 1024)) {
iova_space_size = 512 * 1024 * 1024;
#endif
} else {
- iova_space_size = (u32)(mem_max / ccio_mem_ratio);
+ iova_space_size = (u32)(physmem / ccio_mem_ratio);
}
/*
@@ -1402,7 +1403,7 @@ ccio_ioc_init(struct ioc *ioc)
ASSERT((1 << get_order(ioc->pdir_size)) == (ioc->pdir_size >> PAGE_SHIFT));
DBG_INIT("%s() hpa 0x%p mem %luMB IOV %dMB (%d bits) PDIR size 0x%0x",
- __FUNCTION__, ioc->ioc_hpa, mem_max>>20, iova_space_size>>20,
+ __FUNCTION__, ioc->ioc_hpa, physmem>>20, iova_space_size>>20,
iov_order + PAGE_SHIFT, ioc->pdir_size);
ioc->pdir_base = (u64 *)__get_free_pages(GFP_KERNEL, get_order(ioc->pdir_size));
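The ccio_ioc_init() change above now sizes the IOVA space from total
physical memory (num_physpages << PAGE_SHIFT) instead of the old mem_max
global. A standalone sketch of that clamping logic follows; the
ccio_mem_ratio and memory size used here are made up, and the 512MB
ceiling is applied unconditionally although the real driver only
compiles it in on __LP64__.

#include <stdio.h>

static unsigned int size_iova_space(unsigned long physmem,
                                    unsigned long ccio_mem_ratio)
{
    unsigned int iova_space_size;

    if (physmem < ccio_mem_ratio * 1024 * 1024)
        iova_space_size = 1024 * 1024;                /* floor: 1MB */
    else if (physmem > ccio_mem_ratio * 512UL * 1024 * 1024)
        iova_space_size = 512 * 1024 * 1024;          /* ceiling: 512MB */
    else
        iova_space_size = (unsigned int)(physmem / ccio_mem_ratio);

    return iova_space_size;
}

int main(void)
{
    /* e.g. 2GB of ram with a ratio of 4 -> 512MB of IOVA space */
    printf("%uMB\n", size_iova_space(2UL << 30, 4UL) >> 20);
    return 0;
}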
--- arch/parisc/kernel/head.S.old Fri Mar 2 00:55:59 2001
+++ arch/parisc/kernel/head.S Fri Mar 2 01:05:57 2001
@@ -33,6 +33,15 @@ __setup_start:
.export __setup_end
__setup_end:
+ .data
+
+ .export boot_args
+boot_args:
+ .word 0 /* arg0 */
+ .word 0 /* arg1 */
+ .word 0 /* arg2 */
+ .word 0 /* arg3 */
+
.text
.align 4
.import init_task_union,data
@@ -54,6 +63,28 @@ stext:
mtsp %r0,%sr6
mtsp %r0,%sr7
+ /* Clear BSS (shouldn't the boot loader do this?) */
+
+ .import _edata,data
+ .import _end,data
+
+ ldil L%PA(_edata),%r3
+ ldo R%PA(_edata)(%r3),%r3
+ ldil L%PA(_end),%r4
+ ldo R%PA(_end)(%r4),%r4
+$bss_loop:
+ cmpb,<<,n %r3,%r4,$bss_loop
+ stb,ma %r0,1(%r3)
+
+ /* Save away the arguments the boot loader passed in (32 bit args) */
+
+ ldil L%PA(boot_args),%r1
+ ldo R%PA(boot_args)(%r1),%r1
+ stw,ma %arg0,4(%r1)
+ stw,ma %arg1,4(%r1)
+ stw,ma %arg2,4(%r1)
+ stw,ma %arg3,4(%r1)
+
/* Initialize startup VM. Just map first 8 MB of memory */
ldil L%PA(pg0),%r1
ldo R%PA(pg0)(%r1),%r1
@@ -83,8 +114,8 @@ $pgt_fill_loop:
copy %r0,%r2
/* And the RFI Target address too */
- ldil L%start_parisc,%r11
- ldo R%start_parisc(%r11),%r11
+ ldil L%start_kernel,%r11
+ ldo R%start_kernel(%r11),%r11
/* And the stack pointer too */
ldil L%init_task_union+TASK_SZ_ALGN,%sp
--- arch/parisc/kernel/head64.S.old Thu Mar 1 23:06:24 2001
+++ arch/parisc/kernel/head64.S Thu Mar 1 23:22:08 2001
@@ -37,6 +37,15 @@ __setup_start:
.export __setup_end
__setup_end:
+ .data
+
+ .export boot_args
+boot_args:
+ .word 0 /* arg0 */
+ .word 0 /* arg1 */
+ .word 0 /* arg2 */
+ .word 0 /* arg3 */
+
.text
.align 4
@@ -53,6 +62,28 @@ stext:
mtsp %r0,%sr6
mtsp %r0,%sr7
+ /* Clear BSS (shouldn't the boot loader do this?) */
+
+ .import _edata,data
+ .import _end,data
+
+ ldil L%PA(_edata),%r3
+ ldo R%PA(_edata)(%r3),%r3
+ ldil L%PA(_end),%r4
+ ldo R%PA(_end)(%r4),%r4
+$bss_loop:
+ cmpb,<<,n %r3,%r4,$bss_loop
+ stb,ma %r0,1(%r3)
+
+ /* Save away the arguments the boot loader passed in (32 bit args) */
+
+ ldil L%PA(boot_args),%r1
+ ldo R%PA(boot_args)(%r1),%r1
+ stw,ma %arg0,4(%r1)
+ stw,ma %arg1,4(%r1)
+ stw,ma %arg2,4(%r1)
+ stw,ma %arg3,4(%r1)
+
/* Initialize startup VM. Just map first 8 MB of memory */
ldil L%PA(pg0),%r1
@@ -91,7 +122,7 @@ $pgt_fill_loop:
nop
/* And the RFI Target address too */
- load32 start_parisc, %r11
+ load32 start_kernel, %r11
/* And the stack pointer too */
load32 PA(init_task_union+TASK_SZ_ALGN),%sp
@@ -122,7 +153,6 @@ $pgt_fill_loop:
** Entry:
** %r3 PDCE_PROC address
** %r11 RFI target address.
- ** %r26-%r23 args to pass to target function
**
** Caller must init: SR4-7, %sp, %r10, %cr24/25,
*/
@@ -135,11 +165,7 @@ common_stext:
stw %r0,0x28(%r0) /* MEM_RENDEZ_HI */
#endif /* CONFIG_SMP */
- /* Save parameters from PALO/PDC in task space */
- std %arg0, TASK_PT_GR26-TASK_SZ_ALGN(%sp)
- std %arg1, TASK_PT_GR25-TASK_SZ_ALGN(%sp)
- std %arg2, TASK_PT_GR24-TASK_SZ_ALGN(%sp)
- std %arg3, TASK_PT_GR23-TASK_SZ_ALGN(%sp)
+ /* Save the rfi target address */
std %r11, TASK_PT_GR11-TASK_SZ_ALGN(%sp)
/* Set Wide mode as the "Default" (eg for traps)
@@ -157,18 +183,8 @@ common_stext:
copy %r0,%arg3
stext_pdc_ret:
- /* restore PDC/PALO parameters */
- ldd TASK_PT_GR26-TASK_SZ_ALGN(%sp), %arg0
- ldd TASK_PT_GR25-TASK_SZ_ALGN(%sp), %arg1
- ldd TASK_PT_GR24-TASK_SZ_ALGN(%sp), %arg2
- ldd TASK_PT_GR23-TASK_SZ_ALGN(%sp), %arg3
+ /* restore rfi target address*/
ldd TASK_PT_GR11-TASK_SZ_ALGN(%sp), %r11
-
- std %r0, TASK_PT_GR26-TASK_SZ_ALGN(%sp)
- std %r0, TASK_PT_GR25-TASK_SZ_ALGN(%sp)
- std %r0, TASK_PT_GR24-TASK_SZ_ALGN(%sp)
- std %r0, TASK_PT_GR23-TASK_SZ_ALGN(%sp)
- std %r0, TASK_PT_GR11-TASK_SZ_ALGN(%sp)
/* PARANOID: clear user scratch/user space SR's */
mtsp %r0,%sr0
--- arch/parisc/kernel/inventory.c.old Thu Mar 1 23:06:24 2001
+++ arch/parisc/kernel/inventory.c Fri Mar 2 03:18:45 2001
@@ -7,7 +7,9 @@
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/pdc.h>
+#include <asm/pdcpat.h>
#include <asm/processor.h>
+#include <asm/page.h>
/*
** Debug options
@@ -15,22 +17,140 @@
*/
#undef DEBUG_PAT
+int pdc_type = PDC_TYPE_ILLEGAL;
+static struct pdc_model model __attribute__ ((aligned(8)));
#ifndef __LP64__
static u8 iodc_data[32 * sizeof(long)] __attribute__ ((aligned(64)));
static struct pdc_memory_map r_addr __attribute__ ((aligned(8)));
-static struct pdc_model model __attribute__ ((aligned(8)));
#endif
static unsigned long pdc_result[32] __attribute__ ((aligned(8)));
-static struct pdc_hpa processor_hpa __attribute__ ((aligned(8)));
static struct pdc_system_map_mod_info module_result __attribute__ ((aligned(8)));
static struct pdc_system_map_addr_info addr_result __attribute__ ((aligned(8)));
static struct pdc_module_path module_path __attribute__ ((aligned(8)));
+void setup_pdc(void)
+{
+ long status;
+ unsigned int bus_id;
+
+ /* Determine the pdc "type" used on this machine */
+
+ printk("Determining PDC firmware type: ");
+
+ status = pdc_system_map_find_mods(&module_result, &module_path, 0);
+ if (status == PDC_RET_OK) {
+ pdc_type = PDC_TYPE_SYSTEM_MAP;
+ printk("Newer Box\n");
+ return;
+ }
+
+ /*
+ * If the machine doesn't support PDC_SYSTEM_MAP then either it
+ * is a pdc pat box, or it is an older box. All 64 bit capable
+ * machines are either pdc pat boxes or they support PDC_SYSTEM_MAP.
+ */
+
+ /*
+ * TODO: Right now, if you try to boot a 32 bit kernel on a
+ * pdc pat box, you get the "Ancient Box" panic, which isn't very
+ * helpful. We should test for 64 bit capability and give a
+ * clearer message.
+ */
+
#ifdef __LP64__
-#include <asm/pdcpat.h>
+ status = pdc_pat_cell_get_number(&pdc_result);
+ if (status == PDC_RET_OK) {
+ pdc_type = PDC_TYPE_PAT;
+ printk("64 bit PDC PAT Box\n");
+ return;
+ }
+#endif
+
+ /* Here, we're going to check the model, and decide
+ ** if we should even bother trying.
+ */
+
+ status = pdc_model_info(&model);
+
+ bus_id = (model.hversion >> (4 + 7)) & 0x1f;
+
+ /* Here, we're checking the HVERSION of the CPU.
+ ** We're only checking the 0th CPU, since it'll
+ ** be the same on an SMP box.
+ */
+
+ switch (bus_id) {
+ case 0x4: /* 720, 730, 750, 735, 755 */
+ case 0x6: /* 705, 710 */
+ case 0x7: /* 715, 725 */
+ case 0x8: /* 745, 747, 742 */
+ case 0xA: /* 712 and similiar */
+ case 0xC: /* 715/64, at least */
+
+ pdc_type = PDC_TYPE_LEGACY;
+ printk("Older Legacy Box\n");
+ return;
+
+ default: /* Everything else */
+ break;
+
+ }
+
+ printk("Ancient Box (bus_id = 0x%x)\n",bus_id);
+ panic("This system will probably never run Linux.\n");
+}
+
+#define PDC_PAGE_ADJ_SHIFT (PAGE_SHIFT - 12) /* pdc pages are always 4k */
+
+static void set_pmem_entry(physmem_range_t *pmem_ptr,
+ unsigned long start,
+ unsigned long pages4k)
+{
+ /* Rather than aligning and potentially throwing away
+ * memory, we'll assume that any ranges are already
+ * nicely aligned with any reasonable page size, and
+ * panic if they are not (it's more likely that the
+ * pdc info is bad in this case).
+ */
-int pdc_pat;
+ if ( ((start & (PAGE_SIZE - 1)) != 0)
+ || ((pages4k & ((1UL << PDC_PAGE_ADJ_SHIFT) - 1)) != 0) ) {
+
+ panic("Memory range doesn't align with page size!\n");
+ }
+
+ pmem_ptr->start_pfn = (start >> PAGE_SHIFT);
+ pmem_ptr->pages = (pages4k >> PDC_PAGE_ADJ_SHIFT);
+}
+
+void do_pagezero_memconfig(void)
+{
+ unsigned long npages;
+
+ /* Use the 32 bit information from page zero to create a single
+ * entry in the pmem_ranges[] table.
+ *
+ * We currently don't support machines with contiguous memory
+ * >= 4 Gb, who report that memory using 64 bit only fields
+ * on page zero. It's not worth doing until it can be tested,
+ * and it is not clear we can support those machines for other
+ * reasons.
+ *
+ * If that support is done in the future, this is where it
+ * should be done.
+ */
+
+ npages = (PAGE_ALIGN(PAGE0->imm_max_mem) >> PAGE_SHIFT);
+ set_pmem_entry(pmem_ranges,0UL,npages);
+ npmem_ranges = 1;
+}
+
+#ifdef __LP64__
+
+#define do_legacy_inventory() 0
+
+/* All of the PDC PAT specific code goes here */
/*
** The module object is filled via PDC_PAT_CELL[Return Cell Module].
@@ -123,6 +243,80 @@ static int pat_query_module(ulong pcell_
}
+/* pat pdc can return information about a variety of different
+ * types of memory (e.g. firmware,i/o, etc) but we only care about
+ * the usable physical ram right now. Since the firmware specific
+ * information is allocated on the stack, we'll be generous, in
+ * case there is a lot of other information we don't care about.
+ */
+
+#define PAT_MAX_RANGES (4 * MAX_PHYSMEM_RANGES)
+
+static void do_pat_memconfig(void)
+{
+ struct pdc_pat_pd_addr_map_rtn r_addr;
+ struct pdc_pat_pd_addr_map_entry mem_table[PAT_MAX_RANGES+1];
+ struct pdc_pat_pd_addr_map_entry *mtbl_ptr;
+ physmem_range_t *pmem_ptr;
+ long status;
+ int entries;
+ unsigned long length;
+ int i;
+
+ length = (unsigned long)(PAT_MAX_RANGES + 1)
+ * sizeof(struct pdc_pat_pd_addr_map_entry);
+
+ status = pdc_pat_pd_get_addr_map(&r_addr,mem_table,length,0L);
+
+ if ((status != PDC_RET_OK)
+ || ((r_addr.actual_len % sizeof(struct pdc_pat_pd_addr_map_entry)) != 0)) {
+
+ /* The above pdc call shouldn't fail, but, just in
+ * case, just use the PAGE0 info.
+ */
+
+ printk("\n\n\nWARNING! Could not get full memory configuration. All memory may not be used!\n\n\n");
+ do_pagezero_memconfig();
+ return;
+ }
+
+ entries = r_addr.actual_len / sizeof(struct pdc_pat_pd_addr_map_entry);
+
+ if (entries > PAT_MAX_RANGES) {
+ printk("This Machine has more memory ranges than we support!\n");
+ printk("Some memory may not be used!\n");
+ }
+
+ /* Copy information into the firmware independent pmem_ranges
+ * array, skipping types we don't care about. Notice we said
+ * "may" above. We'll use all the entries that were returned.
+ */
+
+ npmem_ranges = 0;
+ mtbl_ptr = mem_table;
+ pmem_ptr = pmem_ranges; /* Global firmware independent table */
+ for (i = 0; i < entries; i++,mtbl_ptr++) {
+ if ( (mtbl_ptr->entry_type != PAT_MEMORY_DESCRIPTOR)
+ || (mtbl_ptr->memory_type != PAT_MEMTYPE_MEMORY)
+ || (mtbl_ptr->pages == 0)
+ || ( (mtbl_ptr->memory_usage != PAT_MEMUSE_GENERAL)
+ && (mtbl_ptr->memory_usage != PAT_MEMUSE_GI)
+ && (mtbl_ptr->memory_usage != PAT_MEMUSE_GNI) ) ) {
+
+ continue;
+ }
+
+ if (npmem_ranges == MAX_PHYSMEM_RANGES) {
+ printk("This Machine has more memory ranges than we support!\n");
+ printk("Some memory will not be used!\n");
+ break;
+ }
+
+ set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
+ npmem_ranges++;
+ }
+}
+
static int do_pat_inventory(void)
{
ulong mod_index = 0;
@@ -134,8 +328,8 @@ static int do_pat_inventory(void)
** Note: Prelude (and it's successors: Lclass, A400/500) only
** implement PDC_PAT_CELL sub-options 0 and 2.
*/
- pdc_pat = (pdc_pat_cell_get_number(&pdc_result) == PDC_OK);
- if (!pdc_pat) {
+ status = pdc_pat_cell_get_number(&pdc_result);
+ if (status != PDC_RET_OK) {
return 0;
}
@@ -152,90 +346,57 @@ static int do_pat_inventory(void)
return mod_index;
}
-#endif /* __LP64__ */
-/* Fixed Physical Address - Location of the Central Bus */
-#define FPA (unsigned long)(signed int)0xFFF80000
+/* We only look for extended memory ranges on a 64 bit capable box */
-/* The fixed portion is contained in hpa[14..19] for 32 bit and hpa[46..51] for 64 bit.
-** The maximum number of native devices is 2^6 (64) and the offset between devices is
-** 2^12 (0x1000).
-** - Ryan
-*/
-#define MAX_NATIVE_DEVICES 64
-#define NATIVE_DEVICE_OFFSET 0x1000
-
-static int do_native_bus_walk(unsigned long hpa)
+static void do_system_map_memconfig(void)
{
- int num = 0;
- struct hp_device *hp_device;
- unsigned long hpa_end =
- hpa + (MAX_NATIVE_DEVICES * NATIVE_DEVICE_OFFSET);
-
- for (; hpa < hpa_end; hpa += NATIVE_DEVICE_OFFSET) {
- hp_device = alloc_pa_dev(hpa);
- if (!hp_device)
- continue;
-
- register_pa_dev(hp_device);
- ++num;
- }
- return num;
-}
-
-static int do_newer_workstation_inventory(void)
-{
- int i, j, num;
+ struct pdc_memory_table_raddr r_addr;
+ struct pdc_memory_table mem_table[MAX_PHYSMEM_RANGES];
+ struct pdc_memory_table *mtbl_ptr;
+ physmem_range_t *pmem_ptr;
long status;
- struct hp_device *hp_device;
+ int entries;
+ int i;
- /* So the idea here is to simply try one SYSTEM_MAP call. If
- ** that one works, great, otherwise do it another way
- */
- status = pdc_system_map_find_mods(&module_result, &module_path, 0);
- if (status != PDC_RET_OK)
- return 0;
+ status = pdc_mem_mem_table(&r_addr,mem_table,
+ (unsigned long)MAX_PHYSMEM_RANGES);
- /* This is for newer non-PDC-PAT boxes */
- printk("a newer box...\n");
- num = 0;
- for (i = 0; status != PDC_RET_NE_PROC && status != PDC_RET_NE_MOD; ++i) {
+ if (status != PDC_RET_OK) {
- status = pdc_system_map_find_mods(&module_result, &module_path, i);
- if (status != PDC_RET_OK)
- continue;
-
- hp_device = alloc_pa_dev((unsigned long) module_result.mod_addr);
- if (!hp_device)
- continue;
-
- register_pa_dev(hp_device);
- ++num;
+ /* The above pdc call only works on boxes with sprockets
+ * firmware (newer B,C,J class). Other non PAT PDC machines
+ * do support more than 3.75 Gb of memory, but we don't
+ * support them yet.
+ */
- /* if available, get the additional addresses for a module */
- if (!module_result.add_addrs)
- continue;
+ do_pagezero_memconfig();
+ return;
+ }
- for (j = 1; j <= module_result.add_addrs; ++j) {
- status = pdc_system_map_find_addrs(&addr_result, i, j);
- if (status == PDC_RET_OK) {
- add_pa_dev_addr(hp_device, (unsigned long)
- addr_result.mod_addr);
- } else {
- printk("Bad PDC_FIND_ADDRESS status return (%ld) for index %d\n",
- status, j);
- status = PDC_RET_OK; /* reset status for outer loop */
- }
- }
- } /* end of main loop */
+ if (r_addr.entries_total > MAX_PHYSMEM_RANGES) {
+ printk("This Machine has more memory ranges than we support!\n");
+ printk("Some memory will not be used!\n");
+ }
- /* Walk the system bus */
- num += do_native_bus_walk(FPA);
- return (num > 0);
+ entries = (int)r_addr.entries_returned;
+
+ npmem_ranges = 0;
+ mtbl_ptr = mem_table;
+ pmem_ptr = pmem_ranges; /* Global firmware independent table */
+ for (i = 0; i < entries; i++,mtbl_ptr++) {
+ set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
+ npmem_ranges++;
+ }
}
+#else /* !__LP64__ */
-#ifndef __LP64__
+#define do_pat_inventory() 0
+#define do_pat_memconfig() 0
+#define do_system_map_memconfig() do_pagezero_memconfig()
+
+/* All of the older legacy box (32 bit only) code goes here */
/* The following checks to see if the system is a 715/old. This might
** sound a bit unusual, but there are some workarounds required for these
@@ -274,8 +435,7 @@ static int check_if_715old(void)
return 0;
}
-
-static int really_do_oldhw_inventory(void)
+int do_legacy_inventory(void)
{
int i, mod, num = 0;
int status;
@@ -419,69 +579,142 @@ static int really_do_oldhw_inventory(voi
return num;
}
+#endif /* !__LP64__ */
+
+/* Common 32/64 bit based code goes here */
+
+/* Fixed Physical Address - Location of the Central Bus */
+#define FPA (unsigned long)(signed int)0xFFF80000
+
+/* The fixed portion is contained in hpa[14..19] for 32 bit and hpa[46..51] for 64 bit.
+** The maximum number of native devices is 2^6 (64) and the offset between devices is
+** 2^12 (0x1000).
+** - Ryan
+*/
+#define MAX_NATIVE_DEVICES 64
+#define NATIVE_DEVICE_OFFSET 0x1000
+
+static int do_native_bus_walk(unsigned long hpa)
+{
+ int num = 0;
+ struct hp_device *hp_device;
+ unsigned long hpa_end =
+ hpa + (MAX_NATIVE_DEVICES * NATIVE_DEVICE_OFFSET);
+
+ for (; hpa < hpa_end; hpa += NATIVE_DEVICE_OFFSET) {
+ hp_device = alloc_pa_dev(hpa);
+ if (!hp_device)
+ continue;
+
+ register_pa_dev(hp_device);
+ ++num;
+ }
+ return num;
+}
-static int do_old_inventory(void)
+static int do_system_map_inventory(void)
{
- unsigned int bus_id;
+ int i, j, num;
long status;
+ struct hp_device *hp_device;
- printk(" an older box...\n");
-
- /* Here, we're going to check the model, and decide
- ** if we should even bother trying.
+ /* So the idea here is to simply try one SYSTEM_MAP call. If
+ ** that one works, great, otherwise do it another way
*/
+ status = pdc_system_map_find_mods(&module_result, &module_path, 0);
+ if (status != PDC_RET_OK)
+ return 0;
- status = pdc_model_info(&model);
+ num = 0;
+ for (i = 0; status != PDC_RET_NE_PROC && status != PDC_RET_NE_MOD; ++i) {
- bus_id = (model.hversion >> (4 + 7)) & 0x1f;
+ status = pdc_system_map_find_mods(&module_result, &module_path, i);
+ if (status != PDC_RET_OK)
+ continue;
+
+ hp_device = alloc_pa_dev((unsigned long) module_result.mod_addr);
+ if (!hp_device)
+ continue;
+
+ register_pa_dev(hp_device);
+ ++num;
- /* Here, we're checking the HVERSION of the CPU.
- ** We're only checking the 0th CPU, since it'll
- ** be the same on an SMP box.
- */
+ /* if available, get the additional addresses for a module */
+ if (!module_result.add_addrs)
+ continue;
- switch (bus_id) {
- case 0x4: /* 720, 730, 750, 735, 755 */
- case 0x6: /* 705, 710 */
- case 0x7: /* 715, 725 */
- case 0x8: /* 745, 747, 742 */
- case 0xA: /* 712 and similiar */
- case 0xC: /* 715/64, at least */
+ for (j = 1; j <= module_result.add_addrs; ++j) {
+ status = pdc_system_map_find_addrs(&addr_result, i, j);
+ if (status == PDC_RET_OK) {
+ add_pa_dev_addr(hp_device, (unsigned long)
+ addr_result.mod_addr);
+ } else {
+ printk("Bad PDC_FIND_ADDRESS status return (%ld) for index %d\n",
+ status, j);
+ status = PDC_RET_OK; /* reset status for outer loop */
+ }
+ }
+ } /* end of main loop */
- /* Do inventory using MEM_MAP */
- return really_do_oldhw_inventory();
+ /* Walk the system bus */
+ num += do_native_bus_walk(FPA);
+ return (num > 0);
+}
- default: /* Everything else */
- printk("This is a very very old machine, with a bus_id of 0x%x.\n",
- bus_id);
- panic("This will probably never run Linux.\n");
+void do_memory_inventory(void)
+{
+ switch (pdc_type) {
+
+ case PDC_TYPE_PAT:
+ do_pat_memconfig();
+ break;
+
+ case PDC_TYPE_SYSTEM_MAP:
+ do_system_map_memconfig();
+ break;
+
+ case PDC_TYPE_LEGACY:
+ do_pagezero_memconfig();
+ return;
+
+ default:
+ panic("Unknown pdc_type!\n");
}
- return 0;
+ if (npmem_ranges == 0 || pmem_ranges[0].start_pfn != 0) {
+ printk("Bad memory configuration returned!\n");
+ printk("Some memory may not be used!\n");
+ do_pagezero_memconfig();
+ }
}
-#endif /* !__LP64__ */
-
-void do_inventory(void)
+void do_device_inventory(void)
{
- if (pdc_hpa_processor(&processor_hpa) < 0) {
- printk(KERN_INFO
- "Couldn't get the HPA of the processor.\n");
- }
+ int num;
- printk("Searching for devices in PDC firmware... ");
- printk("processor hpa 0x%lx\n", processor_hpa.hpa);
+ printk("Searching for devices...\n");
- if (!(do_newer_workstation_inventory()
-#ifdef __LP64__
- || do_pat_inventory()
-#else /* __LP64__ */
- || do_old_inventory()
-#endif /* __LP64__ */
- )) {
+ switch (pdc_type) {
+
+ case PDC_TYPE_PAT:
+ num = do_pat_inventory();
+ break;
+
+ case PDC_TYPE_SYSTEM_MAP:
+ num = do_system_map_inventory();
+ break;
+
+ case PDC_TYPE_LEGACY:
+ num = do_legacy_inventory();
+ break;
+
+ default:
+ panic("Unknown pdc_type!\n");
+ }
+
+ if (!num) {
panic("I can't get the hardware inventory on this machine");
}
print_pa_devices(NULL);
}
-
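The inventory.c rework boils down to classifying the firmware once in
setup_pdc() and then dispatching on that classification everywhere else.
Here is a standalone sketch of that pattern; the enum, the hard coded
classification and the stand-in handlers are illustrative only (in the
patch pdc_type is an int and the handlers talk to real PDC firmware).

#include <stdio.h>
#include <stdlib.h>

enum pdc_type {
    PDC_TYPE_ILLEGAL,
    PDC_TYPE_PAT,        /* 64 bit PDC PAT box */
    PDC_TYPE_SYSTEM_MAP, /* newer box supporting PDC_SYSTEM_MAP */
    PDC_TYPE_LEGACY      /* older 32 bit only box */
};

static enum pdc_type pdc_type = PDC_TYPE_ILLEGAL;

/* Stand-ins for the firmware specific handlers in the patch. */
static void do_pat_memconfig(void)        { puts("PAT memory map"); }
static void do_system_map_memconfig(void) { puts("sprockets memory table"); }
static void do_pagezero_memconfig(void)   { puts("page zero imm_max_mem"); }

static void setup_pdc(void)
{
    /* The real code probes PDC_SYSTEM_MAP, then PDC PAT, then falls
     * back to checking the CPU HVERSION bus id; here we just pick one. */
    pdc_type = PDC_TYPE_SYSTEM_MAP;
}

static void do_memory_inventory(void)
{
    switch (pdc_type) {
    case PDC_TYPE_PAT:        do_pat_memconfig();        break;
    case PDC_TYPE_SYSTEM_MAP: do_system_map_memconfig(); break;
    case PDC_TYPE_LEGACY:     do_pagezero_memconfig();   break;
    default:
        fprintf(stderr, "Unknown pdc_type!\n");
        exit(1);
    }
}

int main(void)
{
    setup_pdc();           /* classify once */
    do_memory_inventory(); /* dispatch on pdc_type, as the patch does */
    return 0;
}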