[parisc-linux-cvs] [PATCH] ccio-dma cleanups and kernel documentation
Ryan Bradetich
rbrad@beavis.ybsoft.com
Tue, 28 Aug 2001 21:35:25 -0600
This patch mainly adds kernel documentation to the ccio-dma driver, but
it also includes a few code cleanups for issues I found while auditing
the code at OLS. I wanted to get this patch submitted before it suffers
any more bit rot.
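The most invasive change is that ccio_alloc_range() and ccio_free_range()
now take a page count instead of a byte size, so the (size >> IOVP_SHIFT)
conversion happens once in the callers. The open-coded page rounding
((x + ~IOVP_MASK) & IOVP_MASK) is also replaced with the ROUNDUP() macro
the file already defines. Here is a minimal userspace sketch of the mask
math the allocator uses (illustration only, not code from the patch;
4k I/O pages assumed):

    /* sketch: how the resource-map search mask is built */
    #include <stdio.h>

    #define IOVP_SHIFT   12                  /* assumed 4k I/O pages */
    #define IOVP_SIZE    (1UL << IOVP_SHIFT)
    #define ROUNDUP(x,y) (((x) + ((y)-1)) & ~((y)-1))

    int main(void)
    {
        size_t size = 10000;   /* example byte count from a caller */

        /* callers now round up and convert to pages themselves */
        unsigned long pages_needed =
                ROUNDUP(size, IOVP_SIZE) >> IOVP_SHIFT;

        /* pages_needed high-order bits set: the bit pattern searched
         * for (and later cleared) in the resource map */
        unsigned long mask = ~(~0UL >> pages_needed);

        printf("pages_needed %lu mask 0x%016lx\n", pages_needed, mask);
        return 0;
    }

Behavior is unchanged; the new form just drops a pointless local
initialization and keeps the byte/page conversion in one place.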
Content-Disposition: attachment; filename="ccio-dma.cleanup.patch"
Index: ccio-dma.c
===================================================================
RCS file: /home/cvs/parisc/linux/arch/parisc/kernel/ccio-dma.c,v
retrieving revision 1.37
diff -u -p -r1.37 ccio-dma.c
--- ccio-dma.c 2001/08/28 08:15:09 1.37
+++ ccio-dma.c 2001/08/29 03:27:25
@@ -62,7 +62,6 @@
#undef DEBUG_CCIO_RUN
#undef DEBUG_CCIO_INIT
#undef DEBUG_CCIO_RUN_SG
-#undef ASSERT_PDIR_SANITY
#include <linux/proc_fs.h>
#include <asm/runway.h> /* for proc_runway_root */
@@ -145,7 +144,6 @@ struct ioc {
struct ioa_registers *ioc_hpa; /* I/O MMU base address */
u8 *res_map; /* resource map, bit == pdir entry */
u64 *pdir_base; /* physical base address */
-
u32 res_hint; /* next available IOVP -
circular search */
u32 res_size; /* size of resource map in bytes */
@@ -209,98 +207,24 @@ static int ccio_count;
* match the I/O TLB replacement policy.
*
***************************************************************/
-#define PAGES_PER_RANGE 1 /* could increase this to 4 or 8 if needed */
#define IOVP_SIZE PAGE_SIZE
#define IOVP_SHIFT PAGE_SHIFT
#define IOVP_MASK PAGE_MASK
/* Convert from IOVP to IOVA and vice versa. */
#define CCIO_IOVA(iovp,offset) ((iovp) | (offset))
-#define CCIO_IOVP(iova) ((iova) & ~(IOVP_SIZE-1) )
+#define CCIO_IOVP(iova) ((iova) & IOVP_MASK)
#define PDIR_INDEX(iovp) ((iovp)>>IOVP_SHIFT)
#define MKIOVP(pdir_idx) ((long)(pdir_idx) << IOVP_SHIFT)
#define MKIOVA(iovp,offset) (dma_addr_t)((long)iovp | (long)offset)
#define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1))
-#ifdef ASSERT_PDIR_SANITY
-static void
-ccio_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
-{
- /* start printing from lowest pde in rval */
- u64 *ptr = &(ioc->pdir_base[pide & (~0UL * BITS_PER_LONG)]);
- unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >>3) & ~(sizeof(unsigned long) - 1)]);
- uint rcnt;
-
- printk(KERN_DEBUG "ccio: %s rp %p bit %d rval 0x%lx\n",
- msg, rptr, pide & (BITS_PER_LONG - 1), *rptr);
-
- rcnt = 0;
- while(rcnt < BITS_PER_LONG) {
- printk(KERN_DEBUG "%s %2d %p %016Lx\n",
- (rcnt == (pide & (BITS_PER_LONG - 1)))
- ? " -->" : " ",
- rcnt, ptr, *ptr );
- rcnt++;
- ptr++;
- }
- printk(KERN_DEBUG "%s", msg);
-}
-
-static int
-ccio_check_pdir(struct ioc *ioc, char *msg)
-{
- int i, j;
- u32 *res_ptr = (u32 *)ioc->res_map;
- u32 *pptr = (u32 *)ioc->pdir_base;
- u32 pmap;
- char buf1[512] = {0};
- char buf2[512] = {0};
-
- ++pptr;
-
- printk(KERN_DEBUG "%s", msg);
- for(i = 0; i < (ioc->res_size / sizeof(u32)); ++i, ++res_ptr) {
- if((i & 15) == 0) {
- printk(KERN_DEBUG "%s\n", buf1);
- buf1[0] = '\0';
- printk(KERN_DEBUG "%s\n", buf2);
- buf2[0] = '\0';
- printk(KERN_DEBUG "\n");
- }
- for(j = 0, pmap = 0; j < 32; ++j, ++pptr, ++pptr) {
- pmap |= (*pptr & 0x1) << (31 - j);
- }
- sprintf(buf1, "%s %08x", buf1, *res_ptr);
- sprintf(buf2, "%s %08x", buf2, pmap);
- }
- printk(KERN_DEBUG "%s\n", buf1);
- printk(KERN_DEBUG "%s\n", buf2);
- printk(KERN_DEBUG "\n");
- return 0;
-}
-
-static void
-ccio_dump_sg(struct ioc *ioc, struct scatterlist *startsg, int nents)
-{
- while(nents-- > 0) {
- printk(KERN_DEBUG " %d : %08lx/%05x %p/%05x\n", nents,
- (unsigned long)sg_dma_address(startsg),
- sg_dma_len(startsg),
- startsg->address, startsg->length);
- startsg++;
- }
-}
-
-#endif /* ASSERT_PDIR_SANITY */
-
/*
** Don't worry about the 150% average search length on a miss.
** If the search wraps around, and passes the res_hint, it will
** cause the kernel to panic anyhow.
*/
-
-
#define CCIO_SEARCH_LOOP(ioc, res_idx, mask_ptr, size) \
for(; res_ptr < res_end; ++res_ptr) { \
if(0 == (*res_ptr & *mask_ptr)) { \
@@ -332,12 +256,21 @@ ccio_dump_sg(struct ioc *ioc, struct sca
** o use different search for "large" (eg > 4 pages) or "very large"
** (eg > 16 pages) mappings.
*/
+
+/**
+ * ccio_alloc_range - Allocate pages in the ioc's resource map.
+ * @ioc: The I/O Controller.
+ * @pages_needed: The requested number of pages to be mapped into the
+ * I/O Pdir...
+ *
+ * This function searches the resource map of the ioc to locate a range
+ * of available pages for the requested size.
+ */
static int
-ccio_alloc_range(struct ioc *ioc, size_t size)
+ccio_alloc_range(struct ioc *ioc, unsigned long pages_needed)
{
int res_idx;
- unsigned long pages_needed = (size >> IOVP_SHIFT);
- unsigned long mask = ~0L;
+ unsigned long mask;
#ifdef CONFIG_PROC_FS
unsigned long cr_start = mfctl(16);
#endif
@@ -345,9 +278,8 @@ ccio_alloc_range(struct ioc *ioc, size_t
ASSERT(pages_needed);
ASSERT((pages_needed * IOVP_SIZE) <= DMA_CHUNK_SIZE);
ASSERT(pages_needed <= BITS_PER_LONG);
- ASSERT(0 == (size & ~IOVP_MASK));
- mask = ~(mask >> pages_needed);
+ mask = ~(~0UL >> pages_needed);
DBG_RES("%s() size: %d pages_needed %d mask 0x%08lx\n",
__FUNCTION__, size, pages_needed, mask);
@@ -368,13 +300,12 @@ ccio_alloc_range(struct ioc *ioc, size_t
CCIO_FIND_FREE_MAPPING(ioc, res_idx, mask, 64);
#endif
} else {
- panic(__FILE__ ": %s() Too many pages to map. pages_needed: %ld\n", __FUNCTION__, pages_needed);
+ panic(__FILE__ ": %s() Too many pages to map. pages_needed: %ld\n",
+ __FUNCTION__, pages_needed);
}
-#ifdef ASSERT_PDIR_SANITY
- ccio_check_pdir(ioc, "bummer");
-#endif
- panic(__FILE__ ": %s() I/O MMU is out of mapping resources.\n", __FUNCTION__);
+ panic(__FILE__ ": %s() I/O MMU is out of mapping resources.\n",
+ __FUNCTION__);
resource_found:
@@ -406,22 +337,27 @@ resource_found:
ASSERT((*res_ptr & *mask_ptr) == *mask_ptr); \
*res_ptr &= ~(*mask_ptr);
-/*
-** clear bits in the ioa's resource map
-*/
+/**
+ * ccio_free_range - Free pages from the ioc's resource map.
+ * @ioc: The I/O Controller.
+ * @iova: The I/O Virtual Address.
+ * @pages_mapped: The requested number of pages to be freed from the
+ * I/O Pdir.
+ *
+ * This function frees the resources allocated for the iova.
+ */
static void
-ccio_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
+ccio_free_range(struct ioc *ioc, dma_addr_t iova, unsigned long pages_mapped)
{
- unsigned long mask = ~0L;
+ unsigned long mask;
unsigned long iovp = CCIO_IOVP(iova);
unsigned int res_idx = PDIR_INDEX(iovp) >> 3;
- unsigned int pages_mapped = (size >> IOVP_SHIFT);
ASSERT(pages_mapped);
ASSERT((pages_mapped * IOVP_SIZE) <= DMA_CHUNK_SIZE);
ASSERT(pages_mapped <= BITS_PER_LONG);
- mask = ~(mask >> pages_mapped);
+ mask = ~(~0UL >> pages_mapped);
DBG_RES("%s(): res_idx: %d size: %d pages_mapped %d mask 0x%08lx\n",
__FUNCTION__, res_idx, size, pages_mapped, mask);
@@ -441,7 +377,8 @@ ccio_free_range(struct ioc *ioc, dma_add
CCIO_FREE_MAPPINGS(ioc, res_idx, mask, 64);
#endif
} else {
- panic(__FILE__ ":%s() Too many pages to unmap.\n", __FUNCTION__);
+ panic(__FILE__ ":%s() Too many pages to unmap.\n",
+ __FUNCTION__);
}
}
@@ -454,7 +391,6 @@ ccio_free_range(struct ioc *ioc, dma_add
typedef unsigned long space_t;
#define KERNEL_SPACE 0
-
/*
** DMA "Page Type" and Hints
** o if SAFE_DMA isn't set, mapping is for FAST_DMA. SAFE_DMA should be
@@ -504,32 +440,35 @@ static u32 hint_lookup[] = {
[PCI_DMA_NONE] 0, /* not valid */
};
-/*
-** Initialize an I/O Pdir entry
-**
-** Given a virtual address (vba, arg2) and space id, (sid, arg1),
-** load the I/O PDIR entry pointed to by pdir_ptr (arg0). Each IO Pdir
-** entry consists of 8 bytes as shown below (MSB == bit 0):
-**
-**
-** WORD 0:
-** +------+----------------+-----------------------------------------------+
-** | Phys | Virtual Index | Phys |
-** | 0:3 | 0:11 | 4:19 |
-** |4 bits| 12 bits | 16 bits |
-** +------+----------------+-----------------------------------------------+
-** WORD 1:
-** +-----------------------+-----------------------------------------------+
-** | Phys | Rsvd | Prefetch |Update |Rsvd |Lock |Safe |Valid |
-** | 20:39 | | Enable |Enable | |Enable|DMA | |
-** | 20 bits | 5 bits | 1 bit |1 bit |2 bits|1 bit |1 bit |1 bit |
-** +-----------------------+-----------------------------------------------+
-**
-** The virtual index field is filled with the results of the LCI
-** (Load Coherence Index) instruction. The 8 bits used for the virtual
-** index are bits 12:19 of the value returned by LCI.
-*/
-
+/**
+ * ccio_io_pdir_entry - Initialize an I/O Pdir.
+ * @pdir_ptr: A pointer into I/O Pdir.
+ * @sid: The Space Identifier.
+ * @vba: The virtual address.
+ * @hints: The DMA Hint.
+ *
+ * Given a virtual address (vba, arg2) and space id, (sid, arg1),
+ * load the I/O PDIR entry pointed to by pdir_ptr (arg0). Each IO Pdir
+ * entry consists of 8 bytes as shown below (MSB == bit 0):
+ *
+ *
+ * WORD 0:
+ * +------+----------------+-----------------------------------------------+
+ * | Phys | Virtual Index | Phys |
+ * | 0:3 | 0:11 | 4:19 |
+ * |4 bits| 12 bits | 16 bits |
+ * +------+----------------+-----------------------------------------------+
+ * WORD 1:
+ * +-----------------------+-----------------------------------------------+
+ * | Phys | Rsvd | Prefetch |Update |Rsvd |Lock |Safe |Valid |
+ * | 20:39 | | Enable |Enable | |Enable|DMA | |
+ * | 20 bits | 5 bits | 1 bit |1 bit |2 bits|1 bit |1 bit |1 bit |
+ * +-----------------------+-----------------------------------------------+
+ *
+ * The virtual index field is filled with the results of the LCI
+ * (Load Coherence Index) instruction. The 8 bits used for the virtual
+ * index are bits 12:19 of the value returned by LCI.
+ */
void CCIO_INLINE
ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, void * vba, unsigned long hints)
{
@@ -593,16 +532,22 @@ ccio_io_pdir_entry(u64 *pdir_ptr, space_
asm volatile("sync");
}
-/*
-** Remove stale entries from the I/O TLB.
-** Need to do this whenever an entry in the PDIR is marked invalid.
-*/
+/**
+ * ccio_clear_io_tlb - Remove stale entries from the I/O TLB.
+ * @ioc: The I/O Controller.
+ * @iovp: The I/O Virtual Page.
+ * @byte_cnt: The requested number of bytes to be freed from the I/O Pdir.
+ *
+ * Purge invalid I/O PDIR entries from the I/O TLB.
+ *
+ * FIXME: Can we change the byte_cnt to pages_mapped?
+ */
static CCIO_INLINE void
ccio_clear_io_tlb(struct ioc *ioc, dma_addr_t iovp, size_t byte_cnt)
{
u32 chain_size = 1 << ioc->chainid_shift;
- iovp &= ~(IOVP_SIZE-1); /* clear offset bits, just want pagenum */
+ iovp &= IOVP_MASK; /* clear offset bits, just want pagenum */
byte_cnt += chain_size;
while(byte_cnt > chain_size) {
@@ -612,10 +557,14 @@ ccio_clear_io_tlb(struct ioc *ioc, dma_a
}
}
-/***********************************************************
+/**
+ * ccio_mark_invalid - Mark the I/O Pdir entries invalid.
+ * @ioc: The I/O Controller.
+ * @iova: The I/O Virtual Address.
+ * @byte_cnt: The requested number of bytes to be freed from the I/O Pdir.
*
- * Mark the I/O Pdir entries invalid and blow away the
- * corresponding I/O TLB entries.
+ * Mark the I/O Pdir entries invalid and blow away the corresponding I/O
+ * TLB entries.
*
* FIXME: at some threshhold it might be "cheaper" to just blow
* away the entire I/O TLB instead of individual entries.
@@ -623,7 +572,9 @@ ccio_clear_io_tlb(struct ioc *ioc, dma_a
* FIXME: Uturn has 256 TLB entries. We don't need to purge every
* PDIR entry - just once for each possible TLB entry.
* (We do need to maker I/O PDIR entries invalid regardless).
- ***********************************************************/
+ *
+ * FIXME: Can we change byte_cnt to pages_mapped?
+ */
static CCIO_INLINE void
ccio_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
@@ -631,7 +582,7 @@ ccio_mark_invalid(struct ioc *ioc, dma_a
size_t saved_byte_cnt;
/* round up to nearest page size */
- saved_byte_cnt = byte_cnt = (byte_cnt + ~IOVP_MASK) & IOVP_MASK;
+ saved_byte_cnt = byte_cnt = ROUNDUP(byte_cnt, IOVP_SIZE);
while(byte_cnt > 0) {
/* invalidate one page at a time */
@@ -664,6 +615,13 @@ ccio_mark_invalid(struct ioc *ioc, dma_a
**
*****************************************************************/
+/**
+ * ccio_dma_supported - Verify the IOMMU supports the DMA address range.
+ * @dev: The PCI device.
+ * @mask: A bit mask describing the DMA address range of the device.
+ *
+ * This function implements the pci_dma_supported function.
+ */
static int
ccio_dma_supported(struct pci_dev *dev, dma_addr_t mask)
{
@@ -676,13 +634,18 @@ ccio_dma_supported(struct pci_dev *dev,
dev->dma_mask = mask; /* save it */
/* only support 32-bit devices (ie PCI/GSC) */
- return (int)(mask >= 0xffffffffUL);
+ return (int)(mask == 0xffffffffUL);
}
-
-/*
-** map_single returns a fully formed IOVA
-*/
+/**
+ * ccio_map_single - Map an address range into the IOMMU.
+ * @dev: The PCI device.
+ * @addr: The start address of the DMA region.
+ * @size: The length of the DMA region.
+ * @direction: The direction of the DMA transaction (to/from device).
+ *
+ * This function implements the pci_map_single function.
+ */
static dma_addr_t
ccio_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
{
@@ -700,8 +663,7 @@ ccio_map_single(struct pci_dev *dev, voi
offset = ((dma_addr_t) addr) & ~IOVP_MASK;
/* round up to nearest IOVP_SIZE */
- size = (size + offset + ~IOVP_MASK) & IOVP_MASK;
-
+ size = ROUNDUP(size + offset, IOVP_SIZE);
spin_lock_irqsave(&ioc->res_lock, flags);
#ifdef CONFIG_PROC_FS
@@ -709,7 +671,7 @@ ccio_map_single(struct pci_dev *dev, voi
ioc->msingle_pages += size >> IOVP_SHIFT;
#endif
- idx = ccio_alloc_range(ioc, size);
+ idx = ccio_alloc_range(ioc, (size >> IOVP_SHIFT));
iovp = (dma_addr_t)MKIOVP(idx);
pdir_start = &(ioc->pdir_base[idx]);
@@ -721,9 +683,6 @@ ccio_map_single(struct pci_dev *dev, voi
if((size % L1_CACHE_BYTES) || ((unsigned long)addr % L1_CACHE_BYTES))
hint |= HINT_SAFE_DMA;
- /* round up to nearest IOVP_SIZE */
- size = (size + ~IOVP_MASK) & IOVP_MASK;
-
while(size > 0) {
ccio_io_pdir_entry(pdir_start, KERNEL_SPACE, addr, hint);
@@ -742,8 +701,18 @@ ccio_map_single(struct pci_dev *dev, voi
return CCIO_IOVA(iovp, offset);
}
+/**
+ * ccio_unmap_single - Unmap an address range from the IOMMU.
+ * @dev: The PCI device.
+ * @addr: The start address of the DMA region.
+ * @size: The length of the DMA region.
+ * @direction: The direction of the DMA transaction (to/from device).
+ *
+ * This function implements the pci_unmap_single function.
+ */
static void
-ccio_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size, int direction)
+ccio_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size,
+ int direction)
{
#ifdef FIXME
/* Multi-IOC (ie N-class) : need to lookup IOC from dev
@@ -775,10 +744,18 @@ ccio_unmap_single(struct pci_dev *dev, d
#endif
ccio_mark_invalid(ioc, iova, size);
- ccio_free_range(ioc, iova, size);
+ ccio_free_range(ioc, iova, (size >> IOVP_SHIFT));
spin_unlock_irqrestore(&ioc->res_lock, flags);
}
+/**
+ * ccio_alloc_consistent - Allocate a consistent DMA mapping.
+ * @dev: The PCI device.
+ * @size: The length of the DMA region.
+ * @dma_handle: The DMA address handed back to the device (not the cpu).
+ *
+ * This function implements the pci_alloc_consistent function.
+ */
static void *
ccio_alloc_consistent(struct pci_dev *dev, size_t size, dma_addr_t *dma_handle)
{
@@ -803,11 +780,21 @@ ccio_alloc_consistent(struct pci_dev *de
return ret;
}
+/**
+ * ccio_free_consistent - Free a consistent DMA mapping.
+ * @dev: The PCI device.
+ * @size: The length of the DMA region.
+ * @cpu_addr: The cpu address returned from ccio_alloc_consistent.
+ * @dma_handle: The device address returned from ccio_alloc_consistent.
+ *
+ * This function implements the pci_free_consistent function.
+ */
static void
-ccio_free_consistent(struct pci_dev *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
+ccio_free_consistent(struct pci_dev *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle)
{
ccio_unmap_single(dev, dma_handle, size, 0);
- free_pages((unsigned long)vaddr, get_order(size));
+ free_pages((unsigned long)cpu_addr, get_order(size));
}
/*
@@ -817,8 +804,19 @@ ccio_free_consistent(struct pci_dev *dev
*/
#define PIDE_FLAG 0x80000000UL
+/**
+ * ccio_fill_pdir - Insert coalesced scatter/gather chunks into the I/O Pdir.
+ * @ioc: The I/O Controller.
+ * @startsg: The scatter/gather list of coalesced chunks.
+ * @nents: The number of entries in the scatter/gather list.
+ * @hint: The DMA Hint.
+ *
+ * This function inserts the coalesced scatter/gather list chunks into the
+ * I/O Controller's I/O Pdir.
+ */
static CCIO_INLINE int
-ccio_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents, unsigned long hint)
+ccio_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
+ unsigned long hint)
{
struct scatterlist *dma_sg = startsg; /* pointer to current DMA */
int n_mappings = 0;
@@ -866,7 +864,8 @@ ccio_fill_pdir(struct ioc *ioc, struct s
ioc->msg_pages += cnt >> IOVP_SHIFT;
#endif
do {
- ccio_io_pdir_entry(pdirp, KERNEL_SPACE, (void *)vaddr, hint);
+ ccio_io_pdir_entry(pdirp, KERNEL_SPACE,
+ (void *)vaddr, hint);
vaddr += IOVP_SIZE;
cnt -= IOVP_SIZE;
pdirp++;
@@ -877,17 +876,6 @@ ccio_fill_pdir(struct ioc *ioc, struct s
return(n_mappings);
}
-
-/*
-** Two address ranges are DMA contiguous *iff* "end of prev" and
-** "start of next" are both on a page boundry.
-**
-** (shift left is a quick trick to mask off upper bits)
-*/
-#define DMA_CONTIG(__X, __Y) \
- (((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - PAGE_SHIFT)) == 0UL)
-
-
/*
** First pass is to walk the SG list and determine where the breaks are
** in the DMA stream. Allocates PDIR entries but does not fill them.
@@ -930,27 +918,16 @@ ccio_coalesce_chunks(struct ioc *ioc, st
unsigned long startsg_end;
startsg++;
- startsg_end = (unsigned long)startsg->address + startsg->length;
+ startsg_end = (unsigned long)startsg->address +
+ startsg->length;
/* PARANOID: clear entries */
sg_dma_address(startsg) = 0;
sg_dma_len(startsg) = 0;
/*
- ** First look for virtually contiguous blocks.
- ** PARISC needs this since it's cache is virtually
- ** indexed and we need the associated virtual
- ** address for each I/O address we map.
- **
- ** 1) Can we prepend the next transaction?
- ** Only if they are on the same page.
- ** And we don't mind DMA order wrong. NOT.
- ** Feasible but requires substantial work.
+ ** Append the next transaction?
*/
-
- /*
- ** 2) or append the next transaction?
- */
if(vcontig_end == (unsigned long) startsg->address) {
vcontig_len += startsg->length;
vcontig_end += startsg->length;
@@ -982,24 +959,29 @@ ccio_coalesce_chunks(struct ioc *ioc, st
** Allocate space for DMA stream.
*/
sg_dma_len(vcontig_sg) = vcontig_len;
- dma_len = (dma_len + dma_offset + ~IOVP_MASK) & IOVP_MASK;
+ dma_len = ROUNDUP(dma_len + dma_offset, IOVP_SIZE);
sg_dma_address(dma_sg) =
PIDE_FLAG
- | (ccio_alloc_range(ioc, dma_len) << IOVP_SHIFT)
+ | (ccio_alloc_range(ioc, (dma_len >> IOVP_SHIFT)) << IOVP_SHIFT)
| dma_offset;
n_mappings++;
}
return n_mappings;
}
-
-/*
-** And this algorithm still generally only ends up coalescing entries
-** that happens to be on the same page due to how sglists are assembled.
-*/
+/**
+ * ccio_map_sg - Map the scatter/gather list into the IOMMU.
+ * @dev: The PCI device.
+ * @sglist: The scatter/gather list to be mapped in the IOMMU.
+ * @nents: The number of entries in the scatter/gather list.
+ * @direction: The direction of the DMA transaction (to/from device).
+ *
+ * This function implements the pci_map_sg function.
+ */
static int
-ccio_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, int direction)
+ccio_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
+ int direction)
{
struct ioc *ioc = &ccio_list->ioc[0];
int coalesced, filled = 0;
@@ -1011,20 +993,14 @@ ccio_map_sg(struct pci_dev *dev, struct
/* Fast path single entry scatterlists. */
if(nents == 1) {
sg_dma_address(sglist)= ccio_map_single(dev, sglist->address,
- sglist->length, direction);
+ sglist->length,
+ direction);
sg_dma_len(sglist)= sglist->length;
return 1;
}
spin_lock_irqsave(&ioc->res_lock, flags);
-#ifdef ASSERT_PDIR_SANITY
- if(ccio_check_pdir(ioc, "Check before ccio_map_sg()")) {
- ccio_dump_sg(ioc, sglist, nents);
- panic("Check before ccio_map_sg()");
- }
-#endif
-
#ifdef CONFIG_PROC_FS
ioc->msg_calls++;
#endif
@@ -1049,13 +1025,6 @@ ccio_map_sg(struct pci_dev *dev, struct
*/
filled = ccio_fill_pdir(ioc, sglist, nents, hint);
-#ifdef ASSERT_PDIR_SANITY
- if(ccio_check_pdir(ioc, "Check after ccio_map_sg()")) {
- ccio_dump_sg(ioc, sglist, nents);
- panic("Check after ccio_map_sg()\n");
- }
-#endif
-
spin_unlock_irqrestore(&ioc->res_lock, flags);
ASSERT(coalesced == filled);
@@ -1064,13 +1033,21 @@ ccio_map_sg(struct pci_dev *dev, struct
return filled;
}
+/**
+ * ccio_unmap_sg - Unmap the scatter/gather list from the IOMMU.
+ * @dev: The PCI device.
+ * @sglist: The scatter/gather list to be unmapped from the IOMMU.
+ * @nents: The number of entries in the scatter/gather list.
+ * @direction: The direction of the DMA transaction (to/from device).
+ *
+ * This function implements the pci_unmap_sg function.
+ */
static void
-ccio_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, int direction)
+ccio_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
+ int direction)
{
- struct ioc *ioc = &ccio_list->ioc[0]; /* FIXME : see Multi-IOC below */
-#ifdef ASSERT_PDIR_SANITY
- unsigned long flags;
-#endif
+ /* FIXME : see Multi-IOC below */
+ struct ioc *ioc = &ccio_list->ioc[0];
DBG_RUN_SG("%s() START %d entries, %p,%x\n",
__FUNCTION__, nents, sglist->address, sglist->length);
@@ -1079,28 +1056,17 @@ ccio_unmap_sg(struct pci_dev *dev, struc
ioc->usg_calls++;
#endif
-#ifdef ASSERT_PDIR_SANITY
- spin_lock_irqsave(&ioc->res_lock, flags);
- ccio_check_pdir(ioc,"Check before ccio_unmap_sg()");
- spin_unlock_irqrestore(&ioc->res_lock, flags);
-#endif
-
while(sg_dma_len(sglist) && nents--) {
#ifdef CONFIG_PROC_FS
ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
#endif
- ccio_unmap_single(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction);
+ ccio_unmap_single(dev, sg_dma_address(sglist),
+ sg_dma_len(sglist), direction);
++sglist;
}
DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents);
-
-#ifdef ASSERT_PDIR_SANITY
- spin_lock_irqsave(&ioc->res_lock, flags);
- ccio_check_pdir(ioc,"Check after ccio_unmap_sg()");
- spin_unlock_irqrestore(&ioc->res_lock, flags);
-#endif
}
static struct pci_dma_ops ccio_ops = {
@@ -1214,8 +1180,6 @@ ccio_resource_map(char *buf, char **star
}
#endif
-
-/* CUJO20 KLUDGE start */
static struct {
u16 hversion;
u8 spa;
@@ -1223,16 +1187,6 @@ static struct {
u32 foo[3]; /* 16 bytes total */
} cujo_iodc;
-/*
-** CUJO 2.0 incorrectly decodes a memory access for specific
-** pages (every page at specific iotlb locations dependent
-** upon where the cujo is flexed - diff on raven/firehawk.
-** resulting in an hpmc and/or silent data corruption.
-** Workaround is to prevent use of those I/O TLB entries
-** by marking the suspect bitmap range entries as busy.
-*/
-
-/* CUJO20 KLUDGE start */
#define CUJO_20_BITMASK 0x0ffff000 /* upper nibble is a don't care */
#define CUJO_20_STEP 0x10000000 /* inc upper nibble */
#define CUJO_20_BADPAGE1 0x01003000 /* pages that hpmc on raven U+ */
@@ -1240,10 +1194,20 @@ static struct {
#define CUJO_20_BADHVERS 0x6821 /* low nibble 1 is cujo rev 2.0 */
#define CUJO_RAVEN_LOC 0xf1000000UL /* cujo location on raven U+ */
#define CUJO_FIREHAWK_LOC 0xf1604000UL /* cujo location on firehawk U+ */
-/* CUJO20 KLUDGE end */
+/**
+ * ccio_cujo20_fixup - Detect and work around a bug in the CUJO 2.0 chip.
+ * @ioc: The I/O Controller.
+ *
+ * CUJO 2.0 incorrectly decodes a memory access for specific
+ * pages (every page at specific I/O TLB locations, dependent
+ * upon where the cujo is flexed; differs on raven/firehawk),
+ * resulting in an HPMC and/or silent data corruption.
+ * The workaround is to prevent use of those I/O TLB entries
+ * by marking the suspect bitmap range entries as busy.
+ */
static void
-ccio_cujo20_hack(struct ioc *ioc)
+ccio_cujo20_fixup(struct ioc *ioc)
{
unsigned long bytecnt;
u32 iovp = 0, io_io_low;
@@ -1256,12 +1220,14 @@ ccio_cujo20_hack(struct ioc *ioc)
if(!(CUJO_RAVEN_LOC == io_io_low || CUJO_FIREHAWK_LOC == io_io_low))
return;
- status = pdc_iodc_read(&bytecnt, (void *)CUJO_RAVEN_LOC, 0, &cujo_iodc, 16);
+ status = pdc_iodc_read(&bytecnt, (void *)CUJO_RAVEN_LOC, 0,
+ &cujo_iodc, 16);
if(0 == status) {
if(CUJO_20_BADHVERS == cujo_iodc.hversion)
iovp = CUJO_20_BADPAGE1;
} else {
- status = pdc_iodc_read(&bytecnt, (void *)CUJO_FIREHAWK_LOC, 0, &cujo_iodc, 16);
+ status = pdc_iodc_read(&bytecnt, (void *)CUJO_FIREHAWK_LOC, 0,
+ &cujo_iodc, 16);
if(0 == status) {
if(CUJO_20_BADHVERS == cujo_iodc.hversion)
iovp = CUJO_20_BADPAGE2;
@@ -1278,7 +1244,7 @@ ccio_cujo20_hack(struct ioc *ioc)
/*
** mark bit entries that match "bad page"
*/
- idx = PDIR_INDEX(iovp)>>3;
+ idx = PDIR_INDEX(iovp) >> 3;
mask = 0xff;
while(idx * sizeof(u8) < ioc->res_size) {
@@ -1286,7 +1252,6 @@ ccio_cujo20_hack(struct ioc *ioc)
idx += (PDIR_INDEX(CUJO_20_STEP)>>3);
}
}
-/* CUJO20 KLUDGE end */
#if 0
/* GRANT - is this needed for U2 or not? */
@@ -1316,6 +1281,14 @@ ccio_get_iotlb_size(struct parisc_device
#define CCIO_CHAINID_MASK 0xff
#endif /* 0 */
+/**
+ * ccio_ioc_init - Initialize the I/O Controller.
+ * @ioc: The I/O Controller.
+ *
+ * Initialize the I/O Controller, which includes setting up the
+ * I/O Page Directory, the resource map, and initializing the
+ * U2/Uturn chip into virtual mode.
+ */
static void
ccio_ioc_init(struct ioc *ioc)
{
@@ -1376,7 +1349,8 @@ ccio_ioc_init(struct ioc *ioc)
__FUNCTION__, ioc->ioc_hpa, physmem>>20, iova_space_size>>20,
iov_order + PAGE_SHIFT, ioc->pdir_size);
- ioc->pdir_base = (u64 *)__get_free_pages(GFP_KERNEL, get_order(ioc->pdir_size));
+ ioc->pdir_base = (u64 *)__get_free_pages(GFP_KERNEL,
+ get_order(ioc->pdir_size));
if(NULL == ioc->pdir_base) {
panic(__FILE__ ":%s() could not allocate I/O Page Table\n", __FUNCTION__);
}
@@ -1389,7 +1363,8 @@ ccio_ioc_init(struct ioc *ioc)
ioc->res_size = (ioc->pdir_size / sizeof(u64)) >> 3;
DBG_INIT("%s() res_size 0x%x\n", __FUNCTION__, ioc->res_size);
- ioc->res_map = (u8 *)__get_free_pages(GFP_KERNEL, get_order(ioc->res_size));
+ ioc->res_map = (u8 *)__get_free_pages(GFP_KERNEL,
+ get_order(ioc->res_size));
if(NULL == ioc->res_map) {
panic(__FILE__ ":%s() could not allocate resource map\n", __FUNCTION__);
}
@@ -1436,18 +1411,20 @@ ccio_ioc_init(struct ioc *ioc)
/*
** See if we need the cujo 2.0 work around.
*/
- /* CUJO20 KLUDGE start */
- ccio_cujo20_hack(ioc);
- /* CUJO20 KLUDGE end */
+ ccio_cujo20_fixup(ioc);
}
int do_native_bus_walk(unsigned long io_io_low);
-/*
-** Determine if ccio should claim this chip (return 0) or not (return 1).
-** If so, initialize the chip and tell other partners in crime they
-** have work to do.
-*/
+/**
+ * ccio_driver_callback - Determine if ccio should claim this device.
+ * @dev: The device.
+ *
+ * Determine if ccio should claim this chip (return 0) or not (return 1).
+ * If so, initialize the chip and tell other partners in crime they
+ * have work to do.
+ */
static int
ccio_driver_callback(struct parisc_device *dev)
{
@@ -1505,11 +1482,10 @@ ccio_driver_callback(struct parisc_devic
io_io_low = READ_U32(&ioa->ioc[i].ioc_hpa->io_io_low) << 16;
io_io_high = READ_U32(&ioa->ioc[i].ioc_hpa->io_io_high) << 16;
- printk(KERN_INFO "io_io_high: 0x%08lx io_io_low: 0x%08lx\n", io_io_low, io_io_high);
+ printk(KERN_INFO "io_io_high: 0x%08lx io_io_low: 0x%08lx\n", io_io_high, io_io_low);
start_index = get_num_pa_dev();
num_devices = do_native_bus_walk(io_io_low);
print_pa_devices(start_index, num_devices);
-
return 0;
}
@@ -1526,6 +1502,11 @@ static struct parisc_driver ccio_driver
probe: ccio_driver_callback,
};
+/**
+ * ccio_init - ccio initialization procedure.
+ *
+ * Register this driver.
+ */
void __init ccio_init(void)
{
register_parisc_driver(&ccio_driver);