2 ** IA64 System Bus Adapter (SBA) I/O MMU manager
4 ** (c) Copyright 2002-2004 Alex Williamson
5 ** (c) Copyright 2002-2003 Grant Grundler
6 ** (c) Copyright 2002-2004 Hewlett-Packard Company
8 ** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)
9 ** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
11 ** This program is free software; you can redistribute it and/or modify
12 ** it under the terms of the GNU General Public License as published by
13 ** the Free Software Foundation; either version 2 of the License, or
14 ** (at your option) any later version.
17 ** This module initializes the IOC (I/O Controller) found on HP
18 ** McKinley machines and their successors.
22 #include <linux/config.h>
23 #include <linux/types.h>
24 #include <linux/kernel.h>
25 #include <linux/module.h>
26 #include <linux/spinlock.h>
27 #include <linux/slab.h>
28 #include <linux/init.h>
30 #include <linux/string.h>
31 #include <linux/pci.h>
32 #include <linux/proc_fs.h>
33 #include <linux/seq_file.h>
34 #include <linux/acpi.h>
35 #include <linux/efi.h>
36 #include <linux/nodemask.h>
37 #include <linux/bitops.h> /* hweight64() */
39 #include <asm/delay.h> /* ia64_get_itc() */
41 #include <asm/page.h> /* PAGE_OFFSET */
43 #include <asm/system.h> /* wmb() */
45 #include <asm/acpi-ext.h>
50 ** Enabling timing search of the pdir resource map. Output in /proc.
51 ** Disabled by default to optimize performance.
53 #undef PDIR_SEARCH_TIMING
56 ** This option allows cards capable of 64bit DMA to bypass the IOMMU. If
57 ** not defined, all DMA will be 32bit and go through the TLB.
58 ** There's potentially a conflict in the bio merge code with us
59 ** advertising an iommu, but then bypassing it. Since I/O MMU bypassing
60 ** appears to give more performance than bio-level virtual merging, we'll
61 ** do the former for now. NOTE: BYPASS_SG also needs to be undef'd to
62 ** completely restrict DMA to the IOMMU.
64 #define ALLOW_IOV_BYPASS
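/*
** Illustrative sketch (not part of the driver): with ALLOW_IOV_BYPASS the
** mapping routines hand back the physical address directly whenever it
** already fits under the device's DMA mask, and only fall back to a pdir
** entry otherwise:
**
**	unsigned long pci_addr = virt_to_phys(addr);
**
**	if ((pci_addr & ~to_pci_dev(dev)->dma_mask) == 0)
**		return (dma_addr_t) pci_addr;		(bypass, no IOTLB involvement)
**	... otherwise allocate IOVA space and program the pdir ...
*/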
67 ** This option specifically allows/disallows bypassing scatterlists with
68 ** multiple entries. Coalescing these entries can allow better DMA streaming
69 ** and in some cases shows better performance than entirely bypassing the
70 ** IOMMU. Performance increase on the order of 1-2% sequential output/input
71 ** using bonnie++ on a RAID0 MD device (sym2 & mpt).
73 #undef ALLOW_IOV_BYPASS_SG
76 ** If a device prefetches beyond the end of a valid pdir entry, it will cause
77 ** a hard failure, ie. MCA. Version 3.0 and later of the zx1 LBA should
78 ** disconnect on 4k boundaries and prevent such issues. If the device is
79 ** particularly aggressive, this option will keep the entire pdir valid such
80 ** that prefetching will hit a valid address. This could severely impact
81 ** error containment, and is therefore off by default. The page that is
82 ** used for spill-over is poisoned, so that should help debugging somewhat.
84 #undef FULL_VALID_PDIR
86 #define ENABLE_MARK_CLEAN
89 ** The number of debug flags is a clue - this code is fragile. NOTE: since
90 ** tightening the use of res_lock the resource bitmap and actual pdir are no
91 ** longer guaranteed to stay in sync. The sanity checking code isn't going to
96 #undef DEBUG_SBA_RUN_SG
97 #undef DEBUG_SBA_RESOURCE
98 #undef ASSERT_PDIR_SANITY
99 #undef DEBUG_LARGE_SG_ENTRIES
102 #if defined(FULL_VALID_PDIR) && defined(ASSERT_PDIR_SANITY)
103 #error FULL_VALID_PDIR and ASSERT_PDIR_SANITY are mutually exclusive
106 #define SBA_INLINE __inline__
107 /* #define SBA_INLINE */
109 #ifdef DEBUG_SBA_INIT
110 #define DBG_INIT(x...) printk(x)
112 #define DBG_INIT(x...)
116 #define DBG_RUN(x...) printk(x)
118 #define DBG_RUN(x...)
121 #ifdef DEBUG_SBA_RUN_SG
122 #define DBG_RUN_SG(x...) printk(x)
124 #define DBG_RUN_SG(x...)
128 #ifdef DEBUG_SBA_RESOURCE
129 #define DBG_RES(x...) printk(x)
131 #define DBG_RES(x...)
135 #define DBG_BYPASS(x...) printk(x)
137 #define DBG_BYPASS(x...)
140 #ifdef ASSERT_PDIR_SANITY
141 #define ASSERT(expr) \
143 printk( "\n" __FILE__ ":%d: Assertion " #expr " failed!\n",__LINE__); \
151 ** The number of pdir entries to "free" before issuing
152 ** a read to PCOM register to flush out PCOM writes.
153 ** Interacts with allocation granularity (ie 4 or 8 entries
154 ** allocated and free'd/purged at a time might make this
155 ** less interesting).
157 #define DELAYED_RESOURCE_CNT 64
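/*
** Rough sketch (illustration only) of how the delayed-free path in
** sba_unmap_single() uses this: unmapped {iova,size} pairs are parked in
** ioc->saved[] and the expensive purge/flush is amortized over the batch:
**
**	ioc->saved[ioc->saved_cnt].iova = iova;
**	ioc->saved[ioc->saved_cnt].size = size;
**	if (++ioc->saved_cnt >= DELAYED_RESOURCE_CNT) {
**		while (ioc->saved_cnt--) {
**			sba_mark_invalid(ioc, ...);		(purge IOTLB entry)
**			sba_free_range(ioc, ...);		(give back bitmap bits)
**		}
**		READ_REG(ioc->ioc_hpa + IOC_PCOM);	(one flush for the whole batch)
**	}
*/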
159 #define ZX1_IOC_ID ((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP)
160 #define ZX2_IOC_ID ((PCI_DEVICE_ID_HP_ZX2_IOC << 16) | PCI_VENDOR_ID_HP)
161 #define REO_IOC_ID ((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP)
162 #define SX1000_IOC_ID ((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP)
164 #define ZX1_IOC_OFFSET 0x1000 /* ACPI reports SBA, we want IOC */
166 #define IOC_FUNC_ID 0x000
167 #define IOC_FCLASS 0x008 /* function class, bist, header, rev... */
168 #define IOC_IBASE 0x300 /* IO TLB */
169 #define IOC_IMASK 0x308
170 #define IOC_PCOM 0x310
171 #define IOC_TCNFG 0x318
172 #define IOC_PDIR_BASE 0x320
174 #define IOC_ROPE0_CFG 0x500
175 #define IOC_ROPE_AO 0x10 /* Allow "Relaxed Ordering" */
178 /* AGP GART driver looks for this */
179 #define ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL
182 ** The zx1 IOC supports 4/8/16/64KB page sizes (see TCNFG register)
184 ** Some IOCs (sx1000) can run at the above page sizes, but are
185 ** really only supported using the IOC at a 4k page size.
187 ** iovp_size could only be greater than PAGE_SIZE if we are
188 ** confident the drivers really only touch the next physical
189 ** page iff that driver instance owns it.
191 static unsigned long iovp_size;
192 static unsigned long iovp_shift;
193 static unsigned long iovp_mask;
196 void __iomem *ioc_hpa; /* I/O MMU base address */
197 char *res_map; /* resource map, bit == pdir entry */
198 u64 *pdir_base; /* physical base address */
199 unsigned long ibase; /* pdir IOV Space base */
200 unsigned long imask; /* pdir IOV Space mask */
202 unsigned long *res_hint; /* next avail IOVP - circular search */
203 unsigned long dma_mask;
204 spinlock_t res_lock; /* protects the resource bitmap, but must be held when */
205 /* clearing pdir to prevent races with allocations. */
206 unsigned int res_bitshift; /* from the RIGHT! */
207 unsigned int res_size; /* size of resource map in bytes */
209 unsigned int node; /* node where this IOC lives */
211 #if DELAYED_RESOURCE_CNT > 0
212 spinlock_t saved_lock; /* may want to try to get this on a separate cacheline */
213 /* than res_lock for bigger systems. */
215 struct sba_dma_pair {
218 } saved[DELAYED_RESOURCE_CNT];
221 #ifdef PDIR_SEARCH_TIMING
222 #define SBA_SEARCH_SAMPLE 0x100
223 unsigned long avg_search[SBA_SEARCH_SAMPLE];
224 unsigned long avg_idx; /* current index into avg_search */
227 /* Stuff we don't need in performance path */
228 struct ioc *next; /* list of IOC's in system */
229 acpi_handle handle; /* for multiple IOC's */
231 unsigned int func_id;
232 unsigned int rev; /* HW revision of chip */
234 unsigned int pdir_size; /* in bytes, determined by IOV Space size */
235 struct pci_dev *sac_only_dev;
238 static struct ioc *ioc_list;
239 static int reserve_sba_gart = 1;
241 static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
242 static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t);
244 #define sba_sg_address(sg) (page_address((sg)->page) + (sg)->offset)
246 #ifdef FULL_VALID_PDIR
247 static u64 prefetch_spill_page;
251 # define GET_IOC(dev) (((dev)->bus == &pci_bus_type) \
252 ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)
254 # define GET_IOC(dev) NULL
258 ** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
259 ** (or rather not merge) DMA's into manageable chunks.
260 ** On parisc, this is more of the software/tuning constraint
261 ** rather than the HW. I/O MMU allocation algorithms can be
262 ** faster with smaller sizes (to some degree).
264 #define DMA_CHUNK_SIZE (BITS_PER_LONG*iovp_size)
266 #define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1))
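/*
** Example: with 4K IOVP pages, ROUNDUP(0x1801, iovp_size) == 0x2000 and
** DMA_CHUNK_SIZE == 64 * 4K == 256KB on a 64-bit kernel.  sba_map_single()
** uses the same arithmetic to round (size + offset) up to whole IOVP pages.
*/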
268 /************************************
269 ** SBA register read and write support
271 ** BE WARNED: register writes are posted.
272 ** (ie follow writes which must reach HW with a read)
275 #define READ_REG(addr) __raw_readq(addr)
276 #define WRITE_REG(val, addr) __raw_writeq(val, addr)
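/*
** Illustration: because the writes are posted, code that must know a purge
** or enable actually reached the IOC follows the write with a read from the
** same register space, e.g.
**
**	WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa + IOC_PCOM);
**	READ_REG(ioc->ioc_hpa + IOC_PCOM);		(flush the posted write)
*/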
278 #ifdef DEBUG_SBA_INIT
281 * sba_dump_tlb - debugging only - print IOMMU operating parameters
282 * @hpa: base address of the IOMMU
284 * Print the size/location of the IO MMU PDIR.
287 sba_dump_tlb(char *hpa)
289 DBG_INIT("IO TLB at 0x%p\n", (void *)hpa);
290 DBG_INIT("IOC_IBASE : %016lx\n", READ_REG(hpa+IOC_IBASE));
291 DBG_INIT("IOC_IMASK : %016lx\n", READ_REG(hpa+IOC_IMASK));
292 DBG_INIT("IOC_TCNFG : %016lx\n", READ_REG(hpa+IOC_TCNFG));
293 DBG_INIT("IOC_PDIR_BASE: %016lx\n", READ_REG(hpa+IOC_PDIR_BASE));
299 #ifdef ASSERT_PDIR_SANITY
302 * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
303 * @ioc: IO MMU structure which owns the pdir we are interested in.
304 * @msg: text to print on the output line.
307 * Print one entry of the IO MMU PDIR in human readable form.
310 sba_dump_pdir_entry(struct ioc
*ioc
, char *msg
, uint pide
)
312 /* start printing from lowest pde in rval */
313 u64
*ptr
= &ioc
->pdir_base
[pide
& ~(BITS_PER_LONG
- 1)];
314 unsigned long *rptr
= (unsigned long *) &ioc
->res_map
[(pide
>>3) & -sizeof(unsigned long)];
317 printk(KERN_DEBUG
"SBA: %s rp %p bit %d rval 0x%lx\n",
318 msg
, rptr
, pide
& (BITS_PER_LONG
- 1), *rptr
);
321 while (rcnt
< BITS_PER_LONG
) {
322 printk(KERN_DEBUG
"%s %2d %p %016Lx\n",
323 (rcnt
== (pide
& (BITS_PER_LONG
- 1)))
325 rcnt
, ptr
, (unsigned long long) *ptr
);
329 printk(KERN_DEBUG
"%s", msg
);
334 * sba_check_pdir - debugging only - consistency checker
335 * @ioc: IO MMU structure which owns the pdir we are interested in.
336 * @msg: text to print on the output line.
338 * Verify the resource map and pdir state is consistent
341 sba_check_pdir(struct ioc
*ioc
, char *msg
)
343 u64
*rptr_end
= (u64
*) &(ioc
->res_map
[ioc
->res_size
]);
344 u64
*rptr
= (u64
*) ioc
->res_map
; /* resource map ptr */
345 u64
*pptr
= ioc
->pdir_base
; /* pdir ptr */
348 while (rptr
< rptr_end
) {
350 int rcnt
; /* number of bits we might check */
356 /* Get last byte and highest bit from that */
357 u32 pde
= ((u32
)((*pptr
>> (63)) & 0x1));
358 if ((rval
& 0x1) ^ pde
)
361 ** BUMMER! -- res_map != pdir --
362 ** Dump rval and matching pdir entries
364 sba_dump_pdir_entry(ioc
, msg
, pide
);
368 rval
>>= 1; /* try the next bit */
372 rptr
++; /* look at next word of res_map */
374 /* It'd be nice if we always got here :^) */
380 * sba_dump_sg - debugging only - print Scatter-Gather list
381 * @ioc: IO MMU structure which owns the pdir we are interested in.
382 * @startsg: head of the SG list
383 * @nents: number of entries in SG list
385 * print the SG list so we can verify it's correct by hand.
388 sba_dump_sg( struct ioc
*ioc
, struct scatterlist
*startsg
, int nents
)
390 while (nents
-- > 0) {
391 printk(KERN_DEBUG
" %d : DMA %08lx/%05x CPU %p\n", nents
,
392 startsg
->dma_address
, startsg
->dma_length
,
393 sba_sg_address(startsg
));
399 sba_check_sg( struct ioc
*ioc
, struct scatterlist
*startsg
, int nents
)
401 struct scatterlist
*the_sg
= startsg
;
402 int the_nents
= nents
;
404 while (the_nents
-- > 0) {
405 if (sba_sg_address(the_sg
) == 0x0UL
)
406 sba_dump_sg(NULL
, startsg
, nents
);
411 #endif /* ASSERT_PDIR_SANITY */
416 /**************************************************************
418 * I/O Pdir Resource Management
420 * Bits set in the resource map are in use.
421 * Each bit can represent a number of pages.
422 * LSbs represent lower addresses (IOVA's).
424 ***************************************************************/
425 #define PAGES_PER_RANGE 1 /* could increase this to 4 or 8 if needed */
427 /* Convert from IOVP to IOVA and vice versa. */
428 #define SBA_IOVA(ioc,iovp,offset) ((ioc->ibase) | (iovp) | (offset))
429 #define SBA_IOVP(ioc,iova) ((iova) & ~(ioc->ibase))
431 #define PDIR_ENTRY_SIZE sizeof(u64)
433 #define PDIR_INDEX(iovp) ((iovp)>>iovp_shift)
435 #define RESMAP_MASK(n) ~(~0UL << (n))
436 #define RESMAP_IDX_MASK (sizeof(unsigned long) - 1)
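/*
** Worked example (4K IOVP pages, iovp_shift == 12): for
** iova == ioc->ibase | 0x00123456,
**
**	iovp   = SBA_IOVP(ioc, iova)  == 0x00123456
**	offset = iova & ~iovp_mask    == 0x456
**	pide   = PDIR_INDEX(iovp)     == 0x123
**	RESMAP_MASK(3)                == 0x7	(covers three pdir entries)
**
** i.e. bit 0x123 of the resource map guards pdir_base[0x123].
*/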
440 * For most cases the normal get_order is sufficient; however, it limits us
441 * to PAGE_SIZE being the minimum mapping alignment and TC flush granularity.
442 * It only incurs about 1 clock cycle to use this one with the static variable
443 * and makes the code more intuitive.
445 static SBA_INLINE int
446 get_iovp_order (unsigned long size)
448 long double d = size - 1;
451 order = ia64_getf_exp(d);
452 order = order - iovp_shift - 0xffff + 1;
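/*
** For illustration only: a portable equivalent of the getf.exp trick above
** (same result for any size of at least one IOVP page), useful as a mental
** cross-check:
**
**	static int get_iovp_order_generic(unsigned long size)
**	{
**		unsigned long pages = (size + iovp_size - 1) >> iovp_shift;
**		int order = 0;
**
**		while ((1UL << order) < pages)
**			order++;
**		return order;
**	}
**
** e.g. size == 3 * iovp_size yields order 2 (rounded up to 4 pages).
*/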
459 * sba_search_bitmap - find free space in IO PDIR resource bitmap
460 * @ioc: IO MMU structure which owns the pdir we are interested in.
461 * @bits_wanted: number of entries we need.
463 * Find consecutive free bits in resource bitmap.
464 * Each bit represents one entry in the IO Pdir.
465 * Cool perf optimization: search for log2(size) bits at a time.
467 static SBA_INLINE
unsigned long
468 sba_search_bitmap(struct ioc
*ioc
, unsigned long bits_wanted
)
470 unsigned long *res_ptr
= ioc
->res_hint
;
471 unsigned long *res_end
= (unsigned long *) &(ioc
->res_map
[ioc
->res_size
]);
472 unsigned long pide
= ~0UL;
474 ASSERT(((unsigned long) ioc
->res_hint
& (sizeof(unsigned long) - 1UL)) == 0);
475 ASSERT(res_ptr
< res_end
);
478 * N.B. REO/Grande defect AR2305 can cause TLB fetch timeouts
479 * if a TLB entry is purged while in use. sba_mark_invalid()
480 * purges IOTLB entries in power-of-two sizes, so we also
481 * allocate IOVA space in power-of-two sizes.
483 bits_wanted
= 1UL << get_iovp_order(bits_wanted
<< iovp_shift
);
485 if (likely(bits_wanted
== 1)) {
486 unsigned int bitshiftcnt
;
487 for(; res_ptr
< res_end
; res_ptr
++) {
488 if (likely(*res_ptr
!= ~0UL)) {
489 bitshiftcnt
= ffz(*res_ptr
);
490 *res_ptr
|= (1UL << bitshiftcnt
);
491 pide
= ((unsigned long)res_ptr
- (unsigned long)ioc
->res_map
);
492 pide
<<= 3; /* convert to bit address */
494 ioc
->res_bitshift
= bitshiftcnt
+ bits_wanted
;
502 if (likely(bits_wanted
<= BITS_PER_LONG
/2)) {
504 ** Search the resource bit map on well-aligned values.
505 ** "o" is the alignment.
506 ** We need the alignment to invalidate I/O TLB using
507 ** SBA HW features in the unmap path.
509 unsigned long o
= 1 << get_iovp_order(bits_wanted
<< iovp_shift
);
510 uint bitshiftcnt
= ROUNDUP(ioc
->res_bitshift
, o
);
511 unsigned long mask
, base_mask
;
513 base_mask
= RESMAP_MASK(bits_wanted
);
514 mask
= base_mask
<< bitshiftcnt
;
516 DBG_RES("%s() o %ld %p", __FUNCTION__
, o
, res_ptr
);
517 for(; res_ptr
< res_end
; res_ptr
++)
519 DBG_RES(" %p %lx %lx\n", res_ptr
, mask
, *res_ptr
);
521 for (; mask
; mask
<<= o
, bitshiftcnt
+= o
) {
522 if(0 == ((*res_ptr
) & mask
)) {
523 *res_ptr
|= mask
; /* mark resources busy! */
524 pide
= ((unsigned long)res_ptr
- (unsigned long)ioc
->res_map
);
525 pide
<<= 3; /* convert to bit address */
527 ioc
->res_bitshift
= bitshiftcnt
+ bits_wanted
;
541 qwords
= bits_wanted
>> 6; /* /64 */
542 bits
= bits_wanted
- (qwords
* BITS_PER_LONG
);
544 end
= res_end
- qwords
;
546 for (; res_ptr
< end
; res_ptr
++) {
547 for (i
= 0 ; i
< qwords
; i
++) {
551 if (bits
&& res_ptr
[i
] && (__ffs(res_ptr
[i
]) < bits
))
554 /* Found it, mark it */
555 for (i
= 0 ; i
< qwords
; i
++)
557 res_ptr
[i
] |= RESMAP_MASK(bits
);
559 pide
= ((unsigned long)res_ptr
- (unsigned long)ioc
->res_map
);
560 pide
<<= 3; /* convert to bit address */
562 ioc
->res_bitshift
= bits
;
570 prefetch(ioc
->res_map
);
571 ioc
->res_hint
= (unsigned long *) ioc
->res_map
;
572 ioc
->res_bitshift
= 0;
576 ioc
->res_hint
= res_ptr
;
582 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
583 * @ioc: IO MMU structure which owns the pdir we are interested in.
584 * @size: number of bytes to create a mapping for
586 * Given a size, find consecutive unmarked and then mark those bits in the
590 sba_alloc_range(struct ioc
*ioc
, size_t size
)
592 unsigned int pages_needed
= size
>> iovp_shift
;
593 #ifdef PDIR_SEARCH_TIMING
594 unsigned long itc_start
;
599 ASSERT(pages_needed
);
600 ASSERT(0 == (size
& ~iovp_mask
));
602 spin_lock_irqsave(&ioc
->res_lock
, flags
);
604 #ifdef PDIR_SEARCH_TIMING
605 itc_start
= ia64_get_itc();
608 ** "seek and ye shall find"...praying never hurts either...
610 pide
= sba_search_bitmap(ioc
, pages_needed
);
611 if (unlikely(pide
>= (ioc
->res_size
<< 3))) {
612 pide
= sba_search_bitmap(ioc
, pages_needed
);
613 if (unlikely(pide
>= (ioc
->res_size
<< 3))) {
614 #if DELAYED_RESOURCE_CNT > 0
616 ** With delayed resource freeing, we can give this one more shot. We're
617 ** getting close to being in trouble here, so do what we can to make this
620 spin_lock(&ioc
->saved_lock
);
621 if (ioc
->saved_cnt
> 0) {
622 struct sba_dma_pair
*d
;
623 int cnt
= ioc
->saved_cnt
;
625 d
= &(ioc
->saved
[ioc
->saved_cnt
]);
628 sba_mark_invalid(ioc
, d
->iova
, d
->size
);
629 sba_free_range(ioc
, d
->iova
, d
->size
);
633 READ_REG(ioc
->ioc_hpa
+IOC_PCOM
); /* flush purges */
635 spin_unlock(&ioc
->saved_lock
);
637 pide
= sba_search_bitmap(ioc
, pages_needed
);
638 if (unlikely(pide
>= (ioc
->res_size
<< 3)))
639 panic(__FILE__
": I/O MMU @ %p is out of mapping resources\n",
642 panic(__FILE__
": I/O MMU @ %p is out of mapping resources\n",
648 #ifdef PDIR_SEARCH_TIMING
649 ioc
->avg_search
[ioc
->avg_idx
++] = (ia64_get_itc() - itc_start
) / pages_needed
;
650 ioc
->avg_idx
&= SBA_SEARCH_SAMPLE
- 1;
653 prefetchw(&(ioc
->pdir_base
[pide
]));
655 #ifdef ASSERT_PDIR_SANITY
656 /* verify the first enable bit is clear */
657 if(0x00 != ((u8
*) ioc
->pdir_base
)[pide
*PDIR_ENTRY_SIZE
+ 7]) {
658 sba_dump_pdir_entry(ioc
, "sba_search_bitmap() botched it?", pide
);
662 DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
663 __FUNCTION__
, size
, pages_needed
, pide
,
664 (uint
) ((unsigned long) ioc
->res_hint
- (unsigned long) ioc
->res_map
),
667 spin_unlock_irqrestore(&ioc
->res_lock
, flags
);
674 * sba_free_range - unmark bits in IO PDIR resource bitmap
675 * @ioc: IO MMU structure which owns the pdir we are interested in.
676 * @iova: IO virtual address which was previously allocated.
677 * @size: number of bytes to create a mapping for
679 * clear bits in the ioc's resource map
681 static SBA_INLINE
void
682 sba_free_range(struct ioc
*ioc
, dma_addr_t iova
, size_t size
)
684 unsigned long iovp
= SBA_IOVP(ioc
, iova
);
685 unsigned int pide
= PDIR_INDEX(iovp
);
686 unsigned int ridx
= pide
>> 3; /* convert bit to byte address */
687 unsigned long *res_ptr
= (unsigned long *) &((ioc
)->res_map
[ridx
& ~RESMAP_IDX_MASK
]);
688 int bits_not_wanted
= size
>> iovp_shift
;
691 /* Round up to power-of-two size: see AR2305 note above */
692 bits_not_wanted
= 1UL << get_iovp_order(bits_not_wanted
<< iovp_shift
);
693 for (; bits_not_wanted
> 0 ; res_ptr
++) {
695 if (unlikely(bits_not_wanted
> BITS_PER_LONG
)) {
697 /* these mappings start 64bit aligned */
699 bits_not_wanted
-= BITS_PER_LONG
;
700 pide
+= BITS_PER_LONG
;
704 /* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
705 m
= RESMAP_MASK(bits_not_wanted
) << (pide
& (BITS_PER_LONG
- 1));
708 DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __FUNCTION__
, (uint
) iova
, size
,
709 bits_not_wanted
, m
, pide
, res_ptr
, *res_ptr
);
712 ASSERT(bits_not_wanted
);
713 ASSERT((*res_ptr
& m
) == m
); /* verify same bits are set */
720 /**************************************************************
722 * "Dynamic DMA Mapping" support (aka "Coherent I/O")
724 ***************************************************************/
727 * sba_io_pdir_entry - fill in one IO PDIR entry
728 * @pdir_ptr: pointer to IO PDIR entry
729 * @vba: Virtual CPU address of buffer to map
731 * SBA Mapping Routine
733 * Given a virtual address (vba, arg1) sba_io_pdir_entry()
734 * loads the I/O PDIR entry pointed to by pdir_ptr (arg0).
735 * Each IO Pdir entry consists of 8 bytes as shown below
739 * +-+---------------------+----------------------------------+----+--------+
740 * |V| U | PPN[39:12] | U | FF |
741 * +-+---------------------+----------------------------------+----+--------+
745 * PPN == Physical Page Number
747 * The physical address fields are filled with the results of virt_to_phys()
752 #define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL) \
753 | 0x8000000000000000ULL)
756 sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
758 *pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL);
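/*
** Worked example (illustration): for an identity-mapped region 7 address
** such as vba == 0xe000000012345678, the entry written is
**
**	vba & ~0xE000000000000FFFULL                == 0x0000000012345000	(PPN, page aligned)
**	(vba & ~0xE000000000000FFFULL) | bit 63     == 0x8000000012345000	(valid)
**
** i.e. the three region bits and the 12 offset bits are dropped, leaving
** the physical page number, and the valid bit is set (the out-of-line
** version above also sets the low FF byte).
*/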
762 #ifdef ENABLE_MARK_CLEAN
764 * Since DMA is i-cache coherent, any (complete) pages that were written via
765 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
766 * flush them when they get mapped into an executable vm-area.
769 mark_clean (void *addr
, size_t size
)
771 unsigned long pg_addr
, end
;
773 pg_addr
= PAGE_ALIGN((unsigned long) addr
);
774 end
= (unsigned long) addr
+ size
;
775 while (pg_addr
+ PAGE_SIZE
<= end
) {
776 struct page
*page
= virt_to_page((void *)pg_addr
);
777 set_bit(PG_arch_1
, &page
->flags
);
778 pg_addr
+= PAGE_SIZE
;
784 * sba_mark_invalid - invalidate one or more IO PDIR entries
785 * @ioc: IO MMU structure which owns the pdir we are interested in.
786 * @iova: IO Virtual Address mapped earlier
787 * @byte_cnt: number of bytes this mapping covers.
789 * Marking the IO PDIR entry(ies) as Invalid and invalidate
790 * corresponding IO TLB entry. The PCOM (Purge Command Register)
791 * is to purge stale entries in the IO TLB when unmapping entries.
793 * The PCOM register supports purging of multiple pages, with a minimum
794 * of 1 page and a maximum of 2GB. Hardware requires the address be
795 * aligned to the size of the range being purged. The size of the range
796 * must be a power of 2. The "Cool perf optimization" in the
797 * allocation routine helps keep that true.
799 static SBA_INLINE
void
800 sba_mark_invalid(struct ioc
*ioc
, dma_addr_t iova
, size_t byte_cnt
)
802 u32 iovp
= (u32
) SBA_IOVP(ioc
,iova
);
804 int off
= PDIR_INDEX(iovp
);
806 /* Must be non-zero and rounded up */
807 ASSERT(byte_cnt
> 0);
808 ASSERT(0 == (byte_cnt
& ~iovp_mask
));
810 #ifdef ASSERT_PDIR_SANITY
811 /* Assert first pdir entry is set */
812 if (!(ioc
->pdir_base
[off
] >> 60)) {
813 sba_dump_pdir_entry(ioc
,"sba_mark_invalid()", PDIR_INDEX(iovp
));
817 if (byte_cnt
<= iovp_size
)
819 ASSERT(off
< ioc
->pdir_size
);
821 iovp
|= iovp_shift
; /* set "size" field for PCOM */
823 #ifndef FULL_VALID_PDIR
825 ** clear I/O PDIR entry "valid" bit
826 ** Do NOT clear the rest - save it for debugging.
827 ** We should only clear bits that have previously
830 ioc
->pdir_base
[off
] &= ~(0x80000000000000FFULL
);
833 ** If we want to maintain the PDIR as valid, put in
834 ** the spill page so devices prefetching won't
835 ** cause a hard fail.
837 ioc
->pdir_base
[off
] = (0x80000000000000FFULL
| prefetch_spill_page
);
840 u32 t
= get_iovp_order(byte_cnt
) + iovp_shift
;
843 ASSERT(t
<= 31); /* 2GB! Max value of "size" field */
846 /* verify this pdir entry is enabled */
847 ASSERT(ioc
->pdir_base
[off
] >> 63);
848 #ifndef FULL_VALID_PDIR
849 /* clear I/O Pdir entry "valid" bit first */
850 ioc
->pdir_base
[off
] &= ~(0x80000000000000FFULL
);
852 ioc
->pdir_base
[off
] = (0x80000000000000FFULL
| prefetch_spill_page
);
855 byte_cnt
-= iovp_size
;
856 } while (byte_cnt
> 0);
859 WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa+IOC_PCOM);
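/*
** Illustration: purging a 16K (four 4K IOVP pages) mapping that starts at
** iovp 0x00124000 encodes the purge size in the low address bits:
**
**	t    = get_iovp_order(4 * iovp_size) + iovp_shift	== 2 + 12 == 14
**	PCOM = ioc->ibase | 0x00124000 | 14
**
** so the IOC purges a naturally aligned 2^14 byte range; this is why the
** allocator above hands out power-of-two, size-aligned IOVA ranges.
*/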
863 * sba_map_single - map one buffer and return IOVA for DMA
864 * @dev: instance of PCI owned by the driver that's asking.
865 * @addr: driver buffer to map.
866 * @size: number of bytes to map in driver buffer.
869 * See Documentation/DMA-mapping.txt
872 sba_map_single(struct device
*dev
, void *addr
, size_t size
, int dir
)
879 #ifdef ASSERT_PDIR_SANITY
882 #ifdef ALLOW_IOV_BYPASS
883 unsigned long pci_addr
= virt_to_phys(addr
);
886 #ifdef ALLOW_IOV_BYPASS
887 ASSERT(to_pci_dev(dev
)->dma_mask
);
889 ** Check if the PCI device can DMA to ptr... if so, just return ptr
891 if (likely((pci_addr
& ~to_pci_dev(dev
)->dma_mask
) == 0)) {
893 ** Device is capable of DMA'ing to the buffer...
894 ** just return the PCI address of ptr
896 DBG_BYPASS("sba_map_single() bypass mask/addr: 0x%lx/0x%lx\n",
897 to_pci_dev(dev
)->dma_mask
, pci_addr
);
904 prefetch(ioc
->res_hint
);
907 ASSERT(size
<= DMA_CHUNK_SIZE
);
909 /* save offset bits */
910 offset
= ((dma_addr_t
) (long) addr
) & ~iovp_mask
;
912 /* round up to nearest iovp_size */
913 size
= (size
+ offset
+ ~iovp_mask
) & iovp_mask
;
915 #ifdef ASSERT_PDIR_SANITY
916 spin_lock_irqsave(&ioc
->res_lock
, flags
);
917 if (sba_check_pdir(ioc
,"Check before sba_map_single()"))
918 panic("Sanity check failed");
919 spin_unlock_irqrestore(&ioc
->res_lock
, flags
);
922 pide
= sba_alloc_range(ioc
, size
);
924 iovp
= (dma_addr_t
) pide
<< iovp_shift
;
926 DBG_RUN("%s() 0x%p -> 0x%lx\n",
927 __FUNCTION__
, addr
, (long) iovp
| offset
);
929 pdir_start
= &(ioc
->pdir_base
[pide
]);
932 ASSERT(((u8
*)pdir_start
)[7] == 0); /* verify availability */
933 sba_io_pdir_entry(pdir_start
, (unsigned long) addr
);
935 DBG_RUN(" pdir 0x%p %lx\n", pdir_start
, *pdir_start
);
941 /* force pdir update */
944 /* form complete address */
945 #ifdef ASSERT_PDIR_SANITY
946 spin_lock_irqsave(&ioc
->res_lock
, flags
);
947 sba_check_pdir(ioc
,"Check after sba_map_single()");
948 spin_unlock_irqrestore(&ioc
->res_lock
, flags
);
950 return SBA_IOVA(ioc
, iovp
, offset
);
954 * sba_unmap_single - unmap one IOVA and free resources
955 * @dev: instance of PCI owned by the driver that's asking.
956 * @iova: IOVA of driver buffer previously mapped.
957 * @size: number of bytes mapped in driver buffer.
960 * See Documentation/DMA-mapping.txt
962 void sba_unmap_single(struct device
*dev
, dma_addr_t iova
, size_t size
, int dir
)
965 #if DELAYED_RESOURCE_CNT > 0
966 struct sba_dma_pair
*d
;
974 #ifdef ALLOW_IOV_BYPASS
975 if (likely((iova
& ioc
->imask
) != ioc
->ibase
)) {
977 ** Address does not fall w/in IOVA, must be bypassing
979 DBG_BYPASS("sba_unmap_single() bypass addr: 0x%lx\n", iova
);
981 #ifdef ENABLE_MARK_CLEAN
982 if (dir
== DMA_FROM_DEVICE
) {
983 mark_clean(phys_to_virt(iova
), size
);
989 offset
= iova
& ~iovp_mask
;
991 DBG_RUN("%s() iovp 0x%lx/%x\n",
992 __FUNCTION__
, (long) iova
, size
);
994 iova
^= offset
; /* clear offset bits */
996 size
= ROUNDUP(size
, iovp_size
);
999 #if DELAYED_RESOURCE_CNT > 0
1000 spin_lock_irqsave(&ioc
->saved_lock
, flags
);
1001 d
= &(ioc
->saved
[ioc
->saved_cnt
]);
1004 if (unlikely(++(ioc
->saved_cnt
) >= DELAYED_RESOURCE_CNT
)) {
1005 int cnt
= ioc
->saved_cnt
;
1006 spin_lock(&ioc
->res_lock
);
1008 sba_mark_invalid(ioc
, d
->iova
, d
->size
);
1009 sba_free_range(ioc
, d
->iova
, d
->size
);
1013 READ_REG(ioc
->ioc_hpa
+IOC_PCOM
); /* flush purges */
1014 spin_unlock(&ioc
->res_lock
);
1016 spin_unlock_irqrestore(&ioc
->saved_lock
, flags
);
1017 #else /* DELAYED_RESOURCE_CNT == 0 */
1018 spin_lock_irqsave(&ioc
->res_lock
, flags
);
1019 sba_mark_invalid(ioc
, iova
, size
);
1020 sba_free_range(ioc
, iova
, size
);
1021 READ_REG(ioc
->ioc_hpa
+IOC_PCOM
); /* flush purges */
1022 spin_unlock_irqrestore(&ioc
->res_lock
, flags
);
1023 #endif /* DELAYED_RESOURCE_CNT == 0 */
1024 #ifdef ENABLE_MARK_CLEAN
1025 if (dir
== DMA_FROM_DEVICE
) {
1026 u32 iovp
= (u32
) SBA_IOVP(ioc
,iova
);
1027 int off
= PDIR_INDEX(iovp
);
1030 if (size
<= iovp_size
) {
1031 addr
= phys_to_virt(ioc
->pdir_base
[off
] &
1032 ~0xE000000000000FFFULL
);
1033 mark_clean(addr
, size
);
1035 size_t byte_cnt
= size
;
1038 addr
= phys_to_virt(ioc
->pdir_base
[off
] &
1039 ~0xE000000000000FFFULL
);
1040 mark_clean(addr
, min(byte_cnt
, iovp_size
));
1042 byte_cnt
-= iovp_size
;
1044 } while (byte_cnt
> 0);
1052 * sba_alloc_coherent - allocate/map shared mem for DMA
1053 * @dev: instance of PCI owned by the driver that's asking.
1054 * @size: number of bytes mapped in driver buffer.
1055 * @dma_handle: IOVA of new buffer.
1057 * See Documentation/DMA-mapping.txt
1060 sba_alloc_coherent (struct device
*dev
, size_t size
, dma_addr_t
*dma_handle
, int flags
)
1071 page
= alloc_pages_node(ioc
->node
== MAX_NUMNODES
?
1072 numa_node_id() : ioc
->node
, flags
,
1075 if (unlikely(!page
))
1078 addr
= page_address(page
);
1081 addr
= (void *) __get_free_pages(flags
, get_order(size
));
1083 if (unlikely(!addr
))
1086 memset(addr
, 0, size
);
1087 *dma_handle
= virt_to_phys(addr
);
1089 #ifdef ALLOW_IOV_BYPASS
1090 ASSERT(dev
->coherent_dma_mask
);
1092 ** Check if the PCI device can DMA to ptr... if so, just return ptr
1094 if (likely((*dma_handle
& ~dev
->coherent_dma_mask
) == 0)) {
1095 DBG_BYPASS("sba_alloc_coherent() bypass mask/addr: 0x%lx/0x%lx\n",
1096 dev
->coherent_dma_mask
, *dma_handle
);
1103 * If device can't bypass or bypass is disabled, pass the 32bit fake
1104 * device to map single to get an iova mapping.
1106 *dma_handle
= sba_map_single(&ioc
->sac_only_dev
->dev
, addr
, size
, 0);
1113 * sba_free_coherent - free/unmap shared mem for DMA
1114 * @dev: instance of PCI owned by the driver that's asking.
1115 * @size: number of bytes mapped in driver buffer.
1116 * @vaddr: virtual address IOVA of "consistent" buffer.
1117 * @dma_handler: IO virtual address of "consistent" buffer.
1119 * See Documentation/DMA-mapping.txt
1121 void sba_free_coherent (struct device
*dev
, size_t size
, void *vaddr
, dma_addr_t dma_handle
)
1123 sba_unmap_single(dev
, dma_handle
, size
, 0);
1124 free_pages((unsigned long) vaddr
, get_order(size
));
1129 ** Since 0 is a valid pdir_base index value, can't use that
1130 ** to determine if a value is valid or not. Use a flag to indicate
1131 ** the SG list entry contains a valid pdir index.
1133 #define PIDE_FLAG 0x1UL
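/*
** Illustration: sba_coalesce_chunks() parks the allocated pdir index
** (shifted up) plus the stream's byte offset in dma_address with PIDE_FLAG
** set; sba_fill_pdir() later strips the flag and rewrites it as a real IOVA:
**
**	(coalesce pass)  dma_sg->dma_address = PIDE_FLAG
**	                        | (sba_alloc_range(ioc, dma_len) << iovp_shift)
**	                        | dma_offset;
**	(fill pass)      u32 pide = startsg->dma_address & ~PIDE_FLAG;
**	                 dma_sg->dma_address = pide | ioc->ibase;
*/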
1135 #ifdef DEBUG_LARGE_SG_ENTRIES
1136 int dump_run_sg
= 0;
1141 * sba_fill_pdir - write allocated SG entries into IO PDIR
1142 * @ioc: IO MMU structure which owns the pdir we are interested in.
1143 * @startsg: list of IOVA/size pairs
1144 * @nents: number of entries in startsg list
1146 * Take preprocessed SG list and write corresponding entries
1150 static SBA_INLINE
int
1153 struct scatterlist
*startsg
,
1156 struct scatterlist
*dma_sg
= startsg
; /* pointer to current DMA */
1159 unsigned long dma_offset
= 0;
1162 while (nents
-- > 0) {
1163 int cnt
= startsg
->dma_length
;
1164 startsg
->dma_length
= 0;
1166 #ifdef DEBUG_LARGE_SG_ENTRIES
1168 printk(" %2d : %08lx/%05x %p\n",
1169 nents
, startsg
->dma_address
, cnt
,
1170 sba_sg_address(startsg
));
1172 DBG_RUN_SG(" %d : %08lx/%05x %p\n",
1173 nents
, startsg
->dma_address
, cnt
,
1174 sba_sg_address(startsg
));
1177 ** Look for the start of a new DMA stream
1179 if (startsg
->dma_address
& PIDE_FLAG
) {
1180 u32 pide
= startsg
->dma_address
& ~PIDE_FLAG
;
1181 dma_offset
= (unsigned long) pide
& ~iovp_mask
;
1182 startsg
->dma_address
= 0;
1184 dma_sg
->dma_address
= pide
| ioc
->ibase
;
1185 pdirp
= &(ioc
->pdir_base
[pide
>> iovp_shift
]);
1190 ** Look for a VCONTIG chunk
1193 unsigned long vaddr
= (unsigned long) sba_sg_address(startsg
);
1196 /* Since multiple Vcontig blocks could make up
1197 ** one DMA stream, *add* cnt to dma_len.
1199 dma_sg
->dma_length
+= cnt
;
1201 dma_offset
=0; /* only want offset on first chunk */
1202 cnt
= ROUNDUP(cnt
, iovp_size
);
1204 sba_io_pdir_entry(pdirp
, vaddr
);
1212 /* force pdir update */
1215 #ifdef DEBUG_LARGE_SG_ENTRIES
1223 ** Two address ranges are DMA contiguous *iff* "end of prev" and
1224 ** "start of next" are both on an IOV page boundary.
1226 ** (shift left is a quick trick to mask off upper bits)
1228 #define DMA_CONTIG(__X, __Y) \
1229 (((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - iovp_shift)) == 0UL)
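/*
** Example (4K IOVP pages, BITS_PER_LONG == 64): the left shift by 52 throws
** away everything above the page offset, so DMA_CONTIG() is true exactly
** when both addresses are IOVP-page aligned:
**
**	DMA_CONTIG(0xe000000000456000, 0xe000000000789000)  -> true
**	DMA_CONTIG(0xe000000000456800, 0xe000000000789000)  -> false
*/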
1233 * sba_coalesce_chunks - preprocess the SG list
1234 * @ioc: IO MMU structure which owns the pdir we are interested in.
1235 * @startsg: list of IOVA/size pairs
1236 * @nents: number of entries in startsg list
1238 * First pass is to walk the SG list and determine where the breaks are
1239 * in the DMA stream. Allocates PDIR entries but does not fill them.
1240 * Returns the number of DMA chunks.
1242 * Doing the fill separate from the coalescing/allocation keeps the
1243 * code simpler. Future enhancement could make one pass through
1244 * the sglist do both.
1246 static SBA_INLINE
int
1247 sba_coalesce_chunks( struct ioc
*ioc
,
1248 struct scatterlist
*startsg
,
1251 struct scatterlist
*vcontig_sg
; /* VCONTIG chunk head */
1252 unsigned long vcontig_len
; /* len of VCONTIG chunk */
1253 unsigned long vcontig_end
;
1254 struct scatterlist
*dma_sg
; /* next DMA stream head */
1255 unsigned long dma_offset
, dma_len
; /* start/len of DMA stream */
1259 unsigned long vaddr
= (unsigned long) sba_sg_address(startsg
);
1262 ** Prepare for first/next DMA stream
1264 dma_sg
= vcontig_sg
= startsg
;
1265 dma_len
= vcontig_len
= vcontig_end
= startsg
->length
;
1266 vcontig_end
+= vaddr
;
1267 dma_offset
= vaddr
& ~iovp_mask
;
1269 /* PARANOID: clear entries */
1270 startsg
->dma_address
= startsg
->dma_length
= 0;
1273 ** This loop terminates one iteration "early" since
1274 ** it's always looking one "ahead".
1276 while (--nents
> 0) {
1277 unsigned long vaddr
; /* tmp */
1282 startsg
->dma_address
= startsg
->dma_length
= 0;
1284 /* catch brokenness in SCSI layer */
1285 ASSERT(startsg
->length
<= DMA_CHUNK_SIZE
);
1288 ** First make sure current dma stream won't
1289 ** exceed DMA_CHUNK_SIZE if we coalesce the
1292 if (((dma_len
+ dma_offset
+ startsg
->length
+ ~iovp_mask
) & iovp_mask
)
1297 ** Then look for virtually contiguous blocks.
1299 ** append the next transaction?
1301 vaddr
= (unsigned long) sba_sg_address(startsg
);
1302 if (vcontig_end
== vaddr
)
1304 vcontig_len
+= startsg
->length
;
1305 vcontig_end
+= startsg
->length
;
1306 dma_len
+= startsg
->length
;
1310 #ifdef DEBUG_LARGE_SG_ENTRIES
1311 dump_run_sg
= (vcontig_len
> iovp_size
);
1315 ** Not virtually contiguous.
1316 ** Terminate prev chunk.
1317 ** Start a new chunk.
1319 ** Once we start a new VCONTIG chunk, dma_offset
1320 ** can't change. And we need the offset from the first
1321 ** chunk - not the last one. Ergo Successive chunks
1322 ** must start on page boundaries and dovetail
1323 ** with its predecessor.
1325 vcontig_sg
->dma_length
= vcontig_len
;
1327 vcontig_sg
= startsg
;
1328 vcontig_len
= startsg
->length
;
1331 ** 3) do the entries end/start on page boundaries?
1332 ** Don't update vcontig_end until we've checked.
1334 if (DMA_CONTIG(vcontig_end
, vaddr
))
1336 vcontig_end
= vcontig_len
+ vaddr
;
1337 dma_len
+= vcontig_len
;
1345 ** End of DMA Stream
1346 ** Terminate last VCONTIG block.
1347 ** Allocate space for DMA stream.
1349 vcontig_sg
->dma_length
= vcontig_len
;
1350 dma_len
= (dma_len
+ dma_offset
+ ~iovp_mask
) & iovp_mask
;
1351 ASSERT(dma_len
<= DMA_CHUNK_SIZE
);
1352 dma_sg
->dma_address
= (dma_addr_t
) (PIDE_FLAG
1353 | (sba_alloc_range(ioc
, dma_len
) << iovp_shift
)
1363 * sba_map_sg - map Scatter/Gather list
1364 * @dev: instance of PCI owned by the driver that's asking.
1365 * @sglist: array of buffer/length pairs
1366 * @nents: number of entries in list
1367 * @dir: R/W or both.
1369 * See Documentation/DMA-mapping.txt
1371 int sba_map_sg(struct device
*dev
, struct scatterlist
*sglist
, int nents
, int dir
)
1374 int coalesced
, filled
= 0;
1375 #ifdef ASSERT_PDIR_SANITY
1376 unsigned long flags
;
1378 #ifdef ALLOW_IOV_BYPASS_SG
1379 struct scatterlist
*sg
;
1382 DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__
, nents
);
1386 #ifdef ALLOW_IOV_BYPASS_SG
1387 ASSERT(to_pci_dev(dev
)->dma_mask
);
1388 if (likely((ioc
->dma_mask
& ~to_pci_dev(dev
)->dma_mask
) == 0)) {
1389 for (sg
= sglist
; filled
< nents
; filled
++, sg
++){
1390 sg
->dma_length
= sg
->length
;
1391 sg
->dma_address
= virt_to_phys(sba_sg_address(sg
));
1396 /* Fast path single entry scatterlists. */
1398 sglist
->dma_length
= sglist
->length
;
1399 sglist
->dma_address
= sba_map_single(dev
, sba_sg_address(sglist
), sglist
->length
, dir
);
1403 #ifdef ASSERT_PDIR_SANITY
1404 spin_lock_irqsave(&ioc
->res_lock
, flags
);
1405 if (sba_check_pdir(ioc
,"Check before sba_map_sg()"))
1407 sba_dump_sg(ioc
, sglist
, nents
);
1408 panic("Check before sba_map_sg()");
1410 spin_unlock_irqrestore(&ioc
->res_lock
, flags
);
1413 prefetch(ioc
->res_hint
);
1416 ** First coalesce the chunks and allocate I/O pdir space
1418 ** If this is one DMA stream, we can properly map using the
1419 ** correct virtual address associated with each DMA page.
1420 ** w/o this association, we wouldn't have coherent DMA!
1421 ** Access to the virtual address is what forces a two pass algorithm.
1423 coalesced
= sba_coalesce_chunks(ioc
, sglist
, nents
);
1426 ** Program the I/O Pdir
1428 ** map the virtual addresses to the I/O Pdir
1429 ** o dma_address will contain the pdir index
1430 ** o dma_len will contain the number of bytes to map
1431 ** o address contains the virtual address.
1433 filled
= sba_fill_pdir(ioc
, sglist
, nents
);
1435 #ifdef ASSERT_PDIR_SANITY
1436 spin_lock_irqsave(&ioc
->res_lock
, flags
);
1437 if (sba_check_pdir(ioc
,"Check after sba_map_sg()"))
1439 sba_dump_sg(ioc
, sglist
, nents
);
1440 panic("Check after sba_map_sg()\n");
1442 spin_unlock_irqrestore(&ioc
->res_lock
, flags
);
1445 ASSERT(coalesced
== filled
);
1446 DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__
, filled
);
1453 * sba_unmap_sg - unmap Scatter/Gather list
1454 * @dev: instance of PCI owned by the driver that's asking.
1455 * @sglist: array of buffer/length pairs
1456 * @nents: number of entries in list
1457 * @dir: R/W or both.
1459 * See Documentation/DMA-mapping.txt
1461 void sba_unmap_sg (struct device
*dev
, struct scatterlist
*sglist
, int nents
, int dir
)
1463 #ifdef ASSERT_PDIR_SANITY
1465 unsigned long flags
;
1468 DBG_RUN_SG("%s() START %d entries, %p,%x\n",
1469 __FUNCTION__
, nents
, sba_sg_address(sglist
), sglist
->length
);
1471 #ifdef ASSERT_PDIR_SANITY
1475 spin_lock_irqsave(&ioc
->res_lock
, flags
);
1476 sba_check_pdir(ioc
,"Check before sba_unmap_sg()");
1477 spin_unlock_irqrestore(&ioc
->res_lock
, flags
);
1480 while (nents
&& sglist
->dma_length
) {
1482 sba_unmap_single(dev
, sglist
->dma_address
, sglist
->dma_length
, dir
);
1487 DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__
, nents
);
1489 #ifdef ASSERT_PDIR_SANITY
1490 spin_lock_irqsave(&ioc
->res_lock
, flags
);
1491 sba_check_pdir(ioc
,"Check after sba_unmap_sg()");
1492 spin_unlock_irqrestore(&ioc
->res_lock
, flags
);
1497 /**************************************************************
1499 * Initialization and claim
1501 ***************************************************************/
1504 ioc_iova_init(struct ioc
*ioc
)
1508 struct pci_dev
*device
= NULL
;
1509 #ifdef FULL_VALID_PDIR
1510 unsigned long index
;
1514 ** Firmware programs the base and size of a "safe IOVA space"
1515 ** (one that doesn't overlap memory or LMMIO space) in the
1516 ** IBASE and IMASK registers.
1518 ioc
->ibase
= READ_REG(ioc
->ioc_hpa
+ IOC_IBASE
) & ~0x1UL
;
1519 ioc
->imask
= READ_REG(ioc
->ioc_hpa
+ IOC_IMASK
) | 0xFFFFFFFF00000000UL
;
1521 ioc
->iov_size
= ~ioc
->imask
+ 1;
1523 DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n",
1524 __FUNCTION__
, ioc
->ioc_hpa
, ioc
->ibase
, ioc
->imask
,
1525 ioc
->iov_size
>> 20);
1527 switch (iovp_size
) {
1528 case 4*1024: tcnfg
= 0; break;
1529 case 8*1024: tcnfg
= 1; break;
1530 case 16*1024: tcnfg
= 2; break;
1531 case 64*1024: tcnfg
= 3; break;
1533 panic(PFX
"Unsupported IOTLB page size %ldK",
1537 WRITE_REG(tcnfg
, ioc
->ioc_hpa
+ IOC_TCNFG
);
1539 ioc
->pdir_size
= (ioc
->iov_size
/ iovp_size
) * PDIR_ENTRY_SIZE
;
1540 ioc
->pdir_base
= (void *) __get_free_pages(GFP_KERNEL
,
1541 get_order(ioc
->pdir_size
));
1542 if (!ioc
->pdir_base
)
1543 panic(PFX
"Couldn't allocate I/O Page Table\n");
1545 memset(ioc
->pdir_base
, 0, ioc
->pdir_size
);
1547 DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __FUNCTION__
,
1548 iovp_size
>> 10, ioc
->pdir_base
, ioc
->pdir_size
);
1550 ASSERT(ALIGN((unsigned long) ioc
->pdir_base
, 4*1024) == (unsigned long) ioc
->pdir_base
);
1551 WRITE_REG(virt_to_phys(ioc
->pdir_base
), ioc
->ioc_hpa
+ IOC_PDIR_BASE
);
1554 ** If an AGP device is present, only use half of the IOV space
1555 ** for PCI DMA. Unfortunately we can't know ahead of time
1556 ** whether GART support will actually be used, for now we
1557 ** can just key on an AGP device found in the system.
1558 ** We program the next pdir index after we stop w/ a key for
1559 ** the GART code to handshake on.
1561 for_each_pci_dev(device
)
1562 agp_found
|= pci_find_capability(device
, PCI_CAP_ID_AGP
);
1564 if (agp_found
&& reserve_sba_gart
) {
1565 printk(KERN_INFO PFX
"reserving %dMb of IOVA space at 0x%lx for agpgart\n",
1566 ioc
->iov_size
/2 >> 20, ioc
->ibase
+ ioc
->iov_size
/2);
1567 ioc
->pdir_size
/= 2;
1568 ((u64
*)ioc
->pdir_base
)[PDIR_INDEX(ioc
->iov_size
/2)] = ZX1_SBA_IOMMU_COOKIE
;
1570 #ifdef FULL_VALID_PDIR
1572 ** Check to see if the spill page has been allocated, we don't need more than
1573 ** one across multiple SBAs.
1575 if (!prefetch_spill_page
) {
1576 char *spill_poison
= "SBAIOMMU POISON";
1577 int poison_size
= 16;
1578 void *poison_addr
, *addr
;
1580 addr
= (void *)__get_free_pages(GFP_KERNEL
, get_order(iovp_size
));
1582 panic(PFX
"Couldn't allocate PDIR spill page\n");
1585 for ( ; (u64
) poison_addr
< addr
+ iovp_size
; poison_addr
+= poison_size
)
1586 memcpy(poison_addr
, spill_poison
, poison_size
);
1588 prefetch_spill_page
= virt_to_phys(addr
);
1590 DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __FUNCTION__
, prefetch_spill_page
);
1593 ** Set all the PDIR entries valid w/ the spill page as the target
1595 for (index
= 0 ; index
< (ioc
->pdir_size
/ PDIR_ENTRY_SIZE
) ; index
++)
1596 ((u64
*)ioc
->pdir_base
)[index
] = (0x80000000000000FF | prefetch_spill_page
);
1599 /* Clear I/O TLB of any possible entries */
1600 WRITE_REG(ioc
->ibase
| (get_iovp_order(ioc
->iov_size
) + iovp_shift
), ioc
->ioc_hpa
+ IOC_PCOM
);
1601 READ_REG(ioc
->ioc_hpa
+ IOC_PCOM
);
1603 /* Enable IOVA translation */
1604 WRITE_REG(ioc
->ibase
| 1, ioc
->ioc_hpa
+ IOC_IBASE
);
1605 READ_REG(ioc
->ioc_hpa
+ IOC_IBASE
);
1609 ioc_resource_init(struct ioc
*ioc
)
1611 spin_lock_init(&ioc
->res_lock
);
1612 #if DELAYED_RESOURCE_CNT > 0
1613 spin_lock_init(&ioc
->saved_lock
);
1616 /* resource map size dictated by pdir_size */
1617 ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */
1618 ioc->res_size >>= 3; /* convert bit count to byte count */
1619 DBG_INIT("%s() res_size 0x%x\n", __FUNCTION__, ioc->res_size);
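/*
** Worked example (illustration): a 1GB IOV space with 4K IOVP pages gives
**
**	pdir entries = 1GB / 4K            == 256K
**	pdir_size    = 256K * 8 bytes      == 2MB
**	res_size     = 256K bits / 8       == 32KB	(one bit per pdir entry)
*/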
1621 ioc
->res_map
= (char *) __get_free_pages(GFP_KERNEL
,
1622 get_order(ioc
->res_size
));
1624 panic(PFX
"Couldn't allocate resource map\n");
1626 memset(ioc
->res_map
, 0, ioc
->res_size
);
1627 /* next available IOVP - circular search */
1628 ioc
->res_hint
= (unsigned long *) ioc
->res_map
;
1630 #ifdef ASSERT_PDIR_SANITY
1631 /* Mark first bit busy - ie no IOVA 0 */
1632 ioc
->res_map
[0] = 0x1;
1633 ioc
->pdir_base
[0] = 0x8000000000000000ULL
| ZX1_SBA_IOMMU_COOKIE
;
1635 #ifdef FULL_VALID_PDIR
1636 /* Mark the last resource used so we don't prefetch beyond IOVA space */
1637 ioc
->res_map
[ioc
->res_size
- 1] |= 0x80UL
; /* res_map is chars */
1638 ioc
->pdir_base
[(ioc
->pdir_size
/ PDIR_ENTRY_SIZE
) - 1] = (0x80000000000000FF
1639 | prefetch_spill_page
);
1642 DBG_INIT("%s() res_map %x %p\n", __FUNCTION__
,
1643 ioc
->res_size
, (void *) ioc
->res_map
);
1647 ioc_sac_init(struct ioc
*ioc
)
1649 struct pci_dev
*sac
= NULL
;
1650 struct pci_controller
*controller
= NULL
;
1653 * pci_alloc_coherent() must return a DMA address which is
1654 * SAC (single address cycle) addressable, so allocate a
1655 * pseudo-device to enforce that.
1657 sac
= kmalloc(sizeof(*sac
), GFP_KERNEL
);
1659 panic(PFX
"Couldn't allocate struct pci_dev");
1660 memset(sac
, 0, sizeof(*sac
));
1662 controller
= kmalloc(sizeof(*controller
), GFP_KERNEL
);
1664 panic(PFX
"Couldn't allocate struct pci_controller");
1665 memset(controller
, 0, sizeof(*controller
));
1667 controller
->iommu
= ioc
;
1668 sac
->sysdata
= controller
;
1669 sac
->dma_mask
= 0xFFFFFFFFUL
;
1671 sac
->dev
.bus
= &pci_bus_type
;
1673 ioc
->sac_only_dev
= sac
;
1677 ioc_zx1_init(struct ioc
*ioc
)
1679 unsigned long rope_config
;
1682 if (ioc
->rev
< 0x20)
1683 panic(PFX
"IOC 2.0 or later required for IOMMU support\n");
1685 /* 38 bit memory controller + extra bit for range displaced by MMIO */
1686 ioc
->dma_mask
= (0x1UL
<< 39) - 1;
1689 ** Clear ROPE(N)_CONFIG AO bit.
1690 ** Disables "NT Ordering" (~= !"Relaxed Ordering")
1691 ** Overrides bit 1 in DMA Hint Sets.
1692 ** Improves netperf UDP_STREAM by ~10% for tg3 on bcm5701.
1694 for (i
=0; i
<(8*8); i
+=8) {
1695 rope_config
= READ_REG(ioc
->ioc_hpa
+ IOC_ROPE0_CFG
+ i
);
1696 rope_config
&= ~IOC_ROPE_AO
;
1697 WRITE_REG(rope_config
, ioc
->ioc_hpa
+ IOC_ROPE0_CFG
+ i
);
1701 typedef void (initfunc
)(struct ioc
*);
1709 static struct ioc_iommu ioc_iommu_info
[] __initdata
= {
1710 { ZX1_IOC_ID
, "zx1", ioc_zx1_init
},
1711 { ZX2_IOC_ID
, "zx2", NULL
},
1712 { SX1000_IOC_ID
, "sx1000", NULL
},
1715 static struct ioc
* __init
1716 ioc_init(u64 hpa
, void *handle
)
1719 struct ioc_iommu
*info
;
1721 ioc
= kmalloc(sizeof(*ioc
), GFP_KERNEL
);
1725 memset(ioc
, 0, sizeof(*ioc
));
1727 ioc
->next
= ioc_list
;
1730 ioc
->handle
= handle
;
1731 ioc
->ioc_hpa
= ioremap(hpa
, 0x1000);
1733 ioc
->func_id
= READ_REG(ioc
->ioc_hpa
+ IOC_FUNC_ID
);
1734 ioc
->rev
= READ_REG(ioc
->ioc_hpa
+ IOC_FCLASS
) & 0xFFUL
;
1735 ioc
->dma_mask
= 0xFFFFFFFFFFFFFFFFUL
; /* conservative */
1737 for (info
= ioc_iommu_info
; info
< ioc_iommu_info
+ ARRAY_SIZE(ioc_iommu_info
); info
++) {
1738 if (ioc
->func_id
== info
->func_id
) {
1739 ioc
->name
= info
->name
;
1745 iovp_size
= (1 << iovp_shift
);
1746 iovp_mask
= ~(iovp_size
- 1);
1748 DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __FUNCTION__
,
1749 PAGE_SIZE
>> 10, iovp_size
>> 10);
1752 ioc
->name
= kmalloc(24, GFP_KERNEL
);
1754 sprintf((char *) ioc
->name
, "Unknown (%04x:%04x)",
1755 ioc
->func_id
& 0xFFFF, (ioc
->func_id
>> 16) & 0xFFFF);
1757 ioc
->name
= "Unknown";
1761 ioc_resource_init(ioc
);
1764 if ((long) ~iovp_mask
> (long) ia64_max_iommu_merge_mask
)
1765 ia64_max_iommu_merge_mask
= ~iovp_mask
;
1767 printk(KERN_INFO PFX
1768 "%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n",
1769 ioc
->name
, (ioc
->rev
>> 4) & 0xF, ioc
->rev
& 0xF,
1770 hpa
, ioc
->iov_size
>> 20, ioc
->ibase
);
1777 /**************************************************************************
1779 ** SBA initialization code (HW and SW)
1781 ** o identify SBA chip itself
1782 ** o FIXME: initialize DMA hints for reasonable defaults
1784 **************************************************************************/
1786 #ifdef CONFIG_PROC_FS
1788 ioc_start(struct seq_file
*s
, loff_t
*pos
)
1793 for (ioc
= ioc_list
; ioc
; ioc
= ioc
->next
)
1801 ioc_next(struct seq_file
*s
, void *v
, loff_t
*pos
)
1803 struct ioc
*ioc
= v
;
1810 ioc_stop(struct seq_file
*s
, void *v
)
1815 ioc_show(struct seq_file
*s
, void *v
)
1817 struct ioc
*ioc
= v
;
1818 unsigned long *res_ptr
= (unsigned long *)ioc
->res_map
;
1821 seq_printf(s
, "Hewlett Packard %s IOC rev %d.%d\n",
1822 ioc
->name
, ((ioc
->rev
>> 4) & 0xF), (ioc
->rev
& 0xF));
1824 if (ioc
->node
!= MAX_NUMNODES
)
1825 seq_printf(s
, "NUMA node : %d\n", ioc
->node
);
1827 seq_printf(s
, "IOVA size : %ld MB\n", ((ioc
->pdir_size
>> 3) * iovp_size
)/(1024*1024));
1828 seq_printf(s
, "IOVA page size : %ld kb\n", iovp_size
/1024);
1830 for (i
= 0; i
< (ioc
->res_size
/ sizeof(unsigned long)); ++i
, ++res_ptr
)
1831 used
+= hweight64(*res_ptr
);
1833 seq_printf(s
, "PDIR size : %d entries\n", ioc
->pdir_size
>> 3);
1834 seq_printf(s
, "PDIR used : %d entries\n", used
);
1836 #ifdef PDIR_SEARCH_TIMING
1838 unsigned long i
= 0, avg
= 0, min
, max
;
1839 min
= max
= ioc
->avg_search
[0];
1840 for (i
= 0; i
< SBA_SEARCH_SAMPLE
; i
++) {
1841 avg
+= ioc
->avg_search
[i
];
1842 if (ioc
->avg_search
[i
] > max
) max
= ioc
->avg_search
[i
];
1843 if (ioc
->avg_search
[i
] < min
) min
= ioc
->avg_search
[i
];
1845 avg
/= SBA_SEARCH_SAMPLE
;
1846 seq_printf(s
, "Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles/IOVA page)\n",
1850 #ifndef ALLOW_IOV_BYPASS
1851 seq_printf(s
, "IOVA bypass disabled\n");
1856 static struct seq_operations ioc_seq_ops
= {
1864 ioc_open(struct inode
*inode
, struct file
*file
)
1866 return seq_open(file
, &ioc_seq_ops
);
1869 static struct file_operations ioc_fops
= {
1872 .llseek
= seq_lseek
,
1873 .release
= seq_release
1879 struct proc_dir_entry
*dir
, *entry
;
1881 dir
= proc_mkdir("bus/mckinley", NULL
);
1885 entry
= create_proc_entry(ioc_list
->name
, 0, dir
);
1887 entry
->proc_fops
= &ioc_fops
;
1892 sba_connect_bus(struct pci_bus
*bus
)
1894 acpi_handle handle
, parent
;
1898 if (!PCI_CONTROLLER(bus
))
1899 panic(PFX
"no sysdata on bus %d!\n", bus
->number
);
1901 if (PCI_CONTROLLER(bus
)->iommu
)
1904 handle
= PCI_CONTROLLER(bus
)->acpi_handle
;
1909 * The IOC scope encloses PCI root bridges in the ACPI
1910 * namespace, so work our way out until we find an IOC we
1911 * claimed previously.
1914 for (ioc
= ioc_list
; ioc
; ioc
= ioc
->next
)
1915 if (ioc
->handle
== handle
) {
1916 PCI_CONTROLLER(bus
)->iommu
= ioc
;
1920 status
= acpi_get_parent(handle
, &parent
);
1922 } while (ACPI_SUCCESS(status
));
1924 printk(KERN_WARNING
"No IOC for PCI Bus %04x:%02x in ACPI\n", pci_domain_nr(bus
), bus
->number
);
1929 sba_map_ioc_to_node(struct ioc
*ioc
, acpi_handle handle
)
1931 struct acpi_buffer buffer
= {ACPI_ALLOCATE_BUFFER
, NULL
};
1932 union acpi_object
*obj
;
1933 acpi_handle phandle
;
1936 ioc
->node
= MAX_NUMNODES
;
1939 * Check for a _PXM on this node first. We don't typically see
1940 * one here, so we'll end up getting it from the parent.
1942 if (ACPI_FAILURE(acpi_evaluate_object(handle
, "_PXM", NULL
, &buffer
))) {
1943 if (ACPI_FAILURE(acpi_get_parent(handle
, &phandle
)))
1946 /* Reset the acpi buffer */
1947 buffer
.length
= ACPI_ALLOCATE_BUFFER
;
1948 buffer
.pointer
= NULL
;
1950 if (ACPI_FAILURE(acpi_evaluate_object(phandle
, "_PXM", NULL
,
1955 if (!buffer
.length
|| !buffer
.pointer
)
1958 obj
= buffer
.pointer
;
1960 if (obj
->type
!= ACPI_TYPE_INTEGER
||
1961 obj
->integer
.value
>= MAX_PXM_DOMAINS
) {
1962 acpi_os_free(buffer
.pointer
);
1966 node
= pxm_to_nid_map
[obj
->integer
.value
];
1967 acpi_os_free(buffer
.pointer
);
1969 if (node
>= MAX_NUMNODES
|| !node_online(node
))
1976 #define sba_map_ioc_to_node(ioc, handle)
1980 acpi_sba_ioc_add(struct acpi_device
*device
)
1985 struct acpi_buffer buffer
;
1986 struct acpi_device_info
*dev_info
;
1988 status
= hp_acpi_csr_space(device
->handle
, &hpa
, &length
);
1989 if (ACPI_FAILURE(status
))
1992 buffer
.length
= ACPI_ALLOCATE_LOCAL_BUFFER
;
1993 status
= acpi_get_object_info(device
->handle
, &buffer
);
1994 if (ACPI_FAILURE(status
))
1996 dev_info
= buffer
.pointer
;
1999 * For HWP0001, only SBA appears in ACPI namespace. It encloses the PCI
2000 * root bridges, and its CSR space includes the IOC function.
2002 if (strncmp("HWP0001", dev_info
->hardware_id
.value
, 7) == 0) {
2003 hpa
+= ZX1_IOC_OFFSET
;
2004 /* zx1 based systems default to kernel page size iommu pages */
2006 iovp_shift
= min(PAGE_SHIFT
, 16);
2008 ACPI_MEM_FREE(dev_info
);
2011 * default anything not caught above or specified on cmdline to 4k
2017 ioc
= ioc_init(hpa
, device
->handle
);
2021 /* setup NUMA node association */
2022 sba_map_ioc_to_node(ioc
, device
->handle
);
2026 static struct acpi_driver acpi_sba_ioc_driver
= {
2027 .name
= "IOC IOMMU Driver",
2028 .ids
= "HWP0001,HWP0004",
2030 .add
= acpi_sba_ioc_add
,
2037 acpi_bus_register_driver(&acpi_sba_ioc_driver
);
2043 struct pci_bus
*b
= NULL
;
2044 while ((b
= pci_find_next_bus(b
)) != NULL
)
2049 #ifdef CONFIG_PROC_FS
2055 subsys_initcall(sba_init
); /* must be initialized after ACPI etc., but before any drivers... */
2057 extern void dig_setup(char**);
2059 * MAX_DMA_ADDRESS needs to be set up prior to paging_init to do any good,
2060 * so we use the platform_setup hook to fix it up.
2063 sba_setup(char **cmdline_p
)
2065 MAX_DMA_ADDRESS
= ~0UL;
2066 dig_setup(cmdline_p
);
2070 nosbagart(char *str
)
2072 reserve_sba_gart
= 0;
2077 sba_dma_supported (struct device
*dev
, u64 mask
)
2079 /* make sure it's at least 32bit capable */
2080 return ((mask
& 0xFFFFFFFFUL
) == 0xFFFFFFFFUL
);
2084 sba_dma_mapping_error (dma_addr_t dma_addr
)
2089 __setup("nosbagart", nosbagart
);
2092 sba_page_override(char *str
)
2094 unsigned long page_size
;
2096 page_size
= memparse(str
, &str
);
2097 switch (page_size
) {
2102 iovp_shift
= ffs(page_size
) - 1;
2105 printk("%s: unknown/unsupported iommu page size %ld\n",
2106 __FUNCTION__
, page_size
);
2112 __setup("sbapagesize=",sba_page_override
);
2114 EXPORT_SYMBOL(sba_dma_mapping_error
);
2115 EXPORT_SYMBOL(sba_map_single
);
2116 EXPORT_SYMBOL(sba_unmap_single
);
2117 EXPORT_SYMBOL(sba_map_sg
);
2118 EXPORT_SYMBOL(sba_unmap_sg
);
2119 EXPORT_SYMBOL(sba_dma_supported
);
2120 EXPORT_SYMBOL(sba_alloc_coherent
);
2121 EXPORT_SYMBOL(sba_free_coherent
);