/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32bit addresses to be used on
 * systems with more than 4GB of memory.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/sysdev.h>
#include <asm/atomic.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area in bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */
/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. Problem is that doing the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * has also been seen with Qlogic at least).
 */
int iommu_fullflush = 1;
/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;
#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
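/*
 * Worked example (illustrative comment, not from the original source): a GART
 * PTE packs a 40-bit physical address into 32 bits by keeping bits 31:12 in
 * place and storing bits 39:32 in PTE bits 11:4.  For phys = 0x1234567000:
 *	GPTE_ENCODE(0x1234567000ULL)
 *		= 0x34567000 | (0x12 << 4) | GPTE_VALID | GPTE_COHERENT
 *		= 0x34567123
 *	GPTE_DECODE(0x34567123)
 *		= 0x34567000 | ((0x34567123 & 0xff0) << 28) = 0x1234567000
 */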
#define EMERGENCY_PAGES 32 /* = 128KB */
#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;
static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static int need_flush;		/* global flush state. set for each gart wrap */
static unsigned long alloc_iommu(struct device *dev, int size,
				 unsigned long align_mask)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, align_mask);
	if (offset == -1) {
		need_flush = 1;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size,
					  align_mask);
	}
	if (offset != -1) {
		next_bit = offset+size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = 1;
		}
	}
	if (iommu_fullflush)
		need_flush = 1;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}
static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	iommu_area_free(iommu_gart_bitmap, offset, size);
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		k8_flush_garts();
		need_flush = 0;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = __builtin_return_address(0);\
	} while (0)

#define CLEAR_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = NULL;			\
	} while (0)

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
static int iommu_leak_pages = 20;
static void dump_leak(void)
{
	static int dump;
	int i;

	if (dump || !iommu_leak_tab)
		return;
	dump = 1;
	show_stack(NULL, NULL);

	/* Very crude. dump some from the end of the table too */
	printk(KERN_DEBUG "Dumping %d pages from end of IOMMU:\n",
	       iommu_leak_pages);
	for (i = 0; i < iommu_leak_pages; i += 2) {
		printk(KERN_DEBUG "%lu: ", iommu_pages-i);
		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i], 0);
		printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
	}
	printk(KERN_DEBUG "\n");
}
#else
# define SET_LEAK(x)
# define CLEAR_LEAK(x)
#endif
static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic(KERN_ERR
				"PCI-DMA: Random memory would be DMAed\n");
	}
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}
static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return force_iommu ||
		!is_buffer_dma_capable(*dev->dma_mask, addr, size);
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return !is_buffer_dma_capable(*dev->dma_mask, addr, size);
}
/* Map a single continuous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
				size_t size, int dir, unsigned long align_mask)
{
	unsigned long npages = iommu_num_pages(phys_mem, size);
	unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
	int i;

	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_address;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		SET_LEAK(iommu_page + i);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
/* Map a single area into the IOMMU */
static dma_addr_t
gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
{
	unsigned long bus;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = dma_map_area(dev, paddr, size, dir, 0);
	flush_gart();

	return bus;
}
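#if 0
/*
 * Illustrative usage sketch, not part of the original driver: "pdev" and
 * "buf" are hypothetical.  A driver's pci_map_single() call is dispatched
 * through dma_ops (pointed at gart_dma_ops by gart_iommu_init() below) and
 * ends up in gart_map_single() above; on aperture exhaustion this file
 * hands back bad_dma_address.
 */
static dma_addr_t example_map_buffer(struct pci_dev *pdev, void *buf,
				     size_t len)
{
	dma_addr_t bus = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);

	if (bus == bad_dma_address)
		return 0;		/* mapping failed */
	/* ... hand "bus" to the device, later pci_unmap_single() it ... */
	return bus;
}
#endif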
/*
 * Free a DMA mapping.
 */
static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = iommu_num_pages(dma_addr, size);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
		CLEAR_LEAK(iommu_page + i);
	}
	free_iommu(iommu_page, npages);
}
/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void
gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
	}
}
/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir, 0);
			if (addr == bad_dma_address) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}
/* Map multiple scatterlist entries continuous into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages, 0);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = iommu_num_pages(s->offset, s->length);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			SET_LEAK(iommu_page);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}
static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return nelems;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}
/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a continuous mapping.
 */
static int
gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return 0;
	if (!dev)
		dev = &x86_dma_fallback_dev;

	out = 0;
	start = 0;
	start_sg = sgmap = sg;
	seg_size = 0;
	max_seg_size = dma_get_max_seg_size(dev);
	ps = NULL; /* shut up gcc */
	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(dev, start_sg, i - start,
						 sgmap, pages, need) < 0)
					goto error;
				out++;
				seg_size = 0;
				sgmap = sg_next(sgmap);
				pages = 0;
				start = i;
				start_sg = s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += iommu_num_pages(s->offset, s->length);
		ps = s;
	}
	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir);

	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = bad_dma_address;
	return 0;
}
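/*
 * Worked example of the merge rule above (illustrative comment, not from the
 * original source): with iommu_merge enabled, two consecutive entries that
 * both need remapping are folded into one output segment when the previous
 * chunk ends exactly on a page boundary and the new one starts at offset 0,
 * e.g. two 4096-byte chunks become a single 8192-byte GART mapping.  A chunk
 * with a non-zero page offset, or one that would push the merged segment past
 * dma_get_max_seg_size(dev), starts a new output segment instead.
 */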
/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
		    gfp_t flag)
{
	dma_addr_t paddr;
	unsigned long align_mask;
	struct page *page;

	if (force_iommu && !(flag & GFP_DMA)) {
		flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
		page = alloc_pages(flag | __GFP_ZERO, get_order(size));
		if (!page)
			return NULL;

		align_mask = (1UL << get_order(size)) - 1;
		paddr = dma_map_area(dev, page_to_phys(page), size,
				     DMA_BIDIRECTIONAL, align_mask);
		flush_gart();
		if (paddr != bad_dma_address) {
			*dma_addr = paddr;
			return page_address(page);
		}
		__free_pages(page, get_order(size));
	} else
		return dma_generic_alloc_coherent(dev, size, dma_addr, flag);

	return NULL;
}
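/*
 * Alignment example (illustrative comment, not from the original source):
 * for a 16KB coherent allocation, get_order(16384) == 2, so align_mask above
 * is (1UL << 2) - 1 == 0x3 pages and alloc_iommu() places the mapping on a
 * 4-page boundary, i.e. the returned bus address is naturally aligned to the
 * 16KB allocation size.
 */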
/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_addr)
{
	gart_unmap_single(dev, dma_addr, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long)vaddr, get_order(size));
}
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		printk(KERN_WARNING
			"PCI-DMA: Warning: Small IOMMU %luMB."
			" Consider increasing the AGP aperture in BIOS\n",
				iommu_size >> 20);
	}

	return iommu_size;
}
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}
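/*
 * Worked example (illustrative comment, not from the original source): if the
 * aperture control register decodes to aper_order = 1 and
 * AMD64_GARTAPERTUREBASE reads 0x20, then aper_size = 32MB << 1 = 64MB and
 * aper_base = (0x20 & 0x7fff) << 25 = 0x40000000 (1GB).  1GB + 64MB fits
 * below 4GB and the size is non-zero, so the aperture is accepted.
 */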
static void enable_gart_translations(void)
{
	int i;

	for (i = 0; i < num_k8_northbridges; i++) {
		struct pci_dev *dev = k8_northbridges[i];

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}
}
/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}
static int gart_resume(struct sys_device *dev)
{
	printk(KERN_INFO "PCI-DMA: Resuming GART IOMMU\n");

	if (fix_up_north_bridges) {
		int i;

		printk(KERN_INFO "PCI-DMA: Restoring GART aperture settings\n");

		for (i = 0; i < num_k8_northbridges; i++) {
			struct pci_dev *dev = k8_northbridges[i];

			/*
			 * Don't enable translations just yet. That is the next
			 * step. Restore the pre-suspend aperture settings.
			 */
			pci_write_config_dword(dev, AMD64_GARTAPERTURECTL,
						aperture_order << 1);
			pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE,
						aperture_alloc >> 25);
		}
	}

	enable_gart_translations();

	return 0;
}
static int gart_suspend(struct sys_device *dev, pm_message_t state)
{
	return 0;
}

static struct sysdev_class gart_sysdev_class = {
	.name		= "gart",
	.suspend	= gart_suspend,
	.resume		= gart_resume,
};

static struct sys_device device_gart = {
	.id	= 0,
	.cls	= &gart_sysdev_class,
};
/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i, error;
	unsigned long start_pfn, end_pfn;

	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < num_k8_northbridges; i++) {
		dev = k8_northbridges[i];
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;

	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	memset(gatt, 0, gatt_size);
	agp_gatt_table = gatt;

	enable_gart_translations();

	error = sysdev_class_register(&gart_sysdev_class);
	if (!error)
		error = sysdev_register(&device_gart);
	if (error)
		panic("Could not register gart_sysdev -- would corrupt data on next suspend");

	flush_gart();

	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
	       aper_base, aper_size>>10);

	/* need to map that range */
	end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
	if (end_pfn > max_low_pfn_mapped) {
		start_pfn = (aper_base>>PAGE_SHIFT);
		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
	}
	return 0;

 nommu:
	/* Should not happen anymore */
	printk(KERN_WARNING "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       KERN_WARNING "falling back to iommu=soft.\n");
	return -1;
}
extern int agp_amd64_init(void);
static struct dma_mapping_ops gart_dma_ops = {
	.map_single			= gart_map_single,
	.unmap_single			= gart_unmap_single,
	.sync_single_for_cpu		= NULL,
	.sync_single_for_device		= NULL,
	.sync_single_range_for_cpu	= NULL,
	.sync_single_range_for_device	= NULL,
	.sync_sg_for_cpu		= NULL,
	.sync_sg_for_device		= NULL,
	.map_sg				= gart_map_sg,
	.unmap_sg			= gart_unmap_sg,
	.alloc_coherent			= gart_alloc_coherent,
	.free_coherent			= gart_free_coherent,
};
void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	if (no_agp && (dma_ops != &gart_dma_ops))
		return;

	for (i = 0; i < num_k8_northbridges; i++) {
		u32 ctl;

		dev = k8_northbridges[i];
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

		ctl &= ~GARTEN;

		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
	}
}
void __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_size;
	unsigned long scratch;
	long i;

	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
		printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
		return;
	}

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other K8 AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (swiotlb)
		return;

	/* Did we detect a different HW IOMMU? */
	if (iommu_detected && !gart_iommu_aperture)
		return;

	if (no_iommu ||
	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_k8_gatt(&info) < 0)) {
		if (max_pfn > MAX_DMA32_PFN) {
			printk(KERN_WARNING "More than 4GB of memory "
			       "but GART IOMMU not available.\n"
			       KERN_WARNING "falling back to iommu=soft.\n");
		}
		return;
	}

	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
	aper_size = info.aper_size * 1024 * 1024;
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL,
						      get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");
	memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
				  get_order(iommu_pages*sizeof(void *)));
		if (iommu_leak_tab)
			memset(iommu_leak_tab, 0, iommu_pages * 8);
		else
			printk(KERN_DEBUG
			       "PCI-DMA: Cannot allocate leak trace area\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	agp_memory_reserved = iommu_size;
	printk(KERN_INFO
	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size >> 20);

	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
	bad_dma_address = iommu_bus_base;
	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
				iommu_size >> PAGE_SHIFT);
	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it safe and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();

	/*
	 * Try to workaround a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then. (P2P bridge may be prefetching on DMA reads).
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
}
void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=')
			++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 8))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}
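/*
 * Usage note (illustrative, not from the original source): these flags are
 * normally passed as sub-options of the "iommu=" kernel command line
 * parameter, for example:
 *	iommu=fullflush		flush the GART after every mapping
 *	iommu=memaper=2		force a fallback aperture of 32MB << 2 = 128MB
 *	iommu=leak		trace leaked mappings (needs CONFIG_IOMMU_LEAK)
 */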