/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32-bit addresses to be used on
 * systems with more than 4GB of memory.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */
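
/*
 * Illustrative sketch only: drivers never call into this file directly
 * but go through the generic DMA API described in DMA-mapping.txt, which
 * dispatches to the gart_* operations registered at the bottom of this
 * file. Roughly (pdev, buf and len are hypothetical driver-side names):
 *
 *      dma_addr_t bus = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
 *      ...program the device with 'bus', wait for the DMA to complete...
 *      pci_unmap_single(pdev, bus, len, PCI_DMA_TODEVICE);
 */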

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/sysdev.h>
#include <linux/io.h>
#include <asm/atomic.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/k8.h>

static unsigned long iommu_bus_base;    /* GART remapping area (physical) */
static unsigned long iommu_size;        /* size of remapping area in bytes */
static unsigned long iommu_pages;       /* .. and in pages */

static u32 *iommu_gatt_base;            /* Remapping table */

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. Problem is that doing the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * it has also been seen with Qlogic at least).
 */
int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

#define GPTE_VALID     1
#define GPTE_COHERENT  2
#define GPTE_ENCODE(x) \
        (((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
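
/*
 * Worked example (informative only): for the 40-bit physical address
 * 0x123456000, GPTE_ENCODE() keeps the low page bits (0x23456000), folds
 * the address bits above 4GB into PTE bits 4-11 (0x1 << 4 = 0x10) and ORs
 * in GPTE_VALID|GPTE_COHERENT, giving the 32-bit PTE 0x23456013.
 * GPTE_DECODE() reverses this: 0x23456000 | (0x010 << 28) = 0x123456000.
 */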

#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;  /* protected by iommu_bitmap_lock */
static bool need_flush;         /* global flush state; set on each GART wrap */

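/*
 * Allocate a run of 'size' GART pages: a next-fit search over the
 * allocation bitmap starting at next_bit. Whenever the search has to wrap
 * back to the start of the aperture, need_flush is set so that stale GART
 * entries from the previous pass are flushed before their slots are reused.
 */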
static unsigned long alloc_iommu(struct device *dev, int size,
                                 unsigned long align_mask)
{
        unsigned long offset, flags;
        unsigned long boundary_size;
        unsigned long base_index;

        base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
                           PAGE_SIZE) >> PAGE_SHIFT;
        boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
                              PAGE_SIZE) >> PAGE_SHIFT;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
                                  size, base_index, boundary_size, align_mask);
        if (offset == -1) {
                need_flush = true;
                offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
                                          size, base_index, boundary_size,
                                          align_mask);
        }
        if (offset != -1) {
                next_bit = offset + size;
                if (next_bit >= iommu_pages) {
                        next_bit = 0;
                        need_flush = true;
                }
        }
        if (iommu_fullflush)
                need_flush = true;
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

        return offset;
}

static void free_iommu(unsigned long offset, int size)
{
        unsigned long flags;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        iommu_area_free(iommu_gart_bitmap, offset, size);
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
        unsigned long flags;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        if (need_flush) {
                k8_flush_garts();
                need_flush = false;
        }
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x)                                                     \
        do {                                                            \
                if (iommu_leak_tab)                                     \
                        iommu_leak_tab[x] = __builtin_return_address(0);\
        } while (0)

#define CLEAR_LEAK(x)                                                   \
        do {                                                            \
                if (iommu_leak_tab)                                     \
                        iommu_leak_tab[x] = NULL;                       \
        } while (0)

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
        int i;
        static int dump;

        if (dump || !iommu_leak_tab)
                return;
        dump = 1;
        show_stack(NULL, NULL);

        /* Very crude. Dump some entries from the end of the table too. */
        printk(KERN_DEBUG "Dumping %d pages from end of IOMMU:\n",
               iommu_leak_pages);
        for (i = 1; i <= iommu_leak_pages; i++) {
                printk(KERN_DEBUG "%lu: ", iommu_pages - i);
                printk_address((unsigned long)iommu_leak_tab[iommu_pages - i],
                               0);
                printk(KERN_CONT "%c", (i % 2) ? ' ' : '\n');
        }
        printk(KERN_DEBUG "\n");
}
#else
# define SET_LEAK(x)
# define CLEAR_LEAK(x)
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
        /*
         * Ran out of IOMMU space for this operation. This is very bad.
         * Unfortunately the drivers cannot handle this operation properly.
         * Return some non-mapped, prereserved space in the aperture and
         * let the Northbridge deal with it. This will result in garbage
         * in the I/O operation. When the size exceeds the prereserved space
         * memory corruption will occur or random memory will be DMAed
         * out. Hopefully no network devices use single mappings that big.
         */

        dev_err(dev, "PCI-DMA: Out of IOMMU space for %zu bytes\n", size);

        if (size > PAGE_SIZE*EMERGENCY_PAGES) {
                if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
                        panic("PCI-DMA: Memory would be corrupted\n");
                if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
                        panic("PCI-DMA: Random memory would be DMAed\n");
        }
#ifdef CONFIG_IOMMU_LEAK
        dump_leak();
#endif
}

static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
        return force_iommu ||
                !is_buffer_dma_capable(*dev->dma_mask, addr, size);
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
        return !is_buffer_dma_capable(*dev->dma_mask, addr, size);
}

/*
 * Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
                               size_t size, int dir, unsigned long align_mask)
{
        unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
        unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
        int i;

        if (iommu_page == -1) {
                if (!nonforced_iommu(dev, phys_mem, size))
                        return phys_mem;
                if (panic_on_overflow)
                        panic("dma_map_area overflow %zu bytes\n", size);
                iommu_full(dev, size, dir);
                return bad_dma_address;
        }

        for (i = 0; i < npages; i++) {
                iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
                SET_LEAK(iommu_page + i);
                phys_mem += PAGE_SIZE;
        }
        return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
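
/*
 * Informative example: mapping 0x2000 bytes starting at physical
 * 0x12345678 spans three GART pages (the buffer begins at offset 0x678
 * into its first page), so dma_map_area() allocates three consecutive
 * slots, points them at the successive physical pages, and returns the
 * bus address iommu_bus_base + slot*PAGE_SIZE + 0x678.
 */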

/* Map a single area into the IOMMU */
static dma_addr_t
gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
{
        unsigned long bus;

        if (!dev)
                dev = &x86_dma_fallback_dev;

        if (!need_iommu(dev, paddr, size))
                return paddr;

        bus = dma_map_area(dev, paddr, size, dir, 0);
        flush_gart();

        return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
                              size_t size, int direction)
{
        unsigned long iommu_page;
        int npages;
        int i;

        if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
            dma_addr >= iommu_bus_base + iommu_size)
                return;

        iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
        npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
        for (i = 0; i < npages; i++) {
                iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
                CLEAR_LEAK(iommu_page + i);
        }
        free_iommu(iommu_page, npages);
}

/*
 * Wrapper around gart_unmap_single() for scatterlists.
 */
static void
gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                if (!s->dma_length || !s->length)
                        break;
                gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
        }
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
                               int nents, int dir)
{
        struct scatterlist *s;
        int i;

#ifdef CONFIG_IOMMU_DEBUG
        printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

        for_each_sg(sg, s, nents, i) {
                unsigned long addr = sg_phys(s);

                if (nonforced_iommu(dev, addr, s->length)) {
                        addr = dma_map_area(dev, addr, s->length, dir, 0);
                        if (addr == bad_dma_address) {
                                if (i > 0)
                                        gart_unmap_sg(dev, sg, i, dir);
                                nents = 0;
                                sg[0].dma_length = 0;
                                break;
                        }
                }
                s->dma_address = addr;
                s->dma_length = s->length;
        }
        flush_gart();

        return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
                          int nelems, struct scatterlist *sout,
                          unsigned long pages)
{
        unsigned long iommu_start = alloc_iommu(dev, pages, 0);
        unsigned long iommu_page = iommu_start;
        struct scatterlist *s;
        int i;

        if (iommu_start == -1)
                return -1;

        for_each_sg(start, s, nelems, i) {
                unsigned long pages, addr;
                unsigned long phys_addr = s->dma_address;

                BUG_ON(s != start && s->offset);
                if (s == start) {
                        sout->dma_address = iommu_bus_base;
                        sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
                        sout->dma_length = s->length;
                } else {
                        sout->dma_length += s->length;
                }

                addr = phys_addr;
                pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
                while (pages--) {
                        iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
                        SET_LEAK(iommu_page);
                        addr += PAGE_SIZE;
                        iommu_page++;
                }
        }
        BUG_ON(iommu_page - iommu_start != pages);

        return 0;
}

static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
             struct scatterlist *sout, unsigned long pages, int need)
{
        if (!need) {
                BUG_ON(nelems != 1);
                sout->dma_address = start->dma_address;
                sout->dma_length = start->length;
                return 0;
        }
        return __dma_map_cont(dev, start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a contiguous mapping.
 */
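/*
 * Merging example (informative): two consecutive 4096-byte entries, where
 * the first ends exactly on a page boundary and the second starts at
 * offset 0, are emitted as a single 8192-byte mapping, provided that
 * iommu_merge is enabled, both entries need the IOMMU, and the merged
 * length stays within the device's maximum segment size.
 */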
static int
gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
        struct scatterlist *s, *ps, *start_sg, *sgmap;
        int need = 0, nextneed, i, out, start;
        unsigned long pages = 0;
        unsigned int seg_size;
        unsigned int max_seg_size;

        if (nents == 0)
                return 0;

        if (!dev)
                dev = &x86_dma_fallback_dev;

        out = 0;
        start = 0;
        start_sg = sgmap = sg;
        seg_size = 0;
        max_seg_size = dma_get_max_seg_size(dev);
        ps = NULL; /* shut up gcc */
        for_each_sg(sg, s, nents, i) {
                dma_addr_t addr = sg_phys(s);

                s->dma_address = addr;
                BUG_ON(s->length == 0);

                nextneed = need_iommu(dev, addr, s->length);

                /* Handle the previous not-yet-processed entries */
                if (i > start) {
                        /*
                         * Can only merge when the last chunk ends on a
                         * page boundary and the new one doesn't have an
                         * offset.
                         */
                        if (!iommu_merge || !nextneed || !need || s->offset ||
                            (s->length + seg_size > max_seg_size) ||
                            (ps->offset + ps->length) % PAGE_SIZE) {
                                if (dma_map_cont(dev, start_sg, i - start,
                                                 sgmap, pages, need) < 0)
                                        goto error;
                                out++;
                                seg_size = 0;
                                sgmap = sg_next(sgmap);
                                pages = 0;
                                start = i;
                                start_sg = s;
                        }
                }

                seg_size += s->length;
                need = nextneed;
                pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
                ps = s;
        }
        if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
                goto error;
        out++;
        flush_gart();
        if (out < nents) {
                sgmap = sg_next(sgmap);
                sgmap->dma_length = 0;
        }
        return out;

error:
        flush_gart();
        gart_unmap_sg(dev, sg, out, dir);

        /* If it was forced or merged, try again in a dumb way */
        if (force_iommu || iommu_merge) {
                out = dma_map_sg_nonforce(dev, sg, nents, dir);
                if (out > 0)
                        return out;
        }
        if (panic_on_overflow)
                panic("dma_map_sg: overflow on %lu pages\n", pages);

        iommu_full(dev, pages << PAGE_SHIFT, dir);
        for_each_sg(sg, s, nents, i)
                s->dma_address = bad_dma_address;
        return 0;
}

/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
                    gfp_t flag)
{
        dma_addr_t paddr;
        unsigned long align_mask;
        struct page *page;

        if (force_iommu && !(flag & GFP_DMA)) {
                flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
                page = alloc_pages(flag | __GFP_ZERO, get_order(size));
                if (!page)
                        return NULL;

                align_mask = (1UL << get_order(size)) - 1;
                paddr = dma_map_area(dev, page_to_phys(page), size,
                                     DMA_BIDIRECTIONAL, align_mask);

                flush_gart();
                if (paddr != bad_dma_address) {
                        *dma_addr = paddr;
                        return page_address(page);
                }
                __free_pages(page, get_order(size));
        } else
                return dma_generic_alloc_coherent(dev, size, dma_addr, flag);

        return NULL;
}

/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
                   dma_addr_t dma_addr)
{
        gart_unmap_single(dev, dma_addr, size, DMA_BIDIRECTIONAL);
        free_pages((unsigned long)vaddr, get_order(size));
}

static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
        unsigned long a;

        if (!iommu_size) {
                iommu_size = aper_size;
                if (!no_agp)
                        iommu_size /= 2;
        }

        a = aper + iommu_size;
        iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

        if (iommu_size < 64*1024*1024) {
                printk(KERN_WARNING
                       "PCI-DMA: Warning: Small IOMMU %luMB."
                       " Consider increasing the AGP aperture in BIOS\n",
                       iommu_size >> 20);
        }

        return iommu_size;
}
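
/*
 * Sizing example (informative): with a 1GB aperture and AGP in use, half
 * of the aperture (512MB) is claimed for the IOMMU and the rest is left
 * to the AGP driver; the end of the IOMMU range is additionally rounded
 * down to a 2MB (PMD_PAGE_SIZE) boundary, which matters when an explicit
 * size was given on the command line.
 */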

static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
        unsigned aper_size = 0, aper_base_32, aper_order;
        u64 aper_base;

        pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
        pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
        aper_order = (aper_order >> 1) & 7;

        aper_base = aper_base_32 & 0x7fff;
        aper_base <<= 25;

        aper_size = (32 * 1024 * 1024) << aper_order;
        if (aper_base + aper_size > 0x100000000UL || !aper_size)
                aper_base = 0;

        *size = aper_size;
        return aper_base;
}
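
/*
 * Register decoding example (informative): the aperture order lives in
 * bits 3:1 of AMD64_GARTAPERTURECTL and the size is 32MB << order, so an
 * order of 3 means a 256MB aperture. The base register holds physical
 * address bits 39:25, so a raw value of 0x40 decodes to 0x40 << 25 = 2GB.
 */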

static void enable_gart_translations(void)
{
        int i;

        for (i = 0; i < num_k8_northbridges; i++) {
                struct pci_dev *dev = k8_northbridges[i];

                enable_gart_translation(dev, __pa(agp_gatt_table));
        }
}

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
        fix_up_north_bridges = true;
        aperture_order = aper_order;
        aperture_alloc = aper_alloc;
}

static int gart_resume(struct sys_device *dev)
{
        printk(KERN_INFO "PCI-DMA: Resuming GART IOMMU\n");

        if (fix_up_north_bridges) {
                int i;

                printk(KERN_INFO "PCI-DMA: Restoring GART aperture settings\n");

                for (i = 0; i < num_k8_northbridges; i++) {
                        struct pci_dev *dev = k8_northbridges[i];

                        /*
                         * Don't enable translations just yet. That is the next
                         * step. Restore the pre-suspend aperture settings.
                         */
                        pci_write_config_dword(dev, AMD64_GARTAPERTURECTL,
                                               aperture_order << 1);
                        pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE,
                                               aperture_alloc >> 25);
                }
        }

        enable_gart_translations();

        return 0;
}

static int gart_suspend(struct sys_device *dev, pm_message_t state)
{
        return 0;
}

static struct sysdev_class gart_sysdev_class = {
        .name           = "gart",
        .suspend        = gart_suspend,
        .resume         = gart_resume,
};

static struct sys_device device_gart = {
        .id     = 0,
        .cls    = &gart_sysdev_class,
};

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
        unsigned aper_size, gatt_size, new_aper_size;
        unsigned aper_base, new_aper_base;
        struct pci_dev *dev;
        void *gatt;
        int i, error;

        printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
        aper_size = aper_base = info->aper_size = 0;
        dev = NULL;
        for (i = 0; i < num_k8_northbridges; i++) {
                dev = k8_northbridges[i];
                new_aper_base = read_aperture(dev, &new_aper_size);
                if (!new_aper_base)
                        goto nommu;

                if (!aper_base) {
                        aper_size = new_aper_size;
                        aper_base = new_aper_base;
                }
                if (aper_size != new_aper_size || aper_base != new_aper_base)
                        goto nommu;
        }
        if (!aper_base)
                goto nommu;
        info->aper_base = aper_base;
        info->aper_size = aper_size >> 20;

        gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
        gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                        get_order(gatt_size));
        if (!gatt)
                panic("Cannot allocate GATT table");
        if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
                panic("Could not set GART PTEs to uncacheable pages");

        agp_gatt_table = gatt;

        enable_gart_translations();

        error = sysdev_class_register(&gart_sysdev_class);
        if (!error)
                error = sysdev_register(&device_gart);
        if (error)
                panic("Could not register gart_sysdev -- "
                      "would corrupt data on next suspend");

        flush_gart();

        printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
               aper_base, aper_size >> 10);

        return 0;

nommu:
        /* Should not happen anymore */
        printk(KERN_WARNING "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
               KERN_WARNING "falling back to iommu=soft.\n");
        return -1;
}

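/*
 * These ops implement the generic DMA mapping interface for the GART;
 * gart_iommu_init() below installs them as the global dma_ops once the
 * aperture has been set up.
 */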
static struct dma_mapping_ops gart_dma_ops = {
        .map_single     = gart_map_single,
        .unmap_single   = gart_unmap_single,
        .map_sg         = gart_map_sg,
        .unmap_sg       = gart_unmap_sg,
        .alloc_coherent = gart_alloc_coherent,
        .free_coherent  = gart_free_coherent,
};

void gart_iommu_shutdown(void)
{
        struct pci_dev *dev;
        int i;

        if (no_agp && (dma_ops != &gart_dma_ops))
                return;

        for (i = 0; i < num_k8_northbridges; i++) {
                u32 ctl;

                dev = k8_northbridges[i];
                pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

                ctl &= ~GARTEN;

                pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
        }
}

void __init gart_iommu_init(void)
{
        struct agp_kern_info info;
        unsigned long iommu_start;
        unsigned long aper_base, aper_size;
        unsigned long start_pfn, end_pfn;
        unsigned long scratch;
        long i;

        if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
                printk(KERN_INFO "PCI-GART: No AMD GART found.\n");
                return;
        }

#ifndef CONFIG_AGP_AMD64
        no_agp = 1;
#else
        /* Makefile puts PCI initialization via subsys_initcall first. */
        /* Add other K8 AGP bridge drivers here */
        no_agp = no_agp ||
                 (agp_amd64_init() < 0) ||
                 (agp_copy_info(agp_bridge, &info) < 0);
#endif

        if (swiotlb)
                return;

        /* Did we detect a different HW IOMMU? */
        if (iommu_detected && !gart_iommu_aperture)
                return;

        if (no_iommu ||
            (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
            !gart_iommu_aperture ||
            (no_agp && init_k8_gatt(&info) < 0)) {
                if (max_pfn > MAX_DMA32_PFN) {
                        printk(KERN_WARNING "More than 4GB of memory "
                               "but GART IOMMU not available.\n");
                        printk(KERN_WARNING "falling back to iommu=soft.\n");
                }
                return;
        }

        /* need to map that range */
        aper_size = info.aper_size << 20;
        aper_base = info.aper_base;
        end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
        if (end_pfn > max_low_pfn_mapped) {
                start_pfn = (aper_base>>PAGE_SHIFT);
                init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
        }

        printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
        iommu_size = check_iommu_size(info.aper_base, aper_size);
        iommu_pages = iommu_size >> PAGE_SHIFT;

        iommu_gart_bitmap = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                     get_order(iommu_pages/8));
        if (!iommu_gart_bitmap)
                panic("Cannot allocate iommu bitmap\n");

#ifdef CONFIG_IOMMU_LEAK
        if (leak_trace) {
                iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
                                  get_order(iommu_pages*sizeof(void *)));
                if (!iommu_leak_tab)
                        printk(KERN_DEBUG
                               "PCI-DMA: Cannot allocate leak trace area\n");
        }
#endif

        /*
         * Out of IOMMU space handling.
         * Reserve some invalid pages at the beginning of the GART.
         */
        iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

        agp_memory_reserved = iommu_size;
        printk(KERN_INFO
               "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
               iommu_size >> 20);

        iommu_start = aper_size - iommu_size;
        iommu_bus_base = info.aper_base + iommu_start;
        bad_dma_address = iommu_bus_base;
        iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

        /*
         * Unmap the IOMMU part of the GART. The alias of the page is
         * always mapped with cache enabled and there is no full cache
         * coherency across the GART remapping. The unmapping avoids
         * automatic prefetches from the CPU allocating cache lines in
         * there. All CPU accesses are done via the direct mapping to
         * the backing memory. The GART address is only used by PCI
         * devices.
         */
        set_memory_np((unsigned long)__va(iommu_bus_base),
                      iommu_size >> PAGE_SHIFT);
        /*
         * Tricky. The GART table remaps the physical memory range,
         * so the CPU won't notice potential aliases and if the memory
         * is remapped to UC later on, we might surprise the PCI devices
         * with a stray writeout of a cacheline. So play it safe and
         * do an explicit, full-scale wbinvd() _after_ having marked all
         * the pages as Not-Present:
         */
        wbinvd();

        /*
         * Try to work around a bug (thanks to BenH):
         * Set unmapped entries to a scratch page instead of 0.
         * Any prefetches that hit unmapped entries won't get a bus abort
         * then. (P2P bridge may be prefetching on DMA reads).
         */
        scratch = get_zeroed_page(GFP_KERNEL);
        if (!scratch)
                panic("Cannot allocate iommu scratch page");
        gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
        for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
                iommu_gatt_base[i] = gart_unmapped_entry;

        flush_gart();
        dma_ops = &gart_dma_ops;
}

void __init gart_parse_options(char *p)
{
        int arg;

#ifdef CONFIG_IOMMU_LEAK
        if (!strncmp(p, "leak", 4)) {
                leak_trace = 1;
                p += 4;
                if (*p == '=')
                        ++p;
                if (isdigit(*p) && get_option(&p, &arg))
                        iommu_leak_pages = arg;
        }
#endif
        if (isdigit(*p) && get_option(&p, &arg))
                iommu_size = arg;
        if (!strncmp(p, "fullflush", 9))
                iommu_fullflush = 1;
        if (!strncmp(p, "nofullflush", 11))
                iommu_fullflush = 0;
        if (!strncmp(p, "noagp", 5))
                no_agp = 1;
        if (!strncmp(p, "noaperture", 10))
                fix_aperture = 0;
        /* duplicated from pci-dma.c */
        if (!strncmp(p, "force", 5))
                gart_iommu_aperture_allowed = 1;
        if (!strncmp(p, "allowed", 7))
                gart_iommu_aperture_allowed = 1;
        if (!strncmp(p, "memaper", 7)) {
                fallback_aper_force = 1;
                p += 7;
                if (*p == '=') {
                        ++p;
                        if (get_option(&p, &arg))
                                fallback_aper_order = arg;
                }
        }
}
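
/*
 * Usage sketch (informative): these options arrive as comma-separated
 * tokens of the iommu= kernel parameter, handed over one by one from
 * iommu_setup() in pci-dma.c. For example, booting with
 *
 *      iommu=memaper=2,fullflush
 *
 * forces a fallback aperture of order 2 and a GART flush on every mapping.
 */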