// SPDX-License-Identifier: GPL-2.0-only
/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32-bit addresses to be used on
 * systems with more than 4GB of memory.
 *
 * See Documentation/DMA-API-HOWTO.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitmap.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/syscore_ops.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/atomic.h>
#include <linux/dma-direct.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/set_memory.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area in bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. The problem is that the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * it has also been seen with Qlogic at least).
 */
static int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
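
/*
 * A GART PTE packs a 40-bit physical address into 32 bits. For example,
 * with phys = 0x1234567000: GPTE_ENCODE() keeps bits 12-31 (0x34567000),
 * moves bits 32-39 (0x12) down into bits 4-11 (0x120) and sets
 * VALID|COHERENT, giving 0x34567123; GPTE_DECODE() shifts bits 4-11 back
 * up and recovers 0x1234567000 (flag and sub-page bits are dropped).
 */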

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* GART can only remap to physical addresses < 1TB */
#define GART_MAX_PHYS_ADDR	(1ULL << 40)

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static bool need_flush;		/* global flush state; set on each GART wrap */
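
/*
 * Allocate a run of 'size' pages in the GART bitmap. The search is
 * next-fit, starting at next_bit; on failure it wraps around and retries
 * from the start of the bitmap, scheduling a GART TLB flush because
 * entries below next_bit may be recycled. Returns the first page index,
 * or -1 if the aperture is full.
 */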
static unsigned long alloc_iommu(struct device *dev, int size,
				 unsigned long align_mask)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = ALIGN((u64)dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, align_mask);
	if (offset == -1) {
		need_flush = true;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size,
					  align_mask);
	}
	if (offset != -1) {
		next_bit = offset + size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = true;
		}
	}
	if (iommu_fullflush)
		need_flush = true;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	bitmap_clear(iommu_gart_bitmap, offset, size);
	if (offset >= next_bit)
		next_bit = offset + size;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		amd_flush_garts();
		need_flush = false;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK
/* Debugging aid for drivers that don't free their IOMMU tables */
static void dump_leak(void)
{
	static int dump;

	if (dump)
		return;
	dump = 1;

	show_stack(NULL, NULL);
	debug_dma_dump_mappings(NULL);
}
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped, prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return force_iommu || !dma_capable(dev, addr, size);
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return !dma_capable(dev, addr, size);
}

/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
			       size_t size, int dir, unsigned long align_mask)
{
	unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
	unsigned long iommu_page;
	int i;

	if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
		return DMA_MAPPING_ERROR;

	iommu_page = alloc_iommu(dev, npages, align_mask);
	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return DMA_MAPPING_ERROR;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
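
/*
 * dma_map_area() preserves the sub-page offset: the loop advances
 * phys_mem in whole pages, so (phys_mem & ~PAGE_MASK) is still the
 * original offset, and the returned bus address is
 * iommu_bus_base + slot * PAGE_SIZE + offset.
 */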

/* Map a single area into the IOMMU */
static dma_addr_t gart_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	unsigned long bus;
	phys_addr_t paddr = page_to_phys(page) + offset;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = dma_map_area(dev, paddr, size, dir, 0);
	flush_gart();

	return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
			    size_t size, enum dma_data_direction dir,
			    unsigned long attrs)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (WARN_ON_ONCE(dma_addr == DMA_MAPPING_ERROR))
		return;

	/*
	 * This driver will not always use a GART mapping, but might have
	 * created a direct mapping instead. If that is the case there is
	 * nothing to unmap here.
	 */
	if (dma_addr < iommu_bus_base ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			  enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_page(dev, s->dma_address, s->dma_length, dir, 0);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	pr_debug("dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir, 0);
			if (addr == DMA_MAPPING_ERROR) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir, 0);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages, 0);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}

static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page-aligned sizes into a contiguous mapping.
 */
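/*
 * For example, two 8KB chunks can be merged into a single 16KB GART
 * mapping when iommu_merge is enabled, both chunks need the IOMMU, the
 * first ends on a page boundary and the second starts at offset 0; only
 * the first scatterlist entry then carries a dma_address/dma_length.
 */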
static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return 0;

	out		= 0;
	start		= 0;
	start_sg	= sg;
	sgmap		= sg;
	seg_size	= 0;
	max_seg_size	= dma_get_max_seg_size(dev);
	ps		= NULL; /* shut up gcc */

	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(dev, start_sg, i - start,
						 sgmap, pages, need) < 0)
					goto error;
				out++;

				seg_size	= 0;
				sgmap		= sg_next(sgmap);
				pages		= 0;
				start		= i;
				start_sg	= s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		ps = s;
	}
	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir, 0);

	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = DMA_MAPPING_ERROR;
	return 0;
}

/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
		    gfp_t flag, unsigned long attrs)
{
	void *vaddr;

	vaddr = dma_direct_alloc_pages(dev, size, dma_addr, flag, attrs);
	if (!vaddr ||
	    !force_iommu || dev->coherent_dma_mask <= DMA_BIT_MASK(24))
		return vaddr;

	*dma_addr = dma_map_area(dev, virt_to_phys(vaddr), size,
			DMA_BIDIRECTIONAL, (1UL << get_order(size)) - 1);
	flush_gart();
	if (unlikely(*dma_addr == DMA_MAPPING_ERROR))
		goto out_free;
	return vaddr;
out_free:
	dma_direct_free_pages(dev, size, vaddr, *dma_addr, attrs);
	return NULL;
}

/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_addr, unsigned long attrs)
{
	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
	dma_direct_free_pages(dev, size, vaddr, dma_addr, attrs);
}

static int no_agp;
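
/*
 * Decide how much of the aperture the IOMMU gets. For example, with a
 * 256MB aperture and AGP still in use, half (128MB) goes to the IOMMU;
 * the round_up() below then only trims the size if aper + iommu_size
 * would not end on a 2MB (PMD_PAGE_SIZE) boundary.
 */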
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		pr_warning(
			"PCI-DMA: Warning: Small IOMMU %luMB."
			" Consider increasing the AGP aperture in BIOS\n",
			iommu_size >> 20);
	}

	return iommu_size;
}
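
/*
 * Read the aperture location and size from the northbridge config space.
 * The base register stores physical address bits 39:25 in its low 15 bits
 * and the control register encodes the size as 32MB << order (order in
 * bits 3:1). For example, a base field of 0x40 and order 3 describe a
 * 256MB aperture at 2GB.
 */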
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}

static void enable_gart_translations(void)
{
	int i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}

	/* Flush the GART-TLB to remove stale entries */
	amd_flush_garts();
}

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}

static void gart_fixup_northbridges(void)
{
	int i;

	if (!fix_up_north_bridges)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	pr_info("PCI-DMA: Restoring GART aperture settings\n");

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		/*
		 * Don't enable translations just yet. That is the next
		 * step. Restore the pre-suspend aperture settings.
		 */
		gart_set_size_and_enable(dev, aperture_order);
		pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE,
				       aperture_alloc >> 25);
	}
}

static void gart_resume(void)
{
	pr_info("PCI-DMA: Resuming GART IOMMU\n");

	gart_fixup_northbridges();

	enable_gart_translations();
}

static struct syscore_ops gart_syscore_ops = {
	.resume		= gart_resume,
};

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_amd_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i;

	pr_info("PCI-DMA: Disabling AGP.\n");

	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < amd_nb_num(); i++) {
		dev = node_to_amd_nb(i)->misc;
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;

	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	agp_gatt_table = gatt;

	register_syscore_ops(&gart_syscore_ops);

	flush_gart();

	pr_info("PCI-DMA: aperture base @ %x size %u KB\n",
		aper_base, aper_size >> 10);

	return 0;

 nommu:
	/* Should not happen anymore */
	pr_warning("PCI-DMA: More than 4GB of RAM and no IOMMU\n"
		   "falling back to iommu=soft.\n");
	return -1;
}
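
/*
 * The operations handed to the core DMA API. Coherent allocations come
 * from dma-direct and are remapped through the GART only when force_iommu
 * requires it; streaming mappings go through the helpers above.
 */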
static const struct dma_map_ops gart_dma_ops = {
	.map_sg				= gart_map_sg,
	.unmap_sg			= gart_unmap_sg,
	.map_page			= gart_map_page,
	.unmap_page			= gart_unmap_page,
	.alloc				= gart_alloc_coherent,
	.free				= gart_free_coherent,
	.mmap				= dma_common_mmap,
	.get_sgtable			= dma_common_get_sgtable,
	.dma_supported			= dma_direct_supported,
};

static void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	/* don't shut it down if AGP is installed */
	if (!no_agp)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		u32 ctl;

		dev = node_to_amd_nb(i)->misc;
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

		ctl &= ~GARTEN;

		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
	}
}

int __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_base, aper_size;
	unsigned long start_pfn, end_pfn;
	unsigned long scratch;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other AMD AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (no_iommu ||
	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_amd_gatt(&info) < 0)) {
		if (max_pfn > MAX_DMA32_PFN) {
			pr_warning("More than 4GB of memory but GART IOMMU not available.\n");
			pr_warning("falling back to iommu=soft.\n");
		}
		return 0;
	}

	/* need to map that range */
	aper_size	= info.aper_size << 20;
	aper_base	= info.aper_base;
	end_pfn		= (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);

	start_pfn = PFN_DOWN(aper_base);
	if (!pfn_range_is_mapped(start_pfn, end_pfn))
		init_memory_mapping(start_pfn<<PAGE_SHIFT,
				    end_pfn<<PAGE_SHIFT);

	pr_info("PCI-DMA: using GART IOMMU.\n");
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						      get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");

	pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
		iommu_size >> 20);

	agp_memory_reserved	= iommu_size;
	iommu_start		= aper_size - iommu_size;
	iommu_bus_base		= info.aper_base + iommu_start;
	iommu_gatt_base		= agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
		      iommu_size >> PAGE_SHIFT);
	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it safe and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();

	/*
	 * Now all caches are flushed and we can safely enable
	 * GART hardware. Doing it early leaves the possibility
	 * of stale cache entries that can lead to GART PTE
	 * errors.
	 */
	enable_gart_translations();

	/*
	 * Try to work around a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then. (P2P bridge may be prefetching on DMA reads).
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));

	flush_gart();
	dma_ops = &gart_dma_ops;
	x86_platform.iommu_shutdown = gart_iommu_shutdown;
	swiotlb = 0;

	return 0;
}
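
/*
 * Parse the GART-specific pieces of the "iommu=" kernel parameter; the
 * string is handed over from iommu_setup() in pci-dma.c. For example,
 * "iommu=nofullflush" re-enables the lazy flushing strategy, and
 * "iommu=memaper=2" forces a fallback aperture of 32MB << 2 = 128MB.
 */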
void __init gart_parse_options(char *p)
{
	int arg;

	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}
IOMMU_INIT_POST(gart_iommu_hole_init);