/* pci_iommu.c: UltraSparc PCI controller IOMMU/STC support.
 *
 * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/delay.h>

#include <asm/pbm.h>

#include "iommu_common.h"

#define PCI_STC_CTXMATCH_ADDR(STC, CTX) \
        ((STC)->strbuf_ctxmatch_base + ((CTX) << 3))

/* Accessing IOMMU and Streaming Buffer registers.
 * REG parameter is a physical address.  All registers
 * are 64-bits in size.
 */
#define pci_iommu_read(__reg) \
({      u64 __ret; \
        __asm__ __volatile__("ldxa [%1] %2, %0" \
                             : "=r" (__ret) \
                             : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
                             : "memory"); \
        __ret; \
})
#define pci_iommu_write(__reg, __val) \
        __asm__ __volatile__("stxa %0, [%1] %2" \
                             : /* no outputs */ \
                             : "r" (__val), "r" (__reg), \
                               "i" (ASI_PHYS_BYPASS_EC_E))

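/* ASI_PHYS_BYPASS_EC_E makes the ldxa/stxa above operate directly on
 * physical addresses, bypassing the MMU and the external cache, which
 * is why REG must be a physical address.
 */
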
/* Must be invoked under the IOMMU lock. */
static void __iommu_flushall(struct iommu *iommu)
{
        unsigned long tag;
        int entry;

        /* The IOMMU TLB tag diagnostic registers sit at a fixed offset
         * from the flush register: 16 entries, 8 bytes apart.  Writing
         * zero to a tag invalidates that TLB entry.
         */
        tag = iommu->iommu_flush + (0xa580UL - 0x0210UL);
        for (entry = 0; entry < 16; entry++) {
                pci_iommu_write(tag, 0);
                tag += 8;
        }

        /* Ensure completion of previous PIO writes. */
        (void) pci_iommu_read(iommu->write_complete_reg);
}

#define IOPTE_CONSISTENT(CTX) \
        (IOPTE_VALID | IOPTE_CACHE | \
         (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
        (IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

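/* Consistent mappings leave IOPTE_STBUF clear and so bypass the
 * streaming cache entirely; streaming mappings set it and therefore
 * require a pci_strbuf_flush() before the CPU may safely look at
 * the data.
 */
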
/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte) \
        ((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
        unsigned long val = iopte_val(*iopte);

        val &= ~IOPTE_PAGE;
        val |= iommu->dummy_page_pa;

        iopte_val(*iopte) = val;
}

/* Based largely upon the ppc64 iommu allocator. */
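/* A single linear scan of the bitmap starting from the rotating hint;
 * if the scan would run off the end, flush the IOMMU TLB once and
 * retry from entry zero.  Returns the first entry of a free run of
 * NPAGES entries, or -1 if the table is full.
 */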
static long pci_arena_alloc(struct iommu *iommu, unsigned long npages)
{
        struct iommu_arena *arena = &iommu->arena;
        unsigned long n, i, start, end, limit;
        int pass;

        limit = arena->limit;
        start = arena->hint;
        pass = 0;

again:
        n = find_next_zero_bit(arena->map, limit, start);
        end = n + npages;
        if (unlikely(end >= limit)) {
                if (likely(pass < 1)) {
                        limit = start;
                        start = 0;
                        __iommu_flushall(iommu);
                        pass++;
                        goto again;
                } else {
                        /* Scanned the whole thing, give up. */
                        return -1;
                }
        }

        for (i = n; i < end; i++) {
                if (test_bit(i, arena->map)) {
                        start = i + 1;
                        goto again;
                }
        }

        for (i = n; i < end; i++)
                __set_bit(i, arena->map);

        arena->hint = end;

        return n;
}

static void pci_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
{
        unsigned long i;

        for (i = base; i < (base + npages); i++)
                __clear_bit(i, arena->map);
}

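/* Initialize the software state of one IOMMU: the allocation bitmap
 * (one bit per TSB entry, rounded up to a multiple of 8 bytes), the
 * dummy page that inactive IO PTEs point at, and the TSB itself.
 */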
void pci_iommu_table_init(struct iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask)
{
        unsigned long i, tsbbase, order, sz, num_tsb_entries;

        num_tsb_entries = tsbsize / sizeof(iopte_t);

        /* Setup initial software IOMMU state. */
        spin_lock_init(&iommu->lock);
        iommu->ctx_lowest_free = 1;
        iommu->page_table_map_base = dma_offset;
        iommu->dma_addr_mask = dma_addr_mask;

        /* Allocate and initialize the free area map. */
        sz = num_tsb_entries / 8;
        sz = (sz + 7UL) & ~7UL;
        iommu->arena.map = kzalloc(sz, GFP_KERNEL);
        if (!iommu->arena.map) {
                prom_printf("PCI_IOMMU: Error, kzalloc(arena.map) failed.\n");
                prom_halt();
        }
        iommu->arena.limit = num_tsb_entries;

        /* Allocate and initialize the dummy page which we
         * set inactive IO PTEs to point to.
         */
        iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
        if (!iommu->dummy_page) {
                prom_printf("PCI_IOMMU: Error, gfp(dummy_page) failed.\n");
                prom_halt();
        }
        memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
        iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

        /* Now allocate and setup the IOMMU page table itself. */
        order = get_order(tsbsize);
        tsbbase = __get_free_pages(GFP_KERNEL, order);
        if (!tsbbase) {
                prom_printf("PCI_IOMMU: Error, gfp(tsb) failed.\n");
                prom_halt();
        }
        iommu->page_table = (iopte_t *)tsbbase;

        for (i = 0; i < num_tsb_entries; i++)
                iopte_make_dummy(iommu, &iommu->page_table[i]);
}

static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages)
{
        long entry;

        entry = pci_arena_alloc(iommu, npages);
        if (unlikely(entry < 0))
                return NULL;

        return iommu->page_table + entry;
}

static inline void free_npages(struct iommu *iommu, dma_addr_t base, unsigned long npages)
{
        pci_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
}

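/* Allocate a streaming cache context number.  Context 0 is never
 * handed out: it means "no context", and on exhaustion we warn and
 * fall back to it.
 */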
static int iommu_alloc_ctx(struct iommu *iommu)
{
        int lowest = iommu->ctx_lowest_free;
        int sz = IOMMU_NUM_CTXS - lowest;
        int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);

        if (unlikely(n == sz)) {
                n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
                if (unlikely(n == lowest)) {
                        printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
                        n = 0;
                }
        }
        if (n)
                __set_bit(n, iommu->ctx_bitmap);

        return n;
}

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
        if (likely(ctx)) {
                __clear_bit(ctx, iommu->ctx_bitmap);
                if (ctx < iommu->ctx_lowest_free)
                        iommu->ctx_lowest_free = ctx;
        }
}

/* Allocate and map kernel buffer of size SIZE using consistent mode
 * DMA for PCI device PDEV.  Return non-NULL cpu-side address if
 * successful and set *DMA_ADDRP to the PCI side dma address.
 */
static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
{
        struct iommu *iommu;
        iopte_t *iopte;
        unsigned long flags, order, first_page;
        void *ret;
        int npages;

        size = IO_PAGE_ALIGN(size);
        order = get_order(size);
        if (order >= 10)
                return NULL;

        first_page = __get_free_pages(gfp, order);
        if (first_page == 0UL)
                return NULL;
        memset((char *)first_page, 0, PAGE_SIZE << order);

        iommu = pdev->dev.archdata.iommu;

        spin_lock_irqsave(&iommu->lock, flags);
        iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(iopte == NULL)) {
                free_pages(first_page, order);
                return NULL;
        }

        *dma_addrp = (iommu->page_table_map_base +
                      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
        ret = (void *) first_page;
        npages = size >> IO_PAGE_SHIFT;
        first_page = __pa(first_page);
        while (npages--) {
                iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
                                     IOPTE_WRITE |
                                     (first_page & IOPTE_PAGE));
                iopte++;
                first_page += IO_PAGE_SIZE;
        }

        return ret;
}

/* Free and unmap a consistent DMA translation. */
static void pci_4u_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
        struct iommu *iommu;
        iopte_t *iopte;
        unsigned long flags, order, npages;

        npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
        iommu = pdev->dev.archdata.iommu;
        iopte = iommu->page_table +
                ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

        spin_lock_irqsave(&iommu->lock, flags);

        free_npages(iommu, dvma - iommu->page_table_map_base, npages);

        spin_unlock_irqrestore(&iommu->lock, flags);

        order = get_order(size);
        if (order < 10)
                free_pages((unsigned long)cpu, order);
}

/* Map a single buffer at PTR of SZ bytes for PCI DMA
 * in streaming mode.
 */
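/* Only whole IO pages are mapped; the returned DMA address preserves
 * PTR's byte offset within its page, and SZ plus that offset is
 * rounded up to a multiple of IO_PAGE_SIZE.
 */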
static dma_addr_t pci_4u_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
        struct iommu *iommu;
        struct strbuf *strbuf;
        iopte_t *base;
        unsigned long flags, npages, oaddr;
        unsigned long i, base_paddr, ctx;
        u32 bus_addr, ret;
        unsigned long iopte_protection;

        iommu = pdev->dev.archdata.iommu;
        strbuf = pdev->dev.archdata.stc;

        if (unlikely(direction == PCI_DMA_NONE))
                goto bad_no_ctx;

        oaddr = (unsigned long)ptr;
        npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;

        spin_lock_irqsave(&iommu->lock, flags);
        base = alloc_npages(iommu, npages);
        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = iommu_alloc_ctx(iommu);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(!base))
                goto bad;

        bus_addr = (iommu->page_table_map_base +
                    ((base - iommu->page_table) << IO_PAGE_SHIFT));
        ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
        base_paddr = __pa(oaddr & IO_PAGE_MASK);
        if (strbuf->strbuf_enabled)
                iopte_protection = IOPTE_STREAMING(ctx);
        else
                iopte_protection = IOPTE_CONSISTENT(ctx);
        if (direction != PCI_DMA_TODEVICE)
                iopte_protection |= IOPTE_WRITE;

        for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
                iopte_val(*base) = iopte_protection | base_paddr;

        return ret;

bad:
        iommu_free_ctx(iommu, ctx);
bad_no_ctx:
        if (printk_ratelimit())
                WARN_ON(1);
        return PCI_DMA_ERROR_CODE;
}

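/* Flush the streaming cache for [vaddr, vaddr + npages*IO_PAGE_SIZE).
 * When both the STC and the IOMMU support it, flush by context: write
 * the context number to the flush register until the context-match
 * register reads back zero, falling back to per-page flushes on
 * timeout.  Unless the transfer was CPU->device only, finish by
 * writing the flush-sync flag and polling (up to ~100ms) for the
 * hardware to set it.
 */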
static void pci_strbuf_flush(struct strbuf *strbuf, struct iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction)
{
        int limit;

        if (strbuf->strbuf_ctxflush &&
            iommu->iommu_ctxflush) {
                unsigned long matchreg, flushreg;
                u64 val;

                flushreg = strbuf->strbuf_ctxflush;
                matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);

                pci_iommu_write(flushreg, ctx);
                val = pci_iommu_read(matchreg);
                val &= 0xffff;
                if (!val)
                        goto do_flush_sync;

                while (val) {
                        if (val & 0x1)
                                pci_iommu_write(flushreg, ctx);
                        val >>= 1;
                }
                val = pci_iommu_read(matchreg);
                if (unlikely(val)) {
                        printk(KERN_WARNING "pci_strbuf_flush: ctx flush "
                               "timeout matchreg[%lx] ctx[%lx]\n",
                               val, ctx);
                        goto do_page_flush;
                }
        } else {
                unsigned long i;

        do_page_flush:
                for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
                        pci_iommu_write(strbuf->strbuf_pflush, vaddr);
        }

do_flush_sync:
        /* If the device could not have possibly put dirty data into
         * the streaming cache, no flush-flag synchronization needs
         * to be performed.
         */
        if (direction == PCI_DMA_TODEVICE)
                return;

        PCI_STC_FLUSHFLAG_INIT(strbuf);
        pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
        (void) pci_iommu_read(iommu->write_complete_reg);

        limit = 100000;
        while (!PCI_STC_FLUSHFLAG_SET(strbuf)) {
                limit--;
                if (!limit)
                        break;
                udelay(1);
                rmb();
        }
        if (!limit)
                printk(KERN_WARNING "pci_strbuf_flush: flushflag timeout "
                       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
                       vaddr, ctx, npages);
}

/* Unmap a single streaming mode DMA translation. */
static void pci_4u_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
        struct iommu *iommu;
        struct strbuf *strbuf;
        iopte_t *base;
        unsigned long flags, npages, ctx, i;

        if (unlikely(direction == PCI_DMA_NONE)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return;
        }

        iommu = pdev->dev.archdata.iommu;
        strbuf = pdev->dev.archdata.stc;

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        base = iommu->page_table +
                ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
#ifdef DEBUG_PCI_IOMMU
        if (IOPTE_IS_DUMMY(iommu, base))
                printk("pci_unmap_single called on non-mapped region %08x,%08lx from %p\n",
                       bus_addr, sz, __builtin_return_address(0));
#endif
        bus_addr &= IO_PAGE_MASK;

        spin_lock_irqsave(&iommu->lock, flags);

        /* Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

        /* Step 1: Kick data out of streaming buffers if necessary. */
        if (strbuf->strbuf_enabled)
                pci_strbuf_flush(strbuf, iommu, bus_addr, ctx,
                                 npages, direction);

        /* Step 2: Clear out TSB entries. */
        for (i = 0; i < npages; i++)
                iopte_make_dummy(iommu, base + i);

        free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);

        iommu_free_ctx(iommu, ctx);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

#define SG_ENT_PHYS_ADDRESS(SG) \
        (__pa(page_address((SG)->page)) + (SG)->offset)

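/* Write IO PTEs for a scatterlist already processed by prepare_sg().
 * NUSED is the number of coalesced DMA segments, NELEMS the number of
 * original entries; physically adjacent entries may have been merged
 * into a single DMA segment, so the inner loops walk pages rather
 * than entries.
 */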
static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
                           int nused, int nelems, unsigned long iopte_protection)
{
        struct scatterlist *dma_sg = sg;
        struct scatterlist *sg_end = sg + nelems;
        int i;

        for (i = 0; i < nused; i++) {
                unsigned long pteval = ~0UL;
                u32 dma_npages;

                dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
                              dma_sg->dma_length +
                              ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
                do {
                        unsigned long offset;
                        signed int len;

                        /* If we are here, we know we have at least one
                         * more page to map.  So walk forward until we
                         * hit a page crossing, and begin creating new
                         * mappings from that spot.
                         */
                        for (;;) {
                                unsigned long tmp;

                                tmp = SG_ENT_PHYS_ADDRESS(sg);
                                len = sg->length;
                                if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
                                        pteval = tmp & IO_PAGE_MASK;
                                        offset = tmp & (IO_PAGE_SIZE - 1UL);
                                        break;
                                }
                                if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
                                        pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
                                        offset = 0UL;
                                        len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
                                        break;
                                }
                                sg++;
                        }

                        pteval = iopte_protection | (pteval & IOPTE_PAGE);
                        while (len > 0) {
                                *iopte++ = __iopte(pteval);
                                pteval += IO_PAGE_SIZE;
                                len -= (IO_PAGE_SIZE - offset);
                                offset = 0;
                                dma_npages--;
                        }

                        pteval = (pteval & IOPTE_PAGE) + len;
                        sg++;

                        /* Skip over any tail mappings we've fully mapped,
                         * adjusting pteval along the way.  Stop when we
                         * detect a page crossing event.
                         */
                        while (sg < sg_end &&
                               (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
                               (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
                               ((pteval ^
                                 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
                                pteval += sg->length;
                                sg++;
                        }
                        if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
                                pteval = ~0UL;
                } while (dma_npages != 0);
                dma_sg++;
        }
}

/* Map a set of buffers described by SGLIST with NELEMS array
 * elements in streaming mode for PCI DMA.
 * When making changes here, inspect the assembly output.  I had a
 * hard time keeping this routine from spilling variables to stack
 * slots.
 */
static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
        struct iommu *iommu;
        struct strbuf *strbuf;
        unsigned long flags, ctx, npages, iopte_protection;
        iopte_t *base;
        u32 dma_base;
        struct scatterlist *sgtmp;
        int used;

        /* Fast path single entry scatterlists. */
        if (nelems == 1) {
                sglist->dma_address =
                        pci_4u_map_single(pdev,
                                          (page_address(sglist->page) + sglist->offset),
                                          sglist->length, direction);
                if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
                        return 0;
                sglist->dma_length = sglist->length;
                return 1;
        }

        iommu = pdev->dev.archdata.iommu;
        strbuf = pdev->dev.archdata.stc;

        if (unlikely(direction == PCI_DMA_NONE))
                goto bad_no_ctx;

        /* Step 1: Prepare scatter list. */

        npages = prepare_sg(sglist, nelems);

        /* Step 2: Allocate a cluster and context, if necessary. */

        spin_lock_irqsave(&iommu->lock, flags);

        base = alloc_npages(iommu, npages);
        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = iommu_alloc_ctx(iommu);

        spin_unlock_irqrestore(&iommu->lock, flags);

        if (base == NULL)
                goto bad;

        dma_base = iommu->page_table_map_base +
                ((base - iommu->page_table) << IO_PAGE_SHIFT);

        /* Step 3: Normalize DMA addresses. */
        used = nelems;

        sgtmp = sglist;
        while (used && sgtmp->dma_length) {
                sgtmp->dma_address += dma_base;
                sgtmp++;
                used--;
        }
        used = nelems - used;

        /* Step 4: Create the mappings. */
        if (strbuf->strbuf_enabled)
                iopte_protection = IOPTE_STREAMING(ctx);
        else
                iopte_protection = IOPTE_CONSISTENT(ctx);
        if (direction != PCI_DMA_TODEVICE)
                iopte_protection |= IOPTE_WRITE;

        fill_sg(base, sglist, used, nelems, iopte_protection);

#ifdef VERIFY_SG
        verify_sglist(sglist, nelems, base, npages);
#endif

        return used;

bad:
        iommu_free_ctx(iommu, ctx);
bad_no_ctx:
        if (printk_ratelimit())
                WARN_ON(1);
        return 0;
}

/* Unmap a set of streaming mode DMA translations. */
static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
        struct iommu *iommu;
        struct strbuf *strbuf;
        iopte_t *base;
        unsigned long flags, ctx, i, npages;
        u32 bus_addr;

        if (unlikely(direction == PCI_DMA_NONE)) {
                if (printk_ratelimit())
                        WARN_ON(1);
        }

        iommu = pdev->dev.archdata.iommu;
        strbuf = pdev->dev.archdata.stc;

        bus_addr = sglist->dma_address & IO_PAGE_MASK;

        for (i = 1; i < nelems; i++)
                if (sglist[i].dma_length == 0)
                        break;
        i--;
        npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
                  bus_addr) >> IO_PAGE_SHIFT;

        base = iommu->page_table +
                ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

#ifdef DEBUG_PCI_IOMMU
        if (IOPTE_IS_DUMMY(iommu, base))
                printk("pci_unmap_sg called on non-mapped region %08x,%d from %p\n",
                       sglist->dma_address, nelems, __builtin_return_address(0));
#endif

        spin_lock_irqsave(&iommu->lock, flags);

        /* Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

        /* Step 1: Kick data out of streaming buffers if necessary. */
        if (strbuf->strbuf_enabled)
                pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

        /* Step 2: Clear out the TSB entries. */
        for (i = 0; i < npages; i++)
                iopte_make_dummy(iommu, base + i);

        free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);

        iommu_free_ctx(iommu, ctx);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

/* Make physical memory consistent for a single
 * streaming mode DMA translation after a transfer.
 */
static void pci_4u_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
        struct iommu *iommu;
        struct strbuf *strbuf;
        unsigned long flags, ctx, npages;

        iommu = pdev->dev.archdata.iommu;
        strbuf = pdev->dev.archdata.stc;

        if (!strbuf->strbuf_enabled)
                return;

        spin_lock_irqsave(&iommu->lock, flags);

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        bus_addr &= IO_PAGE_MASK;

        /* Step 1: Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_ctxflush &&
            strbuf->strbuf_ctxflush) {
                iopte_t *iopte;

                iopte = iommu->page_table +
                        ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
                ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
        }

        /* Step 2: Kick data out of streaming buffers. */
        pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 */
static void pci_4u_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
        struct iommu *iommu;
        struct strbuf *strbuf;
        unsigned long flags, ctx, npages, i;
        u32 bus_addr;

        iommu = pdev->dev.archdata.iommu;
        strbuf = pdev->dev.archdata.stc;

        if (!strbuf->strbuf_enabled)
                return;

        spin_lock_irqsave(&iommu->lock, flags);

        /* Step 1: Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_ctxflush &&
            strbuf->strbuf_ctxflush) {
                iopte_t *iopte;

                iopte = iommu->page_table +
                        ((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
                ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
        }

        /* Step 2: Kick data out of streaming buffers. */
        bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
        for (i = 1; i < nelems; i++)
                if (!sglist[i].dma_length)
                        break;
        i--;
        npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
                  - bus_addr) >> IO_PAGE_SHIFT;
        pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

const struct pci_iommu_ops pci_sun4u_iommu_ops = {
        .alloc_consistent = pci_4u_alloc_consistent,
        .free_consistent = pci_4u_free_consistent,
        .map_single = pci_4u_map_single,
        .unmap_single = pci_4u_unmap_single,
        .map_sg = pci_4u_map_sg,
        .unmap_sg = pci_4u_unmap_sg,
        .dma_sync_single_for_cpu = pci_4u_dma_sync_single_for_cpu,
        .dma_sync_sg_for_cpu = pci_4u_dma_sync_sg_for_cpu,
};

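/* Drivers do not call these ops directly; in this kernel the
 * asm-sparc64 pci.h wrappers (pci_alloc_consistent(), pci_map_single(),
 * pci_unmap_single(), ...) dispatch through the installed
 * pci_iommu_ops pointer, roughly:
 *
 *      void *cpu;
 *      dma_addr_t dma;
 *
 *      cpu = pci_alloc_consistent(pdev, size, &dma);
 *      ...
 *      pci_free_consistent(pdev, size, cpu, dma);
 */
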
static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
{
        struct pci_dev *ali_isa_bridge;
        u8 val;

        /* ALI sound chips generate 31-bits of DMA, a special register
         * determines what bit 31 is emitted as.
         */
        ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL,
                                        PCI_DEVICE_ID_AL_M1533,
                                        NULL);

        pci_read_config_byte(ali_isa_bridge, 0x7e, &val);
        if (set_bit)
                val |= 0x01;
        else
                val &= ~0x01;
        pci_write_config_byte(ali_isa_bridge, 0x7e, val);
        pci_dev_put(ali_isa_bridge);
}

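/* Return 1 if DEVICE_MASK covers every DMA address this IOMMU may
 * hand out.  A NULL pdev is checked against a plain 32-bit mask;
 * the ALI M5451 audio chip gets special handling for its 31-bit
 * DMA limitation via the ISA bridge hack above.
 */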
int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
{
        u64 dma_addr_mask;

        if (pdev == NULL) {
                dma_addr_mask = 0xffffffff;
        } else {
                struct iommu *iommu = pdev->dev.archdata.iommu;

                dma_addr_mask = iommu->dma_addr_mask;

                if (pdev->vendor == PCI_VENDOR_ID_AL &&
                    pdev->device == PCI_DEVICE_ID_AL_M5451 &&
                    device_mask == 0x7fffffff) {
                        ali_sound_dma_hack(pdev,
                                           (dma_addr_mask & 0x80000000) != 0);
                        return 1;
                }
        }

        if (device_mask >= (1UL << 32UL))
                return 0;

        return (device_mask & dma_addr_mask) == dma_addr_mask;
}