mirror_ubuntu-zesty-kernel.git: arch/s390/pci/pci_dma.c
(blame as of commit "s390/pci_dma: improve map_sg")
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <asm/pci_dma.h>

static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;
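
/*
 * With s390_iommu=strict the device TLB is flushed on every unmap.
 * Otherwise (lazy mode) invalidated entries are only flushed once the
 * iommu bitmap wraps around and DMA addresses are about to be re-used.
 */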
static int s390_iommu_strict;

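/* Flush the device TLB (RPCIT) for the whole DMA aperture of @zdev. */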
static int zpci_refresh_global(struct zpci_dev *zdev)
{
	return zpci_refresh_trans((u64) zdev->fh << 32, zdev->start_dma,
				  zdev->iommu_pages * PAGE_SIZE);
}

unsigned long *dma_alloc_cpu_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
		*entry = ZPCI_TABLE_INVALID;
	return table;
}

static void dma_free_cpu_table(void *table)
{
	kmem_cache_free(dma_region_table_cache, table);
}

static unsigned long *dma_alloc_page_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
		*entry = ZPCI_PTE_INVALID;
	return table;
}

static void dma_free_page_table(void *table)
{
	kmem_cache_free(dma_page_table_cache, table);
}

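/*
 * The next two helpers return the origin of the next-level table for a
 * region-table or segment-table entry, allocating and validating a new
 * table if the entry is still invalid.
 */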
static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
{
	unsigned long *sto;

	if (reg_entry_isvalid(*entry))
		sto = get_rt_sto(*entry);
	else {
		sto = dma_alloc_cpu_table();
		if (!sto)
			return NULL;

		set_rt_sto(entry, sto);
		validate_rt_entry(entry);
		entry_clr_protected(entry);
	}
	return sto;
}

static unsigned long *dma_get_page_table_origin(unsigned long *entry)
{
	unsigned long *pto;

	if (reg_entry_isvalid(*entry))
		pto = get_st_pto(*entry);
	else {
		pto = dma_alloc_page_table();
		if (!pto)
			return NULL;
		set_st_pto(entry, pto);
		validate_st_entry(entry);
		entry_clr_protected(entry);
	}
	return pto;
}

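/*
 * Walk the three-level translation table (region-third table ->
 * segment table -> page table) and return a pointer to the page-table
 * entry for @dma_addr, creating intermediate tables as needed.
 */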
unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
{
	unsigned long *sto, *pto;
	unsigned int rtx, sx, px;

	rtx = calc_rtx(dma_addr);
	sto = dma_get_seg_table_origin(&rto[rtx]);
	if (!sto)
		return NULL;

	sx = calc_sx(dma_addr);
	pto = dma_get_page_table_origin(&sto[sx]);
	if (!pto)
		return NULL;

	px = calc_px(dma_addr);
	return &pto[px];
}

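/*
 * Update a single page-table entry: either invalidate it, or point it
 * at @page_addr and validate it; then set or clear write protection
 * according to @flags.
 */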
void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags)
{
	if (flags & ZPCI_PTE_INVALID) {
		invalidate_pt_entry(entry);
	} else {
		set_pt_pfaa(entry, page_addr);
		validate_pt_entry(entry);
	}

	if (flags & ZPCI_TABLE_PROTECTED)
		entry_set_protected(entry);
	else
		entry_clr_protected(entry);
}

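/*
 * Map or unmap a range of consecutive pages in the CPU-side
 * translation tables under dma_table_lock, then refresh the device TLB
 * where required. If the refresh fails for a fresh mapping, the newly
 * created entries are rolled back.
 */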
static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
			    dma_addr_t dma_addr, size_t size, int flags)
{
	unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
	dma_addr_t start_dma_addr = dma_addr;
	unsigned long irq_flags;
	unsigned long *entry;
	int i, rc = 0;

	if (!nr_pages)
		return -EINVAL;

	spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
	if (!zdev->dma_table) {
		rc = -EINVAL;
		goto no_refresh;
	}

	for (i = 0; i < nr_pages; i++) {
		entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
		if (!entry) {
			rc = -ENOMEM;
			goto undo_cpu_trans;
		}
		dma_update_cpu_trans(entry, page_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

	/*
	 * With zdev->tlb_refresh == 0, rpcit is not required to establish new
	 * translations when previously invalid translation-table entries are
	 * validated. With lazy unmap, it also is skipped for previously valid
	 * entries, but a global rpcit is then required before any address can
	 * be re-used, i.e. after each iommu bitmap wrap-around.
	 */
	if (!zdev->tlb_refresh &&
	    (!s390_iommu_strict ||
	     ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)))
		goto no_refresh;

	rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
				nr_pages * PAGE_SIZE);
undo_cpu_trans:
	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
		flags = ZPCI_PTE_INVALID;
		while (i-- > 0) {
			page_addr -= PAGE_SIZE;
			dma_addr -= PAGE_SIZE;
			entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
			if (!entry)
				break;
			dma_update_cpu_trans(entry, page_addr, flags);
		}
	}

no_refresh:
	spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
	return rc;
}

void dma_free_seg_table(unsigned long entry)
{
	unsigned long *sto = get_rt_sto(entry);
	int sx;

	for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
		if (reg_entry_isvalid(sto[sx]))
			dma_free_page_table(get_st_pto(sto[sx]));

	dma_free_cpu_table(sto);
}

void dma_cleanup_tables(unsigned long *table)
{
	int rtx;

	if (!table)
		return;

	for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
		if (reg_entry_isvalid(table[rtx]))
			dma_free_seg_table(table[rtx]);

	dma_free_cpu_table(table);
}

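/*
 * DMA addresses are handed out page-wise from a bitmap that covers the
 * aperture [start_dma, end_dma]. __dma_alloc_iommu() does a next-fit
 * search starting at @start while honoring the device's segment
 * boundary; dma_alloc_address() wraps around to the beginning of the
 * aperture once the end is reached.
 */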
static unsigned long __dma_alloc_iommu(struct device *dev,
				       unsigned long start, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long boundary_size;

	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;
	return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
				start, size, zdev->start_dma >> PAGE_SHIFT,
				boundary_size, 0);
}

static dma_addr_t dma_alloc_address(struct device *dev, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long offset, flags;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	offset = __dma_alloc_iommu(dev, zdev->next_bit, size);
	if (offset == -1) {
		/* wrap-around */
		offset = __dma_alloc_iommu(dev, 0, size);
		if (offset == -1) {
			spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
			return DMA_ERROR_CODE;
		}
		if (!zdev->tlb_refresh && !s390_iommu_strict)
			/* global flush after wrap-around with lazy unmap */
			zpci_refresh_global(zdev);
	}
	zdev->next_bit = offset + size;
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);

	return zdev->start_dma + offset * PAGE_SIZE;
}

static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long flags, offset;

	offset = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	if (!zdev->iommu_bitmap)
		goto out;
	bitmap_clear(zdev->iommu_bitmap, offset, size);
	/*
	 * Lazy flush for unmap: need to move next_bit to avoid address re-use
	 * until wrap-around.
	 */
	if (!s390_iommu_strict && offset >= zdev->next_bit)
		zdev->next_bit = offset + size;
out:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}

static inline void zpci_err_dma(unsigned long rc, unsigned long addr)
{
	struct {
		unsigned long rc;
		unsigned long addr;
	} __packed data = {rc, addr};

	zpci_err_hex(&data, sizeof(data));
}

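/*
 * map_page callback: allocate a DMA address range covering the buffer,
 * create the translation entries (write-protected for DMA_NONE and
 * DMA_TO_DEVICE) and return the DMA address plus the offset into the
 * first page.
 */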
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long pa = page_to_phys(page) + offset;
	int flags = ZPCI_PTE_VALID;
	unsigned long nr_pages;
	dma_addr_t dma_addr;
	int ret;

	/* This rounds up number of pages based on size and offset */
	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
	dma_addr = dma_alloc_address(dev, nr_pages);
	if (dma_addr == DMA_ERROR_CODE) {
		ret = -ENOSPC;
		goto out_err;
	}

	/* Use rounded up size */
	size = nr_pages * PAGE_SIZE;

	if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	ret = dma_update_trans(zdev, pa, dma_addr, size, flags);
	if (ret)
		goto out_free;

	atomic64_add(nr_pages, &zdev->mapped_pages);
	return dma_addr + (offset & ~PAGE_MASK);

out_free:
	dma_free_address(dev, dma_addr, nr_pages);
out_err:
	zpci_err("map error:\n");
	zpci_err_dma(ret, pa);
	return DMA_ERROR_CODE;
}

static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction direction,
				 unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	int npages, ret;

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr = dma_addr & PAGE_MASK;
	ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
			       ZPCI_PTE_INVALID);
	if (ret) {
		zpci_err("unmap error:\n");
		zpci_err_dma(ret, dma_addr);
		return;
	}

	atomic64_add(npages, &zdev->unmapped_pages);
	dma_free_address(dev, dma_addr, npages);
}

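/*
 * Coherent allocation: back the buffer with normal pages, zero it and
 * map it bidirectionally; the returned CPU address is the kernel
 * linear-mapping address of the pages.
 */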
static void *s390_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t flag,
			    unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	struct page *page;
	unsigned long pa;
	dma_addr_t map;

	size = PAGE_ALIGN(size);
	page = alloc_pages(flag, get_order(size));
	if (!page)
		return NULL;

	pa = page_to_phys(page);
	memset((void *) pa, 0, size);

	map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dev, map)) {
		free_pages(pa, get_order(size));
		return NULL;
	}

	atomic64_add(size / PAGE_SIZE, &zdev->allocated_pages);
	if (dma_handle)
		*dma_handle = map;
	return (void *) pa;
}

static void s390_dma_free(struct device *dev, size_t size,
			  void *pa, dma_addr_t dma_handle,
			  unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));

	size = PAGE_ALIGN(size);
	atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages);
	s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, 0);
	free_pages((unsigned long) pa, get_order(size));
}

/* Map a segment into a contiguous dma address area */
static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			     size_t size, dma_addr_t *handle,
			     enum dma_data_direction dir)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	dma_addr_t dma_addr_base, dma_addr;
	int flags = ZPCI_PTE_VALID;
	struct scatterlist *s;
	unsigned long pa;
	int ret;

	size = PAGE_ALIGN(size);
	dma_addr_base = dma_alloc_address(dev, size >> PAGE_SHIFT);
	if (dma_addr_base == DMA_ERROR_CODE)
		return -ENOMEM;

	dma_addr = dma_addr_base;
	if (dir == DMA_NONE || dir == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) {
		pa = page_to_phys(sg_page(s)) + s->offset;
		ret = dma_update_trans(zdev, pa, dma_addr, s->length, flags);
		if (ret)
			goto unmap;

		dma_addr += s->length;
	}
	*handle = dma_addr_base;
	atomic64_add(size >> PAGE_SHIFT, &zdev->mapped_pages);

	return ret;

unmap:
	dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base,
			 ZPCI_PTE_INVALID);
	dma_free_address(dev, dma_addr_base, size >> PAGE_SHIFT);
	zpci_err("map error:\n");
	zpci_err_dma(ret, pa);
	return ret;
}

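/*
 * map_sg callback: coalesce scatterlist elements into as few
 * DMA-contiguous segments as possible. A new segment is started
 * whenever an element does not start at a page boundary, the
 * accumulated size is not page aligned, or the device's maximum
 * segment size would be exceeded.
 */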
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			   int nr_elements, enum dma_data_direction dir,
			   unsigned long attrs)
{
	struct scatterlist *s = sg, *start = sg, *dma = sg;
	unsigned int max = dma_get_max_seg_size(dev);
	unsigned int size = s->offset + s->length;
	unsigned int offset = s->offset;
	int count = 0, i;

	for (i = 1; i < nr_elements; i++) {
		s = sg_next(s);

		s->dma_address = DMA_ERROR_CODE;
		s->dma_length = 0;

		if (s->offset || (size & ~PAGE_MASK) ||
		    size + s->length > max) {
			if (__s390_dma_map_sg(dev, start, size,
					      &dma->dma_address, dir))
				goto unmap;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count++;
		}
		size += s->length;
	}
	if (__s390_dma_map_sg(dev, start, size, &dma->dma_address, dir))
		goto unmap;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count + 1;
unmap:
	for_each_sg(sg, s, count, i)
		s390_dma_unmap_pages(dev, sg_dma_address(s), sg_dma_len(s),
				     dir, attrs);

	return 0;
}

static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
			      int nr_elements, enum dma_data_direction dir,
			      unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		if (s->dma_length)
			s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
					     dir, attrs);
		s->dma_address = 0;
		s->dma_length = 0;
	}
}

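/*
 * Set up the per-device DMA state: allocate the root translation
 * table, size and allocate the iommu bitmap (see the constraints
 * described below) and register the translation table with the device.
 */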
int zpci_dma_init_device(struct zpci_dev *zdev)
{
	int rc;

	/*
	 * At this point, if the device is part of an IOMMU domain, this would
	 * be a strong hint towards a bug in the IOMMU API (common) code and/or
	 * simultaneous access via IOMMU and DMA API. So let's issue a warning.
	 */
	WARN_ON(zdev->s390_domain);

	spin_lock_init(&zdev->iommu_bitmap_lock);
	spin_lock_init(&zdev->dma_table_lock);

	zdev->dma_table = dma_alloc_cpu_table();
	if (!zdev->dma_table) {
		rc = -ENOMEM;
		goto out;
	}

	/*
	 * Restrict the iommu bitmap size to the minimum of the following:
	 * - main memory size
	 * - 3-level pagetable address limit minus start_dma offset
	 * - DMA address range allowed by the hardware (clp query pci fn)
	 *
	 * Also set zdev->end_dma to the actual end address of the usable
	 * range, instead of the theoretical maximum as reported by hardware.
	 */
	zdev->start_dma = PAGE_ALIGN(zdev->start_dma);
	zdev->iommu_size = min3((u64) high_memory,
				ZPCI_TABLE_SIZE_RT - zdev->start_dma,
				zdev->end_dma - zdev->start_dma + 1);
	zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1;
	zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
	zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
	if (!zdev->iommu_bitmap) {
		rc = -ENOMEM;
		goto free_dma_table;
	}

	rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
				(u64) zdev->dma_table);
	if (rc)
		goto free_bitmap;

	return 0;
free_bitmap:
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
free_dma_table:
	dma_free_cpu_table(zdev->dma_table);
	zdev->dma_table = NULL;
out:
	return rc;
}

void zpci_dma_exit_device(struct zpci_dev *zdev)
{
	/*
	 * At this point, if the device is part of an IOMMU domain, this would
	 * be a strong hint towards a bug in the IOMMU API (common) code and/or
	 * simultaneous access via IOMMU and DMA API. So let's issue a warning.
	 */
	WARN_ON(zdev->s390_domain);

	zpci_unregister_ioat(zdev, 0);
	dma_cleanup_tables(zdev->dma_table);
	zdev->dma_table = NULL;
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
	zdev->next_bit = 0;
}

static int __init dma_alloc_cpu_table_caches(void)
{
	dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
					ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
					0, NULL);
	if (!dma_region_table_cache)
		return -ENOMEM;

	dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
					ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
					0, NULL);
	if (!dma_page_table_cache) {
		kmem_cache_destroy(dma_region_table_cache);
		return -ENOMEM;
	}
	return 0;
}

int __init zpci_dma_init(void)
{
	return dma_alloc_cpu_table_caches();
}

void zpci_dma_exit(void)
{
	kmem_cache_destroy(dma_page_table_cache);
	kmem_cache_destroy(dma_region_table_cache);
}

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);

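/*
 * The dma_map_ops instance used as the DMA mapping operations for zPCI
 * devices; exported so that other parts of the kernel can refer to it.
 */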
struct dma_map_ops s390_pci_dma_ops = {
	.alloc		= s390_dma_alloc,
	.free		= s390_dma_free,
	.map_sg		= s390_dma_map_sg,
	.unmap_sg	= s390_dma_unmap_sg,
	.map_page	= s390_dma_map_pages,
	.unmap_page	= s390_dma_unmap_pages,
	/* if we support direct DMA this must be conditional */
	.is_phys	= 0,
	/* dma_supported is unconditionally true without a callback */
};
EXPORT_SYMBOL_GPL(s390_pci_dma_ops);
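
/*
 * Kernel command line: "s390_iommu=strict" selects strict flushing
 * mode, i.e. the device TLB is flushed on every unmap instead of
 * lazily at bitmap wrap-around.
 */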
static int __init s390_iommu_setup(char *str)
{
	if (!strncmp(str, "strict", 6))
		s390_iommu_strict = 1;
	return 0;
}

__setup("s390_iommu=", s390_iommu_setup);