// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <linux/hash.h>
#include <linux/fault-inject.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
#include <asm/fadump.h>
#include <asm/vio.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

#define DBG(...)

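/*
 * Virtual merging coalesces scatterlist entries whose DMA addresses
 * turn out to be contiguous into a single segment. It is enabled by
 * default and can be toggled with iommu=novmerge / iommu=vmerge below.
 */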
static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);

static int __init setup_iommu(char *str)
{
        if (!strcmp(str, "novmerge"))
                novmerge = 1;
        else if (!strcmp(str, "vmerge"))
                novmerge = 0;
        return 1;
}

__setup("iommu=", setup_iommu);

static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);

/*
 * We precalculate the hash to avoid doing it on every allocation.
 *
 * The hash is important to spread CPUs across all the pools. For example,
 * on a POWER7 with 4 way SMT we want interrupts on the primary threads and
 * with 4 pools all primary threads would map to the same pool.
 */
static int __init setup_iommu_pool_hash(void)
{
        unsigned int i;

        for_each_possible_cpu(i)
                per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);

        return 0;
}
subsys_initcall(setup_iommu_pool_hash);

#ifdef CONFIG_FAIL_IOMMU

static DECLARE_FAULT_ATTR(fail_iommu);

static int __init setup_fail_iommu(char *str)
{
        return setup_fault_attr(&fail_iommu, str);
}
__setup("fail_iommu=", setup_fail_iommu);

static bool should_fail_iommu(struct device *dev)
{
        return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
}

static int __init fail_iommu_debugfs(void)
{
        struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
                                                       NULL, &fail_iommu);

        return PTR_ERR_OR_ZERO(dir);
}
late_initcall(fail_iommu_debugfs);

static ssize_t fail_iommu_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
}

static ssize_t fail_iommu_store(struct device *dev,
                                struct device_attribute *attr, const char *buf,
                                size_t count)
{
        int i;

        if (count > 0 && sscanf(buf, "%d", &i) > 0)
                dev->archdata.fail_iommu = (i == 0) ? 0 : 1;

        return count;
}

static DEVICE_ATTR_RW(fail_iommu);

static int fail_iommu_bus_notify(struct notifier_block *nb,
                                 unsigned long action, void *data)
{
        struct device *dev = data;

        if (action == BUS_NOTIFY_ADD_DEVICE) {
                if (device_create_file(dev, &dev_attr_fail_iommu))
                        pr_warn("Unable to create IOMMU fault injection sysfs entries\n");
        } else if (action == BUS_NOTIFY_DEL_DEVICE) {
                device_remove_file(dev, &dev_attr_fail_iommu);
        }

        return 0;
}

static struct notifier_block fail_iommu_bus_notifier = {
        .notifier_call = fail_iommu_bus_notify
};

static int __init fail_iommu_setup(void)
{
#ifdef CONFIG_PCI
        bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier);
#endif
#ifdef CONFIG_IBMVIO
        bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier);
#endif

        return 0;
}
/*
 * Must execute after the PCI and VIO subsystems have initialised but
 * before devices are probed.
 */
arch_initcall(fail_iommu_setup);
#else
static inline bool should_fail_iommu(struct device *dev)
{
        return false;
}
#endif

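/*
 * Allocate a run of @npages entries from one of the table's pools,
 * honouring the caller's address @mask and alignment constraints.
 * Returns the index of the first entry (not yet offset by
 * tbl->it_offset), or DMA_MAPPING_ERROR on failure.
 */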
static unsigned long iommu_range_alloc(struct device *dev,
                                       struct iommu_table *tbl,
                                       unsigned long npages,
                                       unsigned long *handle,
                                       unsigned long mask,
                                       unsigned int align_order)
{
        unsigned long n, end, start;
        unsigned long limit;
        int largealloc = npages > 15;
        int pass = 0;
        unsigned long align_mask;
        unsigned long boundary_size;
        unsigned long flags;
        unsigned int pool_nr;
        struct iommu_pool *pool;

        align_mask = (1ull << align_order) - 1;

        /* This allocator was derived from x86_64's bit string search */

        /* Sanity check */
        if (unlikely(npages == 0)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return DMA_MAPPING_ERROR;
        }

        if (should_fail_iommu(dev))
                return DMA_MAPPING_ERROR;

        /*
         * We don't need to disable preemption here because any CPU can
         * safely use any IOMMU pool.
         */
        pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);

        if (largealloc)
                pool = &(tbl->large_pool);
        else
                pool = &(tbl->pools[pool_nr]);

        spin_lock_irqsave(&(pool->lock), flags);

again:
        if ((pass == 0) && handle && *handle &&
            (*handle >= pool->start) && (*handle < pool->end))
                start = *handle;
        else
                start = pool->hint;

        limit = pool->end;

        /* The case below can happen if we have a small segment appended
         * to a large, or when the previous alloc was at the very end of
         * the available space. If so, go back to the initial start.
         */
        if (start >= limit)
                start = pool->start;

        if (limit + tbl->it_offset > mask) {
                limit = mask - tbl->it_offset + 1;
                /* If we're constrained on address range, first try
                 * at the masked hint to avoid O(n) search complexity,
                 * but on second pass, start at 0 in pool 0.
                 */
                if ((start & mask) >= limit || pass > 0) {
                        spin_unlock(&(pool->lock));
                        pool = &(tbl->pools[0]);
                        spin_lock(&(pool->lock));
                        start = pool->start;
                } else {
                        start &= mask;
                }
        }

        if (dev)
                boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                                      1 << tbl->it_page_shift);
        else
                boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift);
        /* 4GB boundary for iseries_hv_alloc and iseries_hv_map */

        n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
                             boundary_size >> tbl->it_page_shift, align_mask);
        if (n == -1) {
                if (likely(pass == 0)) {
                        /* First try the pool from the start */
                        pool->hint = pool->start;
                        pass++;
                        goto again;

                } else if (pass <= tbl->nr_pools) {
                        /* Now try scanning all the other pools */
                        spin_unlock(&(pool->lock));
                        pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
                        pool = &tbl->pools[pool_nr];
                        spin_lock(&(pool->lock));
                        pool->hint = pool->start;
                        pass++;
                        goto again;

                } else {
                        /* Give up */
                        spin_unlock_irqrestore(&(pool->lock), flags);
                        return DMA_MAPPING_ERROR;
                }
        }

        end = n + npages;

        /* Bump the hint to a new block for small allocs. */
        if (largealloc) {
                /* Don't bump to new block to avoid fragmentation */
                pool->hint = end;
        } else {
                /* Overflow will be taken care of at the next allocation */
                pool->hint = (end + tbl->it_blocksize - 1) &
                             ~(tbl->it_blocksize - 1);
        }

        /* Update handle for SG allocations */
        if (handle)
                *handle = end;

        spin_unlock_irqrestore(&(pool->lock), flags);

        return n;
}

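/*
 * Allocate table entries covering @npages starting at @page and program
 * the hardware TCEs to match, flushing the IOMMU TLB if the platform
 * requires it. Returns the DMA address or DMA_MAPPING_ERROR.
 */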
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
                              void *page, unsigned int npages,
                              enum dma_data_direction direction,
                              unsigned long mask, unsigned int align_order,
                              unsigned long attrs)
{
        unsigned long entry;
        dma_addr_t ret = DMA_MAPPING_ERROR;
        int build_fail;

        entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

        if (unlikely(entry == DMA_MAPPING_ERROR))
                return DMA_MAPPING_ERROR;

        entry += tbl->it_offset;        /* Offset into real TCE table */
        ret = entry << tbl->it_page_shift;      /* Set the return dma address */

        /* Put the TCEs in the HW table */
        build_fail = tbl->it_ops->set(tbl, entry, npages,
                                      (unsigned long)page &
                                      IOMMU_PAGE_MASK(tbl), direction, attrs);

        /* tbl->it_ops->set() only returns non-zero for transient errors.
         * Clean up the table bitmap in this case and return
         * DMA_MAPPING_ERROR. For all other errors the functionality is
         * not altered.
         */
        if (unlikely(build_fail)) {
                __iommu_free(tbl, ret, npages);
                return DMA_MAPPING_ERROR;
        }

        /* Flush/invalidate TLB caches if necessary */
        if (tbl->it_ops->flush)
                tbl->it_ops->flush(tbl);

        /* Make sure updates are seen by hardware */
        mb();

        return ret;
}

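/*
 * Sanity-check that @dma_addr and @npages fall inside the table before
 * freeing; returns false (with a rate-limited warning) instead of
 * letting a bogus address corrupt the allocation bitmap.
 */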
static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
                             unsigned int npages)
{
        unsigned long entry, free_entry;

        entry = dma_addr >> tbl->it_page_shift;
        free_entry = entry - tbl->it_offset;

        if (((free_entry + npages) > tbl->it_size) ||
            (entry < tbl->it_offset)) {
                if (printk_ratelimit()) {
                        printk(KERN_INFO "iommu_free: invalid entry\n");
                        printk(KERN_INFO "\tentry    = 0x%lx\n", entry);
                        printk(KERN_INFO "\tdma_addr = 0x%llx\n", (u64)dma_addr);
                        printk(KERN_INFO "\tTable    = 0x%llx\n", (u64)tbl);
                        printk(KERN_INFO "\tbus#     = 0x%llx\n", (u64)tbl->it_busno);
                        printk(KERN_INFO "\tsize     = 0x%llx\n", (u64)tbl->it_size);
                        printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset);
                        printk(KERN_INFO "\tindex    = 0x%llx\n", (u64)tbl->it_index);
                        WARN_ON(1);
                }

                return false;
        }

        return true;
}

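/* Find the pool that a given table entry was allocated from */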
static struct iommu_pool *get_pool(struct iommu_table *tbl,
                                   unsigned long entry)
{
        struct iommu_pool *p;
        unsigned long largepool_start = tbl->large_pool.start;

        /* The large pool is the last pool at the top of the table */
        if (entry >= largepool_start) {
                p = &tbl->large_pool;
        } else {
                unsigned int pool_nr = entry / tbl->poolsize;

                BUG_ON(pool_nr > tbl->nr_pools);
                p = &tbl->pools[pool_nr];
        }

        return p;
}

static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                         unsigned int npages)
{
        unsigned long entry, free_entry;
        unsigned long flags;
        struct iommu_pool *pool;

        entry = dma_addr >> tbl->it_page_shift;
        free_entry = entry - tbl->it_offset;

        pool = get_pool(tbl, free_entry);

        if (!iommu_free_check(tbl, dma_addr, npages))
                return;

        tbl->it_ops->clear(tbl, entry, npages);

        spin_lock_irqsave(&(pool->lock), flags);
        bitmap_clear(tbl->it_map, free_entry, npages);
        spin_unlock_irqrestore(&(pool->lock), flags);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                       unsigned int npages)
{
        __iommu_free(tbl, dma_addr, npages);

        /* Make sure TLB cache is flushed if the HW needs it. We do
         * not do an mb() here on purpose, it is not needed on any of
         * the current platforms.
         */
        if (tbl->it_ops->flush)
                tbl->it_ops->flush(tbl);
}

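/*
 * Map a scatterlist for DMA. Adjacent entries are merged into a single
 * segment when their DMA addresses are contiguous, unless novmerge is
 * set or the device's maximum segment size would be exceeded. Returns
 * the number of DMA segments, or 0 after backing out a partial mapping.
 */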
int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
                     struct scatterlist *sglist, int nelems,
                     unsigned long mask, enum dma_data_direction direction,
                     unsigned long attrs)
{
        dma_addr_t dma_next = 0, dma_addr;
        struct scatterlist *s, *outs, *segstart;
        int outcount, incount, i, build_fail = 0;
        unsigned int align;
        unsigned long handle;
        unsigned int max_seg_size;

        BUG_ON(direction == DMA_NONE);

        if ((nelems == 0) || !tbl)
                return 0;

        outs = s = segstart = &sglist[0];
        outcount = 1;
        incount = nelems;
        handle = 0;

        /* Init first segment length for backout at failure */
        outs->dma_length = 0;

        DBG("sg mapping %d elements:\n", nelems);

        max_seg_size = dma_get_max_seg_size(dev);
        for_each_sg(sglist, s, nelems, i) {
                unsigned long vaddr, npages, entry, slen;

                slen = s->length;
                /* Sanity check */
                if (slen == 0) {
                        dma_next = 0;
                        continue;
                }
                /* Allocate iommu entries for that segment */
                vaddr = (unsigned long) sg_virt(s);
                npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
                align = 0;
                if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
                    (vaddr & ~PAGE_MASK) == 0)
                        align = PAGE_SHIFT - tbl->it_page_shift;
                entry = iommu_range_alloc(dev, tbl, npages, &handle,
                                          mask >> tbl->it_page_shift, align);

                DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

                /* Handle failure */
                if (unlikely(entry == DMA_MAPPING_ERROR)) {
                        if (!(attrs & DMA_ATTR_NO_WARN) &&
                            printk_ratelimit())
                                dev_info(dev, "iommu_alloc failed, tbl %p "
                                         "vaddr %lx npages %lu\n", tbl, vaddr,
                                         npages);
                        goto failure;
                }

                /* Convert entry to a dma_addr_t */
                entry += tbl->it_offset;
                dma_addr = entry << tbl->it_page_shift;
                dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));

                DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
                            npages, entry, dma_addr);

                /* Insert into HW table */
                build_fail = tbl->it_ops->set(tbl, entry, npages,
                                              vaddr & IOMMU_PAGE_MASK(tbl),
                                              direction, attrs);
                if (unlikely(build_fail))
                        goto failure;

                /* If we are in an open segment, try merging */
                if (segstart != s) {
                        DBG("  - trying merge...\n");
                        /* We cannot merge if:
                         * - allocated dma_addr isn't contiguous to previous allocation
                         */
                        if (novmerge || (dma_addr != dma_next) ||
                            (outs->dma_length + s->length > max_seg_size)) {
                                /* Can't merge: create a new segment */
                                segstart = s;
                                outcount++;
                                outs = sg_next(outs);
                                DBG("    can't merge, new segment.\n");
                        } else {
                                outs->dma_length += s->length;
                                DBG("    merged, new len: %ux\n", outs->dma_length);
                        }
                }

                if (segstart == s) {
                        /* This is a new segment, fill entries */
                        DBG("  - filling new segment.\n");
                        outs->dma_address = dma_addr;
                        outs->dma_length = slen;
                }

                /* Calculate next page pointer for contiguous check */
                dma_next = dma_addr + slen;

                DBG("  - dma next is: %lx\n", dma_next);
        }

        /* Flush/invalidate TLB caches if necessary */
        if (tbl->it_ops->flush)
                tbl->it_ops->flush(tbl);

        DBG("mapped %d elements:\n", outcount);

        /* For the sake of ppc_iommu_unmap_sg, we clear out the length in the
         * next entry of the sglist if we didn't fill the list completely
         */
        if (outcount < incount) {
                outs = sg_next(outs);
                outs->dma_address = DMA_MAPPING_ERROR;
                outs->dma_length = 0;
        }

        /* Make sure updates are seen by hardware */
        mb();

        return outcount;

 failure:
        for_each_sg(sglist, s, nelems, i) {
                if (s->dma_length != 0) {
                        unsigned long vaddr, npages;

                        vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
                        npages = iommu_num_pages(s->dma_address, s->dma_length,
                                                 IOMMU_PAGE_SIZE(tbl));
                        __iommu_free(tbl, vaddr, npages);
                        s->dma_address = DMA_MAPPING_ERROR;
                        s->dma_length = 0;
                }
                if (s == outs)
                        break;
        }
        return 0;
}

void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
                        int nelems, enum dma_data_direction direction,
                        unsigned long attrs)
{
        struct scatterlist *sg;

        BUG_ON(direction == DMA_NONE);

        if (!tbl)
                return;

        sg = sglist;
        while (nelems--) {
                unsigned int npages;
                dma_addr_t dma_handle = sg->dma_address;

                if (sg->dma_length == 0)
                        break;
                npages = iommu_num_pages(dma_handle, sg->dma_length,
                                         IOMMU_PAGE_SIZE(tbl));
                __iommu_free(tbl, dma_handle, npages);
                sg = sg_next(sg);
        }

        /* Flush/invalidate TLBs if necessary. As for iommu_free(), we
         * do not do an mb() here, the affected platforms do not need it
         * when freeing.
         */
        if (tbl->it_ops->flush)
                tbl->it_ops->flush(tbl);
}

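/*
 * Set the initial state of the table: normally clear out anything the
 * firmware left behind, but in a kdump kernel preserve the previous
 * kernel's mappings so that in-flight DMA is not redirected onto memory
 * the new kernel is using.
 */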
static void iommu_table_clear(struct iommu_table *tbl)
{
        /*
         * In case of firmware assisted dump, the system goes through a
         * clean reboot process at the time of system crash. Hence it's
         * safe to clear the TCE entries if firmware assisted dump is
         * active.
         */
        if (!is_kdump_kernel() || is_fadump_active()) {
                /* Clear the table in case firmware left allocations in it */
                tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
                return;
        }

#ifdef CONFIG_CRASH_DUMP
        if (tbl->it_ops->get) {
                unsigned long index, tceval, tcecount = 0;

                /* Reserve the existing mappings left by the first kernel. */
                for (index = 0; index < tbl->it_size; index++) {
                        tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
                        /*
                         * Freed TCE entry contains 0x7fffffffffffffff on JS20
                         */
                        if (tceval && (tceval != 0x7fffffffffffffffUL)) {
                                __set_bit(index, tbl->it_map);
                                tcecount++;
                        }
                }

                if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
                        printk(KERN_WARNING "TCE table is full; freeing ");
                        printk(KERN_WARNING "%d entries for the kdump boot\n",
                               KDUMP_MIN_TCE_ENTRIES);
                        for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
                             index < tbl->it_size; index++)
                                __clear_bit(index, tbl->it_map);
                }
        }
#endif
}

/*
 * Build an iommu_table structure. This contains a bit map which
 * is used to manage allocation of the tce space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
        unsigned long sz;
        static int welcomed = 0;
        struct page *page;
        unsigned int i;
        struct iommu_pool *p;

        BUG_ON(!tbl->it_ops);

        /* number of bytes needed for the bitmap */
        sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

        page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
        if (!page)
                panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
        tbl->it_map = page_address(page);
        memset(tbl->it_map, 0, sz);

        /*
         * Reserve page 0 so it will not be used for any mappings.
         * This avoids buggy drivers that consider page 0 to be invalid
         * to crash the machine or even lose data.
         */
        if (tbl->it_offset == 0)
                set_bit(0, tbl->it_map);

        /* We only split the IOMMU table if we have 1GB or more of space */
        if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
                tbl->nr_pools = IOMMU_NR_POOLS;
        else
                tbl->nr_pools = 1;

        /* We reserve the top 1/4 of the table for large allocations */
        tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;

        for (i = 0; i < tbl->nr_pools; i++) {
                p = &tbl->pools[i];
                spin_lock_init(&(p->lock));
                p->start = tbl->poolsize * i;
                p->hint = p->start;
                p->end = p->start + tbl->poolsize;
        }

        p = &tbl->large_pool;
        spin_lock_init(&(p->lock));
        p->start = tbl->poolsize * i;
        p->hint = p->start;
        p->end = tbl->it_size;

        iommu_table_clear(tbl);

        if (!welcomed) {
                printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
                       novmerge ? "disabled" : "enabled");
                welcomed = 1;
        }

        return tbl;
}

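/* kref release callback: free the allocation bitmap and the table itself */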
static void iommu_table_free(struct kref *kref)
{
        unsigned long bitmap_sz;
        unsigned int order;
        struct iommu_table *tbl;

        tbl = container_of(kref, struct iommu_table, it_kref);

        if (tbl->it_ops->free)
                tbl->it_ops->free(tbl);

        if (!tbl->it_map) {
                kfree(tbl);
                return;
        }

        /*
         * In case we have reserved the first bit, we should not emit
         * the warning below.
         */
        if (tbl->it_offset == 0)
                clear_bit(0, tbl->it_map);

        /* verify that table contains no entries */
        if (!bitmap_empty(tbl->it_map, tbl->it_size))
                pr_warn("%s: Unexpected TCEs\n", __func__);

        /* calculate bitmap size in bytes */
        bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

        /* free bitmap */
        order = get_order(bitmap_sz);
        free_pages((unsigned long) tbl->it_map, order);

        /* free table */
        kfree(tbl);
}

struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl)
{
        if (kref_get_unless_zero(&tbl->it_kref))
                return tbl;

        return NULL;
}
EXPORT_SYMBOL_GPL(iommu_tce_table_get);

int iommu_tce_table_put(struct iommu_table *tbl)
{
        if (WARN_ON(!tbl))
                return 0;

        return kref_put(&tbl->it_kref, iommu_table_free);
}
EXPORT_SYMBOL_GPL(iommu_tce_table_put);

/* Creates TCEs for a user provided buffer. The user buffer must be
 * contiguous real kernel storage (not vmalloc). The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
                          struct page *page, unsigned long offset, size_t size,
                          unsigned long mask, enum dma_data_direction direction,
                          unsigned long attrs)
{
        dma_addr_t dma_handle = DMA_MAPPING_ERROR;
        void *vaddr;
        unsigned long uaddr;
        unsigned int npages, align;

        BUG_ON(direction == DMA_NONE);

        vaddr = page_address(page) + offset;
        uaddr = (unsigned long)vaddr;

        if (tbl) {
                npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
                align = 0;
                if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
                    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
                        align = PAGE_SHIFT - tbl->it_page_shift;

                dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
                                         mask >> tbl->it_page_shift, align,
                                         attrs);
                if (dma_handle == DMA_MAPPING_ERROR) {
                        if (!(attrs & DMA_ATTR_NO_WARN) &&
                            printk_ratelimit()) {
                                dev_info(dev, "iommu_alloc failed, tbl %p "
                                         "vaddr %p npages %d\n", tbl, vaddr,
                                         npages);
                        }
                } else
                        dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
        }

        return dma_handle;
}

void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
                      size_t size, enum dma_data_direction direction,
                      unsigned long attrs)
{
        unsigned int npages;

        BUG_ON(direction == DMA_NONE);

        if (tbl) {
                npages = iommu_num_pages(dma_handle, size,
                                         IOMMU_PAGE_SIZE(tbl));
                iommu_free(tbl, dma_handle, npages);
        }
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
                           size_t size, dma_addr_t *dma_handle,
                           unsigned long mask, gfp_t flag, int node)
{
        void *ret = NULL;
        dma_addr_t mapping;
        unsigned int order;
        unsigned int nio_pages, io_order;
        struct page *page;

        size = PAGE_ALIGN(size);
        order = get_order(size);

        /*
         * Client asked for way too much space. This is checked later
         * anyway. It is easier to debug here for the drivers than in
         * the tce tables.
         */
        if (order >= IOMAP_MAX_ORDER) {
                dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
                         size);
                return NULL;
        }

        if (!tbl)
                return NULL;

        /* Alloc enough pages (and possibly more) */
        page = alloc_pages_node(node, flag, order);
        if (!page)
                return NULL;
        ret = page_address(page);
        memset(ret, 0, size);

        /* Set up tces to cover the allocated range */
        nio_pages = size >> tbl->it_page_shift;
        io_order = get_iommu_order(size, tbl);
        mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
                              mask >> tbl->it_page_shift, io_order, 0);
        if (mapping == DMA_MAPPING_ERROR) {
                free_pages((unsigned long)ret, order);
                return NULL;
        }
        *dma_handle = mapping;
        return ret;
}

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
                         void *vaddr, dma_addr_t dma_handle)
{
        if (tbl) {
                unsigned int nio_pages;

                size = PAGE_ALIGN(size);
                nio_pages = size >> tbl->it_page_shift;
                iommu_free(tbl, dma_handle, nio_pages);
                size = PAGE_ALIGN(size);
                free_pages((unsigned long)vaddr, get_order(size));
        }
}

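/* Translate a DMA API data direction into TCE read/write permission bits */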
unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_BIDIRECTIONAL:
                return TCE_PCI_READ | TCE_PCI_WRITE;
        case DMA_FROM_DEVICE:
                return TCE_PCI_WRITE;
        case DMA_TO_DEVICE:
                return TCE_PCI_READ;
        default:
                return 0;
        }
}
EXPORT_SYMBOL_GPL(iommu_direction_to_tce_perm);

#ifdef CONFIG_IOMMU_API
/*
 * SPAPR TCE API
 */
static void group_release(void *iommu_data)
{
        struct iommu_table_group *table_group = iommu_data;

        table_group->group = NULL;
}

void iommu_register_group(struct iommu_table_group *table_group,
                          int pci_domain_number, unsigned long pe_num)
{
        struct iommu_group *grp;
        char *name;

        grp = iommu_group_alloc();
        if (IS_ERR(grp)) {
                pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
                        PTR_ERR(grp));
                return;
        }
        table_group->group = grp;
        iommu_group_set_iommudata(grp, table_group, group_release);
        name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
                         pci_domain_number, pe_num);
        if (!name)
                return;
        iommu_group_set_name(grp, name);
        kfree(name);
}

enum dma_data_direction iommu_tce_direction(unsigned long tce)
{
        if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
                return DMA_BIDIRECTIONAL;
        else if (tce & TCE_PCI_READ)
                return DMA_TO_DEVICE;
        else if (tce & TCE_PCI_WRITE)
                return DMA_FROM_DEVICE;
        else
                return DMA_NONE;
}
EXPORT_SYMBOL_GPL(iommu_tce_direction);

void iommu_flush_tce(struct iommu_table *tbl)
{
        /* Flush/invalidate TLB caches if necessary */
        if (tbl->it_ops->flush)
                tbl->it_ops->flush(tbl);

        /* Make sure updates are seen by hardware */
        mb();
}
EXPORT_SYMBOL_GPL(iommu_flush_tce);

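/*
 * Validate a (possibly guest supplied) I/O bus address against a table
 * window: it must be page aligned and must fall inside the window
 * described by @offset and @size (both in pages).
 */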
int iommu_tce_check_ioba(unsigned long page_shift,
                         unsigned long offset, unsigned long size,
                         unsigned long ioba, unsigned long npages)
{
        unsigned long mask = (1UL << page_shift) - 1;

        if (ioba & mask)
                return -EINVAL;

        ioba >>= page_shift;
        if (ioba < offset)
                return -EINVAL;

        if ((ioba + 1) > (offset + size))
                return -EINVAL;

        return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_check_ioba);

int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
{
        unsigned long mask = (1UL << page_shift) - 1;

        if (gpa & mask)
                return -EINVAL;

        return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);

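/*
 * Exchange the TCE at @entry for a new translation, returning the old
 * host physical address and direction through @hpa and @direction. The
 * old page is marked dirty if the device may have written to it.
 */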
long iommu_tce_xchg(struct mm_struct *mm, struct iommu_table *tbl,
                    unsigned long entry, unsigned long *hpa,
                    enum dma_data_direction *direction)
{
        long ret;
        unsigned long size = 0;

        ret = tbl->it_ops->exchange(tbl, entry, hpa, direction);

        if (!ret && ((*direction == DMA_FROM_DEVICE) ||
                     (*direction == DMA_BIDIRECTIONAL)) &&
            !mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,
                                &size))
                SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));

        /* if (unlikely(ret))
                pr_err("iommu_tce: %s failed on hwaddr=%lx ioba=%lx kva=%lx ret=%d\n",
                        __func__, hwaddr, entry << tbl->it_page_shift,
                        hwaddr, ret); */

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_tce_xchg);

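/*
 * Hand the table over to VFIO: the table must contain no kernel
 * mappings, and the whole bitmap is then marked busy so the kernel
 * DMA API cannot allocate from it while user space owns it.
 */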
int iommu_take_ownership(struct iommu_table *tbl)
{
        unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
        int ret = 0;

        /*
         * VFIO does not control TCE entries allocation and the guest
         * can write new TCEs on top of existing ones so iommu_tce_build()
         * must be able to release old pages. This functionality
         * requires an exchange() callback defined, so if it is not
         * implemented, we disallow taking ownership over the table.
         */
        if (!tbl->it_ops->exchange)
                return -EINVAL;

        spin_lock_irqsave(&tbl->large_pool.lock, flags);
        for (i = 0; i < tbl->nr_pools; i++)
                spin_lock(&tbl->pools[i].lock);

        if (tbl->it_offset == 0)
                clear_bit(0, tbl->it_map);

        if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
                pr_err("iommu_tce: it_map is not empty");
                ret = -EBUSY;
                /* Restore bit#0 set by iommu_init_table() */
                if (tbl->it_offset == 0)
                        set_bit(0, tbl->it_map);
        } else {
                memset(tbl->it_map, 0xff, sz);
        }

        for (i = 0; i < tbl->nr_pools; i++)
                spin_unlock(&tbl->pools[i].lock);
        spin_unlock_irqrestore(&tbl->large_pool.lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_take_ownership);

void iommu_release_ownership(struct iommu_table *tbl)
{
        unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;

        spin_lock_irqsave(&tbl->large_pool.lock, flags);
        for (i = 0; i < tbl->nr_pools; i++)
                spin_lock(&tbl->pools[i].lock);

        memset(tbl->it_map, 0, sz);

        /* Restore bit#0 set by iommu_init_table() */
        if (tbl->it_offset == 0)
                set_bit(0, tbl->it_map);

        for (i = 0; i < tbl->nr_pools; i++)
                spin_unlock(&tbl->pools[i].lock);
        spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
}
EXPORT_SYMBOL_GPL(iommu_release_ownership);

int iommu_add_device(struct iommu_table_group *table_group, struct device *dev)
{
        /*
         * The sysfs entries should be populated before
         * binding the IOMMU group. If the sysfs entries
         * aren't ready, we simply bail.
         */
        if (!device_is_registered(dev))
                return -ENOENT;

        if (device_iommu_mapped(dev)) {
                pr_debug("%s: Skipping device %s with iommu group %d\n",
                         __func__, dev_name(dev),
                         iommu_group_id(dev->iommu_group));
                return -EBUSY;
        }

        pr_debug("%s: Adding %s to iommu group %d\n",
                 __func__, dev_name(dev), iommu_group_id(table_group->group));

        return iommu_group_add_device(table_group->group, dev);
}
EXPORT_SYMBOL_GPL(iommu_add_device);

void iommu_del_device(struct device *dev)
{
        /*
         * Some devices might not have an IOMMU table and group,
         * and we needn't detach them from the associated
         * IOMMU groups.
         */
        if (!device_iommu_mapped(dev)) {
                pr_debug("iommu_tce: skipping device %s with no tbl\n",
                         dev_name(dev));
                return;
        }

        iommu_group_remove_device(dev);
}
EXPORT_SYMBOL_GPL(iommu_del_device);
#endif /* CONFIG_IOMMU_API */