/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <linux/hash.h>
#include <linux/fault-inject.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
#include <asm/fadump.h>
#include <asm/vio.h>
#include <asm/tce.h>

#define DBG(...)
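/*
 * DBG() compiles away to nothing by default. For verbose tracing during
 * bring-up it can be redefined locally, e.g. #define DBG(fmt...) pr_debug(fmt).
 */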

static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);

static int __init setup_iommu(char *str)
{
        if (!strcmp(str, "novmerge"))
                novmerge = 1;
        else if (!strcmp(str, "vmerge"))
                novmerge = 0;
        return 1;
}

__setup("iommu=", setup_iommu);

static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);

/*
 * We precalculate the hash to avoid doing it on every allocation.
 *
 * The hash is important to spread CPUs across all the pools. For example,
 * on a POWER7 with 4-way SMT, device interrupts tend to land on the
 * primary threads; without the hash, with 4 pools every primary thread
 * would map to the same pool.
 */
static int __init setup_iommu_pool_hash(void)
{
        unsigned int i;

        for_each_possible_cpu(i)
                per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);

        return 0;
}
subsys_initcall(setup_iommu_pool_hash);

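/*
 * Optional fault injection (CONFIG_FAIL_IOMMU): forces iommu_range_alloc()
 * to fail for selected devices so that driver error paths can be tested.
 * Enable it per device through the fail_iommu sysfs attribute created by
 * the bus notifier below; the fail_iommu= boot parameter takes the common
 * fault-attr syntax <interval>,<probability>,<space>,<times> (see
 * Documentation/fault-injection/).
 */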
#ifdef CONFIG_FAIL_IOMMU

static DECLARE_FAULT_ATTR(fail_iommu);

static int __init setup_fail_iommu(char *str)
{
        return setup_fault_attr(&fail_iommu, str);
}
__setup("fail_iommu=", setup_fail_iommu);

static bool should_fail_iommu(struct device *dev)
{
        return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
}

static int __init fail_iommu_debugfs(void)
{
        struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
                                                       NULL, &fail_iommu);

        return PTR_ERR_OR_ZERO(dir);
}
late_initcall(fail_iommu_debugfs);

static ssize_t fail_iommu_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
}

static ssize_t fail_iommu_store(struct device *dev,
                                struct device_attribute *attr, const char *buf,
                                size_t count)
{
        int i;

        if (count > 0 && sscanf(buf, "%d", &i) > 0)
                dev->archdata.fail_iommu = (i == 0) ? 0 : 1;

        return count;
}

static DEVICE_ATTR(fail_iommu, S_IRUGO|S_IWUSR, fail_iommu_show,
                   fail_iommu_store);

static int fail_iommu_bus_notify(struct notifier_block *nb,
                                 unsigned long action, void *data)
{
        struct device *dev = data;

        if (action == BUS_NOTIFY_ADD_DEVICE) {
                if (device_create_file(dev, &dev_attr_fail_iommu))
                        pr_warn("Unable to create IOMMU fault injection sysfs entries\n");
        } else if (action == BUS_NOTIFY_DEL_DEVICE) {
                device_remove_file(dev, &dev_attr_fail_iommu);
        }

        return 0;
}

static struct notifier_block fail_iommu_bus_notifier = {
        .notifier_call = fail_iommu_bus_notify
};

static int __init fail_iommu_setup(void)
{
#ifdef CONFIG_PCI
        bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier);
#endif
#ifdef CONFIG_IBMVIO
        bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier);
#endif

        return 0;
}
/*
 * Must execute after the PCI and VIO subsystems have initialised but
 * before devices are probed.
 */
arch_initcall(fail_iommu_setup);
#else
static inline bool should_fail_iommu(struct device *dev)
{
        return false;
}
#endif

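/*
 * TCE range allocator. iommu_init_table() splits the table bitmap into up
 * to IOMMU_NR_POOLS equal pools plus one "large" pool covering the top
 * quarter of the table. Each CPU hashes to a default pool, which keeps
 * concurrent allocators off each other's locks; allocations of more than
 * 15 IOMMU pages always come from the large pool. On failure we first
 * rescan the current pool from its start, then walk the remaining pools
 * before giving up.
 */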
static unsigned long iommu_range_alloc(struct device *dev,
                                       struct iommu_table *tbl,
                                       unsigned long npages,
                                       unsigned long *handle,
                                       unsigned long mask,
                                       unsigned int align_order)
{
        unsigned long n, end, start;
        unsigned long limit;
        int largealloc = npages > 15;
        int pass = 0;
        unsigned long align_mask;
        unsigned long boundary_size;
        unsigned long flags;
        unsigned int pool_nr;
        struct iommu_pool *pool;

        /* Build the mask from the low bits; a right shift by 64 (when
         * align_order == 0) would be undefined behaviour.
         */
        align_mask = (1ull << align_order) - 1;

        /* This allocator was derived from x86_64's bit string search */

        /* Sanity check */
        if (unlikely(npages == 0)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return DMA_ERROR_CODE;
        }

        if (should_fail_iommu(dev))
                return DMA_ERROR_CODE;

        /*
         * We don't need to disable preemption here because any CPU can
         * safely use any IOMMU pool.
         */
        pool_nr = __raw_get_cpu_var(iommu_pool_hash) & (tbl->nr_pools - 1);

        if (largealloc)
                pool = &(tbl->large_pool);
        else
                pool = &(tbl->pools[pool_nr]);

        spin_lock_irqsave(&(pool->lock), flags);

again:
        if ((pass == 0) && handle && *handle &&
            (*handle >= pool->start) && (*handle < pool->end))
                start = *handle;
        else
                start = pool->hint;

        limit = pool->end;

        /* The case below can happen if we have a small segment appended
         * to a large, or when the previous alloc was at the very end of
         * the available space. If so, go back to the initial start.
         */
        if (start >= limit)
                start = pool->start;

        if (limit + tbl->it_offset > mask) {
                limit = mask - tbl->it_offset + 1;
                /* If we're constrained on address range, first try
                 * at the masked hint to avoid O(n) search complexity,
                 * but on second pass, start at 0 in pool 0.
                 */
                if ((start & mask) >= limit || pass > 0) {
                        spin_unlock(&(pool->lock));
                        pool = &(tbl->pools[0]);
                        spin_lock(&(pool->lock));
                        start = pool->start;
                } else {
                        start &= mask;
                }
        }

        if (dev)
                boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                                      1 << IOMMU_PAGE_SHIFT);
        else
                boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
        /* 4GB boundary for iseries_hv_alloc and iseries_hv_map */

        n = iommu_area_alloc(tbl->it_map, limit, start, npages,
                             tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
                             align_mask);
        if (n == -1) {
                if (likely(pass == 0)) {
                        /* First try the pool from the start */
                        pool->hint = pool->start;
                        pass++;
                        goto again;

                } else if (pass <= tbl->nr_pools) {
                        /* Now try scanning all the other pools */
                        spin_unlock(&(pool->lock));
                        pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
                        pool = &tbl->pools[pool_nr];
                        spin_lock(&(pool->lock));
                        pool->hint = pool->start;
                        pass++;
                        goto again;

                } else {
                        /* Give up */
                        spin_unlock_irqrestore(&(pool->lock), flags);
                        return DMA_ERROR_CODE;
                }
        }

        end = n + npages;

        /* Bump the hint to a new block for small allocs. */
        if (largealloc) {
                /* Don't bump to new block to avoid fragmentation */
                pool->hint = end;
        } else {
                /* Overflow will be taken care of at the next allocation */
                pool->hint = (end + tbl->it_blocksize - 1) &
                             ~(tbl->it_blocksize - 1);
        }

        /* Update handle for SG allocations */
        if (handle)
                *handle = end;

        spin_unlock_irqrestore(&(pool->lock), flags);

        return n;
}

static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
                              void *page, unsigned int npages,
                              enum dma_data_direction direction,
                              unsigned long mask, unsigned int align_order,
                              struct dma_attrs *attrs)
{
        unsigned long entry;
        dma_addr_t ret = DMA_ERROR_CODE;
        int build_fail;

        entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

        if (unlikely(entry == DMA_ERROR_CODE))
                return DMA_ERROR_CODE;

        entry += tbl->it_offset;        /* Offset into real TCE table */
        ret = entry << IOMMU_PAGE_SHIFT;        /* Set the return dma address */

        /* Put the TCEs in the HW table */
        build_fail = ppc_md.tce_build(tbl, entry, npages,
                                      (unsigned long)page & IOMMU_PAGE_MASK,
                                      direction, attrs);

        /* ppc_md.tce_build() only returns non-zero for transient errors.
         * Clean up the table bitmap in this case and return
         * DMA_ERROR_CODE. For all other errors the functionality is
         * not altered.
         */
        if (unlikely(build_fail)) {
                __iommu_free(tbl, ret, npages);
                return DMA_ERROR_CODE;
        }

        /* Flush/invalidate TLB caches if necessary */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        /* Make sure updates are seen by hardware */
        mb();

        return ret;
}

static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
                             unsigned int npages)
{
        unsigned long entry, free_entry;

        entry = dma_addr >> IOMMU_PAGE_SHIFT;
        free_entry = entry - tbl->it_offset;

        if (((free_entry + npages) > tbl->it_size) ||
            (entry < tbl->it_offset)) {
                if (printk_ratelimit()) {
                        printk(KERN_INFO "iommu_free: invalid entry\n");
                        printk(KERN_INFO "\tentry    = 0x%lx\n", entry);
                        printk(KERN_INFO "\tdma_addr = 0x%llx\n", (u64)dma_addr);
                        printk(KERN_INFO "\tTable    = 0x%llx\n", (u64)tbl);
                        printk(KERN_INFO "\tbus#     = 0x%llx\n", (u64)tbl->it_busno);
                        printk(KERN_INFO "\tsize     = 0x%llx\n", (u64)tbl->it_size);
                        printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset);
                        printk(KERN_INFO "\tindex    = 0x%llx\n", (u64)tbl->it_index);
                        WARN_ON(1);
                }

                return false;
        }

        return true;
}

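/*
 * Map a table entry back to the pool that owns it: entries below
 * large_pool.start belong to the fixed-size pools laid out back to back
 * from entry 0, everything at or above it to the large pool.
 */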
static struct iommu_pool *get_pool(struct iommu_table *tbl,
                                   unsigned long entry)
{
        struct iommu_pool *p;
        unsigned long largepool_start = tbl->large_pool.start;

        /* The large pool is the last pool at the top of the table */
        if (entry >= largepool_start) {
                p = &tbl->large_pool;
        } else {
                unsigned int pool_nr = entry / tbl->poolsize;

                BUG_ON(pool_nr > tbl->nr_pools);
                p = &tbl->pools[pool_nr];
        }

        return p;
}

static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                         unsigned int npages)
{
        unsigned long entry, free_entry;
        unsigned long flags;
        struct iommu_pool *pool;

        entry = dma_addr >> IOMMU_PAGE_SHIFT;
        free_entry = entry - tbl->it_offset;

        pool = get_pool(tbl, free_entry);

        if (!iommu_free_check(tbl, dma_addr, npages))
                return;

        ppc_md.tce_free(tbl, entry, npages);

        spin_lock_irqsave(&(pool->lock), flags);
        bitmap_clear(tbl->it_map, free_entry, npages);
        spin_unlock_irqrestore(&(pool->lock), flags);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                       unsigned int npages)
{
        __iommu_free(tbl, dma_addr, npages);

        /* Make sure TLB cache is flushed if the HW needs it. We do
         * not do an mb() here on purpose, it is not needed on any of
         * the current platforms.
         */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);
}

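/*
 * Map a scatterlist. Unless booted with iommu=novmerge, consecutive
 * elements whose TCE mappings turn out adjacent in DMA space are
 * "virtually merged" into a single output segment, capped at the
 * device's max segment size. Returns the number of output segments,
 * or 0 on failure (after tearing down any partial mappings).
 */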
int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
                 struct scatterlist *sglist, int nelems,
                 unsigned long mask, enum dma_data_direction direction,
                 struct dma_attrs *attrs)
{
        dma_addr_t dma_next = 0, dma_addr;
        struct scatterlist *s, *outs, *segstart;
        int outcount, incount, i, build_fail = 0;
        unsigned int align;
        unsigned long handle;
        unsigned int max_seg_size;

        BUG_ON(direction == DMA_NONE);

        if ((nelems == 0) || !tbl)
                return 0;

        outs = s = segstart = &sglist[0];
        outcount = 1;
        incount = nelems;
        handle = 0;

        /* Init first segment length for backout at failure */
        outs->dma_length = 0;

        DBG("sg mapping %d elements:\n", nelems);

        max_seg_size = dma_get_max_seg_size(dev);
        for_each_sg(sglist, s, nelems, i) {
                unsigned long vaddr, npages, entry, slen;

                slen = s->length;
                /* Sanity check */
                if (slen == 0) {
                        dma_next = 0;
                        continue;
                }
                /* Allocate iommu entries for that segment */
                vaddr = (unsigned long) sg_virt(s);
                npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE);
                align = 0;
                if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
                    (vaddr & ~PAGE_MASK) == 0)
                        align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
                entry = iommu_range_alloc(dev, tbl, npages, &handle,
                                          mask >> IOMMU_PAGE_SHIFT, align);

                DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

                /* Handle failure */
                if (unlikely(entry == DMA_ERROR_CODE)) {
                        if (printk_ratelimit())
                                dev_info(dev, "iommu_alloc failed, tbl %p vaddr %lx npages %lu\n",
                                         tbl, vaddr, npages);
                        goto failure;
                }

                /* Convert entry to a dma_addr_t */
                entry += tbl->it_offset;
                dma_addr = entry << IOMMU_PAGE_SHIFT;
                dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

                DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
                            npages, entry, dma_addr);

                /* Insert into HW table */
                build_fail = ppc_md.tce_build(tbl, entry, npages,
                                              vaddr & IOMMU_PAGE_MASK,
                                              direction, attrs);
                if (unlikely(build_fail))
                        goto failure;

                /* If we are in an open segment, try merging */
                if (segstart != s) {
                        DBG("  - trying merge...\n");
                        /* We cannot merge if:
                         * - allocated dma_addr isn't contiguous to previous allocation
                         */
                        if (novmerge || (dma_addr != dma_next) ||
                            (outs->dma_length + s->length > max_seg_size)) {
                                /* Can't merge: create a new segment */
                                segstart = s;
                                outcount++;
                                outs = sg_next(outs);
                                DBG("    can't merge, new segment.\n");
                        } else {
                                outs->dma_length += s->length;
                                DBG("    merged, new len: %x\n", outs->dma_length);
                        }
                }

                if (segstart == s) {
                        /* This is a new segment, fill entries */
                        DBG("  - filling new segment.\n");
                        outs->dma_address = dma_addr;
                        outs->dma_length = slen;
                }

                /* Calculate next page pointer for contiguous check */
                dma_next = dma_addr + slen;

                DBG("  - dma next is: %lx\n", dma_next);
        }

        /* Flush/invalidate TLB caches if necessary */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        DBG("mapped %d elements:\n", outcount);

        /* For the sake of iommu_unmap_sg, we clear out the length in the
         * next entry of the sglist if we didn't fill the list completely
         */
        if (outcount < incount) {
                outs = sg_next(outs);
                outs->dma_address = DMA_ERROR_CODE;
                outs->dma_length = 0;
        }

        /* Make sure updates are seen by hardware */
        mb();

        return outcount;

 failure:
        for_each_sg(sglist, s, nelems, i) {
                if (s->dma_length != 0) {
                        unsigned long vaddr, npages;

                        vaddr = s->dma_address & IOMMU_PAGE_MASK;
                        npages = iommu_num_pages(s->dma_address, s->dma_length,
                                                 IOMMU_PAGE_SIZE);
                        __iommu_free(tbl, vaddr, npages);
                        s->dma_address = DMA_ERROR_CODE;
                        s->dma_length = 0;
                }
                if (s == outs)
                        break;
        }
        return 0;
}


void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
                    int nelems, enum dma_data_direction direction,
                    struct dma_attrs *attrs)
{
        struct scatterlist *sg;

        BUG_ON(direction == DMA_NONE);

        if (!tbl)
                return;

        sg = sglist;
        while (nelems--) {
                unsigned int npages;
                dma_addr_t dma_handle = sg->dma_address;

                if (sg->dma_length == 0)
                        break;
                npages = iommu_num_pages(dma_handle, sg->dma_length,
                                         IOMMU_PAGE_SIZE);
                __iommu_free(tbl, dma_handle, npages);
                sg = sg_next(sg);
        }

        /* Flush/invalidate TLBs if necessary. As for iommu_free(), we
         * do not do an mb() here, the affected platforms do not need it
         * when freeing.
         */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);
}

static void iommu_table_clear(struct iommu_table *tbl)
{
        /*
         * With firmware-assisted dump, the system goes through a clean
         * reboot at the time of the crash, so it is safe to clear the
         * TCE entries when a firmware-assisted dump is active.
         */
        if (!is_kdump_kernel() || is_fadump_active()) {
                /* Clear the table in case firmware left allocations in it */
                ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
                return;
        }

#ifdef CONFIG_CRASH_DUMP
        if (ppc_md.tce_get) {
                unsigned long index, tceval, tcecount = 0;

                /* Reserve the existing mappings left by the first kernel. */
                for (index = 0; index < tbl->it_size; index++) {
                        tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
                        /*
                         * Freed TCE entry contains 0x7fffffffffffffff on JS20
                         */
                        if (tceval && (tceval != 0x7fffffffffffffffUL)) {
                                __set_bit(index, tbl->it_map);
                                tcecount++;
                        }
                }

                if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
                        printk(KERN_WARNING "TCE table is full; freeing %d entries for the kdump boot\n",
                               KDUMP_MIN_TCE_ENTRIES);
                        for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
                                index < tbl->it_size; index++)
                                __clear_bit(index, tbl->it_map);
                }
        }
#endif
}

/*
 * Build an iommu_table structure. This contains a bit map which
 * is used to manage allocation of the tce space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
        unsigned long sz;
        static int welcomed = 0;
        struct page *page;
        unsigned int i;
        struct iommu_pool *p;

        /* number of bytes needed for the bitmap */
        sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

        page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
        if (!page)
                panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
        tbl->it_map = page_address(page);
        memset(tbl->it_map, 0, sz);

        /*
         * Reserve page 0 so it will not be used for any mappings.
         * This avoids buggy drivers that consider page 0 to be invalid
         * to crash the machine or even lose data.
         */
        if (tbl->it_offset == 0)
                set_bit(0, tbl->it_map);

        /* We only split the IOMMU table if we have 1GB or more of space */
        if ((tbl->it_size << IOMMU_PAGE_SHIFT) >= (1UL * 1024 * 1024 * 1024))
                tbl->nr_pools = IOMMU_NR_POOLS;
        else
                tbl->nr_pools = 1;

        /* We reserve the top 1/4 of the table for large allocations */
        tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;

        for (i = 0; i < tbl->nr_pools; i++) {
                p = &tbl->pools[i];
                spin_lock_init(&(p->lock));
                p->start = tbl->poolsize * i;
                p->hint = p->start;
                p->end = p->start + tbl->poolsize;
        }

        p = &tbl->large_pool;
        spin_lock_init(&(p->lock));
        p->start = tbl->poolsize * i;
        p->hint = p->start;
        p->end = tbl->it_size;

        iommu_table_clear(tbl);

        if (!welcomed) {
                printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
                       novmerge ? "disabled" : "enabled");
                welcomed = 1;
        }

        return tbl;
}

void iommu_free_table(struct iommu_table *tbl, const char *node_name)
{
        unsigned long bitmap_sz;
        unsigned int order;

        if (!tbl || !tbl->it_map) {
                printk(KERN_ERR "%s: expected TCE map for %s\n", __func__,
                                node_name);
                return;
        }

        /*
         * In case we have reserved the first bit, we should not emit
         * the warning below.
         */
        if (tbl->it_offset == 0)
                clear_bit(0, tbl->it_map);

#ifdef CONFIG_IOMMU_API
        if (tbl->it_group) {
                iommu_group_put(tbl->it_group);
                BUG_ON(tbl->it_group);
        }
#endif

        /* verify that table contains no entries */
        if (!bitmap_empty(tbl->it_map, tbl->it_size))
                pr_warn("%s: Unexpected TCEs for %s\n", __func__, node_name);

        /* calculate bitmap size in bytes */
        bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

        /* free bitmap */
        order = get_order(bitmap_sz);
        free_pages((unsigned long) tbl->it_map, order);

        /* free table */
        kfree(tbl);
}

/* Creates TCEs for a user provided buffer. The user buffer must be
 * contiguous real kernel storage (not vmalloc). The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
                          struct page *page, unsigned long offset, size_t size,
                          unsigned long mask, enum dma_data_direction direction,
                          struct dma_attrs *attrs)
{
        dma_addr_t dma_handle = DMA_ERROR_CODE;
        void *vaddr;
        unsigned long uaddr;
        unsigned int npages, align;

        BUG_ON(direction == DMA_NONE);

        vaddr = page_address(page) + offset;
        uaddr = (unsigned long)vaddr;
        npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);

        if (tbl) {
                align = 0;
                if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
                    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
                        align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;

                dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
                                         mask >> IOMMU_PAGE_SHIFT, align,
                                         attrs);
                if (dma_handle == DMA_ERROR_CODE) {
                        if (printk_ratelimit())
                                dev_info(dev, "iommu_alloc failed, tbl %p vaddr %p npages %d\n",
                                         tbl, vaddr, npages);
                } else
                        dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
        }

        return dma_handle;
}

void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
                      size_t size, enum dma_data_direction direction,
                      struct dma_attrs *attrs)
{
        unsigned int npages;

        BUG_ON(direction == DMA_NONE);

        if (tbl) {
                npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE);
                iommu_free(tbl, dma_handle, npages);
        }
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
                           size_t size, dma_addr_t *dma_handle,
                           unsigned long mask, gfp_t flag, int node)
{
        void *ret = NULL;
        dma_addr_t mapping;
        unsigned int order;
        unsigned int nio_pages, io_order;
        struct page *page;

        size = PAGE_ALIGN(size);
        order = get_order(size);

        /*
         * Client asked for way too much space. This is checked later
         * anyway. It is easier to debug here for the drivers than in
         * the tce tables.
         */
        if (order >= IOMAP_MAX_ORDER) {
                dev_info(dev, "iommu_alloc_coherent size too large: 0x%lx\n",
                         size);
                return NULL;
        }

        if (!tbl)
                return NULL;

        /* Alloc enough pages (and possibly more) */
        page = alloc_pages_node(node, flag, order);
        if (!page)
                return NULL;
        ret = page_address(page);
        memset(ret, 0, size);

        /* Set up tces to cover the allocated range */
        nio_pages = size >> IOMMU_PAGE_SHIFT;
        io_order = get_iommu_order(size);
        mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
                              mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
        if (mapping == DMA_ERROR_CODE) {
                free_pages((unsigned long)ret, order);
                return NULL;
        }
        *dma_handle = mapping;
        return ret;
}

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
                         void *vaddr, dma_addr_t dma_handle)
{
        if (tbl) {
                unsigned int nio_pages;

                size = PAGE_ALIGN(size);
                nio_pages = size >> IOMMU_PAGE_SHIFT;
                iommu_free(tbl, dma_handle, nio_pages);
                free_pages((unsigned long)vaddr, get_order(size));
        }
}

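/*
 * SPAPR TCE API: exposes the TCE table through the generic IOMMU group
 * framework so that an external user (in practice the VFIO SPAPR TCE
 * driver) can take ownership of a table and program TCEs on behalf of
 * userspace, pinning the backing pages with get_user_pages_fast().
 */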
#ifdef CONFIG_IOMMU_API
/*
 * SPAPR TCE API
 */
static void group_release(void *iommu_data)
{
        struct iommu_table *tbl = iommu_data;
        tbl->it_group = NULL;
}

void iommu_register_group(struct iommu_table *tbl,
                int pci_domain_number, unsigned long pe_num)
{
        struct iommu_group *grp;
        char *name;

        grp = iommu_group_alloc();
        if (IS_ERR(grp)) {
                pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
                                PTR_ERR(grp));
                return;
        }
        tbl->it_group = grp;
        iommu_group_set_iommudata(grp, tbl, group_release);
        name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
                        pci_domain_number, pe_num);
        if (!name)
                return;
        iommu_group_set_name(grp, name);
        kfree(name);
}

enum dma_data_direction iommu_tce_direction(unsigned long tce)
{
        if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
                return DMA_BIDIRECTIONAL;
        else if (tce & TCE_PCI_READ)
                return DMA_TO_DEVICE;
        else if (tce & TCE_PCI_WRITE)
                return DMA_FROM_DEVICE;
        else
                return DMA_NONE;
}
EXPORT_SYMBOL_GPL(iommu_tce_direction);

void iommu_flush_tce(struct iommu_table *tbl)
{
        /* Flush/invalidate TLB caches if necessary */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        /* Make sure updates are seen by hardware */
        mb();
}
EXPORT_SYMBOL_GPL(iommu_flush_tce);

int iommu_tce_clear_param_check(struct iommu_table *tbl,
                unsigned long ioba, unsigned long tce_value,
                unsigned long npages)
{
        /* ppc_md.tce_free() does not support any value but 0 */
        if (tce_value)
                return -EINVAL;

        if (ioba & ~IOMMU_PAGE_MASK)
                return -EINVAL;

        ioba >>= IOMMU_PAGE_SHIFT;
        if (ioba < tbl->it_offset)
                return -EINVAL;

        if ((ioba + npages) > (tbl->it_offset + tbl->it_size))
                return -EINVAL;

        return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_clear_param_check);

int iommu_tce_put_param_check(struct iommu_table *tbl,
                unsigned long ioba, unsigned long tce)
{
        if (!(tce & (TCE_PCI_WRITE | TCE_PCI_READ)))
                return -EINVAL;

        if (tce & ~(IOMMU_PAGE_MASK | TCE_PCI_WRITE | TCE_PCI_READ))
                return -EINVAL;

        if (ioba & ~IOMMU_PAGE_MASK)
                return -EINVAL;

        ioba >>= IOMMU_PAGE_SHIFT;
        if (ioba < tbl->it_offset)
                return -EINVAL;

        if ((ioba + 1) > (tbl->it_offset + tbl->it_size))
                return -EINVAL;

        return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_put_param_check);

unsigned long iommu_clear_tce(struct iommu_table *tbl, unsigned long entry)
{
        unsigned long oldtce;
        struct iommu_pool *pool = get_pool(tbl, entry);

        spin_lock(&(pool->lock));

        oldtce = ppc_md.tce_get(tbl, entry);
        if (oldtce & (TCE_PCI_WRITE | TCE_PCI_READ))
                ppc_md.tce_free(tbl, entry, 1);
        else
                oldtce = 0;

        spin_unlock(&(pool->lock));

        return oldtce;
}
EXPORT_SYMBOL_GPL(iommu_clear_tce);

int iommu_clear_tces_and_put_pages(struct iommu_table *tbl,
                unsigned long entry, unsigned long pages)
{
        unsigned long oldtce;
        struct page *page;

        for ( ; pages; --pages, ++entry) {
                oldtce = iommu_clear_tce(tbl, entry);
                if (!oldtce)
                        continue;

                page = pfn_to_page(oldtce >> PAGE_SHIFT);
                WARN_ON(!page);
                if (page) {
                        if (oldtce & TCE_PCI_WRITE)
                                SetPageDirty(page);
                        put_page(page);
                }
        }

        return 0;
}
EXPORT_SYMBOL_GPL(iommu_clear_tces_and_put_pages);

/*
 * hwaddr is a kernel virtual address here (0xc... bazillion),
 * tce_build converts it to a physical address.
 */
int iommu_tce_build(struct iommu_table *tbl, unsigned long entry,
                unsigned long hwaddr, enum dma_data_direction direction)
{
        int ret = -EBUSY;
        unsigned long oldtce;
        struct iommu_pool *pool = get_pool(tbl, entry);

        spin_lock(&(pool->lock));

        oldtce = ppc_md.tce_get(tbl, entry);
        /* Add new entry if it is not busy */
        if (!(oldtce & (TCE_PCI_WRITE | TCE_PCI_READ)))
                ret = ppc_md.tce_build(tbl, entry, 1, hwaddr, direction, NULL);

        spin_unlock(&(pool->lock));

        /* if (unlikely(ret))
                pr_err("iommu_tce: %s failed on hwaddr=%lx ioba=%lx kva=%lx ret=%d\n",
                        __func__, hwaddr, entry << IOMMU_PAGE_SHIFT,
                        hwaddr, ret); */

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_tce_build);

int iommu_put_tce_user_mode(struct iommu_table *tbl, unsigned long entry,
                unsigned long tce)
{
        int ret;
        struct page *page = NULL;
        unsigned long hwaddr, offset = tce & IOMMU_PAGE_MASK & ~PAGE_MASK;
        enum dma_data_direction direction = iommu_tce_direction(tce);

        ret = get_user_pages_fast(tce & PAGE_MASK, 1,
                        direction != DMA_TO_DEVICE, &page);
        if (unlikely(ret != 1)) {
                /* pr_err("iommu_tce: get_user_pages_fast failed tce=%lx ioba=%lx ret=%d\n",
                                tce, entry << IOMMU_PAGE_SHIFT, ret); */
                return -EFAULT;
        }
        hwaddr = (unsigned long) page_address(page) + offset;

        ret = iommu_tce_build(tbl, entry, hwaddr, direction);
        if (ret)
                put_page(page);

        if (ret < 0)
                pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%d\n",
                        __func__, entry << IOMMU_PAGE_SHIFT, tce, ret);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_put_tce_user_mode);

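/*
 * Ownership handoff: while a table is owned by an external user the
 * kernel-side DMA API must not allocate from it. iommu_take_ownership()
 * therefore checks that the bitmap is empty and then marks it entirely
 * in use; iommu_release_ownership() clears it again and restores the
 * reserved bit 0 set by iommu_init_table().
 */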
int iommu_take_ownership(struct iommu_table *tbl)
{
        unsigned long sz = (tbl->it_size + 7) >> 3;

        if (tbl->it_offset == 0)
                clear_bit(0, tbl->it_map);

        if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
                pr_err("iommu_tce: it_map is not empty");
                return -EBUSY;
        }

        memset(tbl->it_map, 0xff, sz);
        iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size);

        return 0;
}
EXPORT_SYMBOL_GPL(iommu_take_ownership);

void iommu_release_ownership(struct iommu_table *tbl)
{
        unsigned long sz = (tbl->it_size + 7) >> 3;

        iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size);
        memset(tbl->it_map, 0, sz);

        /* Restore bit#0 set by iommu_init_table() */
        if (tbl->it_offset == 0)
                set_bit(0, tbl->it_map);
}
EXPORT_SYMBOL_GPL(iommu_release_ownership);

static int iommu_add_device(struct device *dev)
{
        struct iommu_table *tbl;
        int ret = 0;

        if (WARN_ON(dev->iommu_group)) {
                pr_warn("iommu_tce: device %s is already in iommu group %d, skipping\n",
                                dev_name(dev),
                                iommu_group_id(dev->iommu_group));
                return -EBUSY;
        }

        tbl = get_iommu_table_base(dev);
        if (!tbl || !tbl->it_group) {
                pr_debug("iommu_tce: skipping device %s with no tbl\n",
                                dev_name(dev));
                return 0;
        }

        pr_debug("iommu_tce: adding %s to iommu group %d\n",
                        dev_name(dev), iommu_group_id(tbl->it_group));

        ret = iommu_group_add_device(tbl->it_group, dev);
        if (ret < 0)
                pr_err("iommu_tce: %s has not been added, ret=%d\n",
                                dev_name(dev), ret);

        return ret;
}

static void iommu_del_device(struct device *dev)
{
        iommu_group_remove_device(dev);
}

static int iommu_bus_notifier(struct notifier_block *nb,
                              unsigned long action, void *data)
{
        struct device *dev = data;

        switch (action) {
        case BUS_NOTIFY_ADD_DEVICE:
                return iommu_add_device(dev);
        case BUS_NOTIFY_DEL_DEVICE:
                iommu_del_device(dev);
                return 0;
        default:
                return 0;
        }
}

static struct notifier_block tce_iommu_bus_nb = {
        .notifier_call = iommu_bus_notifier,
};

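/*
 * Add the PCI devices that already exist at boot to their IOMMU groups,
 * then register the bus notifier so hotplugged devices get the same
 * treatment.
 */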
static int __init tce_iommu_init(void)
{
        struct pci_dev *pdev = NULL;

        BUILD_BUG_ON(PAGE_SIZE < IOMMU_PAGE_SIZE);

        for_each_pci_dev(pdev)
                iommu_add_device(&pdev->dev);

        bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
        return 0;
}

subsys_initcall_sync(tce_iommu_init);

#else

void iommu_register_group(struct iommu_table *tbl,
                int pci_domain_number, unsigned long pe_num)
{
}

#endif /* CONFIG_IOMMU_API */