/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup:
 *
 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
 * Copyright (C) 2006 Olof Johansson <olof@lixom.net>
 *
 * Dynamic DMA mapping support, pSeries-specific parts, both SMP and LPAR.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
26 | ||
27 | #include <linux/init.h> | |
28 | #include <linux/types.h> | |
29 | #include <linux/slab.h> | |
30 | #include <linux/mm.h> | |
31 | #include <linux/memblock.h> | |
32 | #include <linux/spinlock.h> | |
33 | #include <linux/string.h> | |
34 | #include <linux/pci.h> | |
35 | #include <linux/dma-mapping.h> | |
36 | #include <linux/crash_dump.h> | |
37 | #include <linux/memory.h> | |
38 | #include <linux/of.h> | |
39 | #include <linux/iommu.h> | |
40 | #include <asm/io.h> | |
41 | #include <asm/prom.h> | |
42 | #include <asm/rtas.h> | |
43 | #include <asm/iommu.h> | |
44 | #include <asm/pci-bridge.h> | |
45 | #include <asm/machdep.h> | |
46 | #include <asm/firmware.h> | |
47 | #include <asm/tce.h> | |
48 | #include <asm/ppc-pci.h> | |
49 | #include <asm/udbg.h> | |
50 | #include <asm/mmzone.h> | |
51 | #include <asm/plpar_wrappers.h> | |
52 | ||
53 | #include "pseries.h" | |
54 | ||
static void iommu_pseries_free_table(struct iommu_table *tbl,
				     const char *node_name)
{
#ifdef CONFIG_IOMMU_API
	if (tbl->it_group) {
		iommu_group_put(tbl->it_group);
		BUG_ON(tbl->it_group);
	}
#endif
	iommu_free_table(tbl, node_name);
}

static void tce_invalidate_pSeries_sw(struct iommu_table *tbl,
				      __be64 *startp, __be64 *endp)
{
	u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
	unsigned long start, end, inc;

	start = __pa(startp);
	end = __pa(endp);
	inc = L1_CACHE_BYTES; /* invalidate a cacheline of TCEs at a time */

	/* If this is non-zero, change the format. We shift the
	 * address and or in the magic from the device tree. */
	if (tbl->it_busno) {
		start <<= 12;
		end <<= 12;
		inc <<= 12;
		start |= tbl->it_busno;
		end |= tbl->it_busno;
	}

	end |= inc - 1; /* round up end to be different from start */
88 | ||
89 | mb(); /* Make sure TCEs in memory are written */ | |
90 | while (start <= end) { | |
91 | out_be64(invalidate, start); | |
92 | start += inc; | |
93 | } | |
94 | } | |
95 | ||
static int tce_build_pSeries(struct iommu_table *tbl, long index,
			     long npages, unsigned long uaddr,
			     enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	u64 proto_tce;
	__be64 *tcep, *tces;
	u64 rpn;

	proto_tce = TCE_PCI_READ; // Read allowed

	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	tces = tcep = ((__be64 *)tbl->it_base) + index;

	while (npages--) {
		/* can't move this out since we might cross MEMBLOCK boundary */
		rpn = __pa(uaddr) >> TCE_SHIFT;
		*tcep = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);

		uaddr += TCE_PAGE_SIZE;
		tcep++;
	}

	if (tbl->it_type & TCE_PCI_SWINV_CREATE)
		tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
	return 0;
}
125 | ||
126 | ||
127 | static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages) | |
128 | { | |
129 | __be64 *tcep, *tces; | |
130 | ||
131 | tces = tcep = ((__be64 *)tbl->it_base) + index; | |
132 | ||
133 | while (npages--) | |
134 | *(tcep++) = 0; | |
135 | ||
136 | if (tbl->it_type & TCE_PCI_SWINV_FREE) | |
137 | tce_invalidate_pSeries_sw(tbl, tces, tcep - 1); | |
138 | } | |
139 | ||
140 | static unsigned long tce_get_pseries(struct iommu_table *tbl, long index) | |
141 | { | |
142 | __be64 *tcep; | |
143 | ||
144 | tcep = ((__be64 *)tbl->it_base) + index; | |
145 | ||
146 | return be64_to_cpu(*tcep); | |
147 | } | |
148 | ||
149 | static void tce_free_pSeriesLP(struct iommu_table*, long, long); | |
150 | static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long); | |
151 | ||
static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
			       long npages, unsigned long uaddr,
			       enum dma_data_direction direction,
			       struct dma_attrs *attrs)
{
	u64 rc = 0;
	u64 proto_tce, tce;
	u64 rpn;
	int ret = 0;
	long tcenum_start = tcenum, npages_start = npages;

	rpn = __pa(uaddr) >> TCE_SHIFT;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	while (npages--) {
		tce = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
		rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, tce);

		if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
			ret = (int)rc;
			tce_free_pSeriesLP(tbl, tcenum_start,
					   (npages_start - (npages + 1)));
			break;
		}

		if (rc && printk_ratelimit()) {
			printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
			printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
			printk("\ttcenum = 0x%llx\n", (u64)tcenum);
			printk("\ttce val = 0x%llx\n", tce);
			dump_stack();
		}

		tcenum++;
		rpn++;
	}
	return ret;
}

static DEFINE_PER_CPU(__be64 *, tce_page);

static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
				    long npages, unsigned long uaddr,
				    enum dma_data_direction direction,
				    struct dma_attrs *attrs)
{
	u64 rc = 0;
	u64 proto_tce;
	__be64 *tcep;
	u64 rpn;
	long l, limit;
	long tcenum_start = tcenum, npages_start = npages;
	int ret = 0;
	unsigned long flags;

	if ((npages == 1) || !firmware_has_feature(FW_FEATURE_MULTITCE)) {
		return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
					   direction, attrs);
	}

	local_irq_save(flags);	/* to protect tcep and the page behind it */

	tcep = __this_cpu_read(tce_page);

	/* This is safe to do since interrupts are off when we're called
	 * from iommu_alloc{,_sg}()
	 */
	if (!tcep) {
		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
		/* If allocation fails, fall back to the loop implementation */
		if (!tcep) {
			local_irq_restore(flags);
			return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
						   direction, attrs);
		}
		__this_cpu_write(tce_page, tcep);
	}

	rpn = __pa(uaddr) >> TCE_SHIFT;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	/* We can map at most one page of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE);

		for (l = 0; l < limit; l++) {
			tcep[l] = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);
			rpn++;
		}

		rc = plpar_tce_put_indirect((u64)tbl->it_index,
					    (u64)tcenum << 12,
					    (u64)__pa(tcep),
					    limit);

		npages -= limit;
		tcenum += limit;
	} while (npages > 0 && !rc);

	local_irq_restore(flags);

	if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
		ret = (int)rc;
		tce_freemulti_pSeriesLP(tbl, tcenum_start,
					(npages_start - (npages + limit)));
		return ret;
	}

	if (rc && printk_ratelimit()) {
		printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
		printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages = 0x%llx\n", (u64)npages);
		printk("\ttce[0] val = 0x%llx\n", tcep[0]);
		dump_stack();
	}
	return ret;
}

static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
	u64 rc;

	while (npages--) {
		rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0);

		if (rc && printk_ratelimit()) {
			printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
			printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
			printk("\ttcenum = 0x%llx\n", (u64)tcenum);
			dump_stack();
		}

		tcenum++;
	}
}

static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
	u64 rc;

	if (!firmware_has_feature(FW_FEATURE_MULTITCE))
		return tce_free_pSeriesLP(tbl, tcenum, npages);

	rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);

	if (rc && printk_ratelimit()) {
		printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
		printk("\trc = %lld\n", rc);
		printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages = 0x%llx\n", (u64)npages);
		dump_stack();
	}
}

static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum)
{
	u64 rc;
	unsigned long tce_ret;

	rc = plpar_tce_get((u64)tbl->it_index, (u64)tcenum << 12, &tce_ret);

	if (rc && printk_ratelimit()) {
		printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%lld\n", rc);
		printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
		printk("\ttcenum = 0x%llx\n", (u64)tcenum);
		dump_stack();
	}

	return tce_ret;
}

/* this is compatible with cells for the device tree property */
struct dynamic_dma_window_prop {
	__be32 liobn;		/* tce table number */
	__be64 dma_base;	/* address hi,lo */
	__be32 tce_shift;	/* ilog2(tce_page_size) */
	__be32 window_shift;	/* ilog2(tce_window_size) */
};

struct direct_window {
	struct device_node *device;
	const struct dynamic_dma_window_prop *prop;
	struct list_head list;
};

/* Dynamic DMA Window support */
struct ddw_query_response {
	u32 windows_available;
	u32 largest_available_block;
	u32 page_size;
	u32 migration_capable;
};

struct ddw_create_response {
	u32 liobn;
	u32 addr_hi;
	u32 addr_lo;
};

static LIST_HEAD(direct_window_list);
/* prevents races between memory on/offline and window creation */
static DEFINE_SPINLOCK(direct_window_list_lock);
/* protects initializing window twice for same device */
static DEFINE_MUTEX(direct_window_init_mutex);
#define DIRECT64_PROPNAME "linux,direct64-ddr-window-info"

static int tce_clearrange_multi_pSeriesLP(unsigned long start_pfn,
					  unsigned long num_pfn, const void *arg)
{
	const struct dynamic_dma_window_prop *maprange = arg;
	int rc;
	u64 tce_size, num_tce, dma_offset, next;
	u32 tce_shift;
	long limit;

	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of TCEs */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

	do {
		/*
		 * Clear the TCEs, at most 512 entries per H_STUFF_TCE call.
		 */
		limit = min_t(long, num_tce, 512);
		dma_offset = next + be64_to_cpu(maprange->dma_base);

		rc = plpar_tce_stuff((u64)be32_to_cpu(maprange->liobn),
				     dma_offset,
				     0, limit);
		next += limit * tce_size;
		num_tce -= limit;
	} while (num_tce > 0 && !rc);

	return rc;
}

static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
					unsigned long num_pfn, const void *arg)
{
	const struct dynamic_dma_window_prop *maprange = arg;
	u64 tce_size, num_tce, dma_offset, next, proto_tce, liobn;
	__be64 *tcep;
	u32 tce_shift;
	u64 rc = 0;
	long l, limit;

	local_irq_disable();	/* to protect tcep and the page behind it */
	tcep = __this_cpu_read(tce_page);

	if (!tcep) {
		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
		if (!tcep) {
			local_irq_enable();
			return -ENOMEM;
		}
		__this_cpu_write(tce_page, tcep);
	}

	proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;

	liobn = (u64)be32_to_cpu(maprange->liobn);
	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of TCEs */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

	/* We can map at most one page of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, num_tce, 4096/TCE_ENTRY_SIZE);
		dma_offset = next + be64_to_cpu(maprange->dma_base);

		for (l = 0; l < limit; l++) {
			tcep[l] = cpu_to_be64(proto_tce | next);
			next += tce_size;
		}

		rc = plpar_tce_put_indirect(liobn,
					    dma_offset,
					    (u64)__pa(tcep),
					    limit);

		num_tce -= limit;
	} while (num_tce > 0 && !rc);

	/* error cleanup: caller will clear whole range */

	local_irq_enable();
	return rc;
}

static int tce_setrange_multi_pSeriesLP_walk(unsigned long start_pfn,
					     unsigned long num_pfn, void *arg)
{
	return tce_setrange_multi_pSeriesLP(start_pfn, num_pfn, arg);
}

#ifdef CONFIG_PCI
static void iommu_table_setparms(struct pci_controller *phb,
				 struct device_node *dn,
				 struct iommu_table *tbl)
{
	struct device_node *node;
	const unsigned long *basep, *sw_inval;
	const u32 *sizep;

	node = phb->dn;

	basep = of_get_property(node, "linux,tce-base", NULL);
	sizep = of_get_property(node, "linux,tce-size", NULL);
	if (basep == NULL || sizep == NULL) {
		printk(KERN_ERR "PCI_DMA: iommu_table_setparms: %s has "
		       "missing tce entries!\n", dn->full_name);
		return;
	}

	tbl->it_base = (unsigned long)__va(*basep);

	if (!is_kdump_kernel())
		memset((void *)tbl->it_base, 0, *sizep);

	tbl->it_busno = phb->bus->number;
	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;

	/* Units of tce entries */
	tbl->it_offset = phb->dma_window_base_cur >> tbl->it_page_shift;

	/* Test if we are going over 2GB of DMA space */
	if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
		udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
		panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
	}

	phb->dma_window_base_cur += phb->dma_window_size;

	/* Set the tce table size - measured in entries */
	tbl->it_size = phb->dma_window_size >> tbl->it_page_shift;

	tbl->it_index = 0;
	tbl->it_blocksize = 16;
	tbl->it_type = TCE_PCI;

	sw_inval = of_get_property(node, "linux,tce-sw-invalidate-info", NULL);
	if (sw_inval) {
		/*
		 * This property contains information on how to
		 * invalidate the TCE entry. The first property is
		 * the base MMIO address used to invalidate entries.
		 * The second property tells us the format of the TCE
		 * invalidate (whether it needs to be shifted) and
		 * some magic routing info to add to our invalidate
		 * command.
		 */
		tbl->it_index = (unsigned long)ioremap(sw_inval[0], 8);
		tbl->it_busno = sw_inval[1]; /* overload this with magic */
		tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
	}
}

/*
 * iommu_table_setparms_lpar
 *
 * Function: On pSeries LPAR systems, return TCE table info, given a pci bus.
 */
static void iommu_table_setparms_lpar(struct pci_controller *phb,
				      struct device_node *dn,
				      struct iommu_table *tbl,
				      const __be32 *dma_window)
{
	unsigned long offset, size;

	of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size);

	tbl->it_busno = phb->bus->number;
	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
	tbl->it_base = 0;
	tbl->it_blocksize = 16;
	tbl->it_type = TCE_PCI;
	tbl->it_offset = offset >> tbl->it_page_shift;
	tbl->it_size = size >> tbl->it_page_shift;
}

struct iommu_table_ops iommu_table_pseries_ops = {
	.set = tce_build_pSeries,
	.clear = tce_free_pSeries,
	.get = tce_get_pseries
};

static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
{
	struct device_node *dn;
	struct iommu_table *tbl;
	struct device_node *isa_dn, *isa_dn_orig;
	struct device_node *tmp;
	struct pci_dn *pci;
	int children;

	dn = pci_bus_to_OF_node(bus);

	pr_debug("pci_dma_bus_setup_pSeries: setting up bus %s\n", dn->full_name);

	if (bus->self) {
		/* This is not a root bus, any setup will be done for the
		 * device-side of the bridge in iommu_dev_setup_pSeries().
		 */
		return;
	}
	pci = PCI_DN(dn);

	/* Check if the ISA bus on the system is under
	 * this PHB.
	 */
	isa_dn = isa_dn_orig = of_find_node_by_type(NULL, "isa");

	while (isa_dn && isa_dn != dn)
		isa_dn = isa_dn->parent;

	of_node_put(isa_dn_orig);

	/* Count number of direct PCI children of the PHB. */
	for (children = 0, tmp = dn->child; tmp; tmp = tmp->sibling)
		children++;

	pr_debug("Children: %d\n", children);

	/* Calculate amount of DMA window per slot. Each window must be
	 * a power of two (due to pci_alloc_consistent requirements).
	 *
	 * Keep 256MB aside for PHBs with ISA.
	 */

	if (!isa_dn) {
		/* No ISA/IDE - just set window size and return */
		pci->phb->dma_window_size = 0x80000000ul; /* To be divided */

		while (pci->phb->dma_window_size * children > 0x80000000ul)
			pci->phb->dma_window_size >>= 1;
		pr_debug("No ISA/IDE, window size is 0x%llx\n",
			 pci->phb->dma_window_size);
		pci->phb->dma_window_base_cur = 0;

		return;
	}

	/* If we have ISA, then we probably have an IDE
	 * controller too. Allocate a 128MB table but
	 * skip the first 128MB to avoid stepping on ISA
	 * space.
	 */
	pci->phb->dma_window_size = 0x8000000ul;
	pci->phb->dma_window_base_cur = 0x8000000ul;

	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
			   pci->phb->node);

	iommu_table_setparms(pci->phb, dn, tbl);
	tbl->it_ops = &iommu_table_pseries_ops;
	pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
	iommu_register_group(tbl, pci_domain_nr(bus), 0);

	/* Divide the rest (1.75GB) among the children */
	pci->phb->dma_window_size = 0x80000000ul;
	while (pci->phb->dma_window_size * children > 0x70000000ul)
		pci->phb->dma_window_size >>= 1;

	pr_debug("ISA/IDE, window size is 0x%llx\n", pci->phb->dma_window_size);
}

struct iommu_table_ops iommu_table_lpar_multi_ops = {
	.set = tce_buildmulti_pSeriesLP,
	.clear = tce_freemulti_pSeriesLP,
	.get = tce_get_pSeriesLP
};

static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
{
	struct iommu_table *tbl;
	struct device_node *dn, *pdn;
	struct pci_dn *ppci;
	const __be32 *dma_window = NULL;

	dn = pci_bus_to_OF_node(bus);

	pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %s\n",
		 dn->full_name);

	/* Find nearest ibm,dma-window, walking up the device tree */
	for (pdn = dn; pdn != NULL; pdn = pdn->parent) {
		dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
		if (dma_window != NULL)
			break;
	}

	if (dma_window == NULL) {
		pr_debug("  no ibm,dma-window property!\n");
		return;
	}

	ppci = PCI_DN(pdn);

	pr_debug("  parent is %s, iommu_table: 0x%p\n",
		 pdn->full_name, ppci->iommu_table);

	if (!ppci->iommu_table) {
		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
				   ppci->phb->node);
		iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window);
		tbl->it_ops = &iommu_table_lpar_multi_ops;
		ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node);
		iommu_register_group(tbl, pci_domain_nr(bus), 0);
		pr_debug("  created table: %p\n", ppci->iommu_table);
	}
}

static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
{
	struct device_node *dn;
	struct iommu_table *tbl;

	pr_debug("pci_dma_dev_setup_pSeries: %s\n", pci_name(dev));

	dn = dev->dev.of_node;

	/* If we're the direct child of a root bus, then we need to allocate
	 * an iommu table ourselves. The bus setup code should have set up
	 * the window sizes already.
	 */
	if (!dev->bus->self) {
		struct pci_controller *phb = PCI_DN(dn)->phb;

		pr_debug(" --> first child, no bridge. Allocating iommu table.\n");
		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
				   phb->node);
		iommu_table_setparms(phb, dn, tbl);
		tbl->it_ops = &iommu_table_pseries_ops;
		PCI_DN(dn)->iommu_table = iommu_init_table(tbl, phb->node);
		iommu_register_group(tbl, pci_domain_nr(phb->bus), 0);
		set_iommu_table_base(&dev->dev, tbl);
		iommu_add_device(&dev->dev);
		return;
	}

	/* If this device is further down the bus tree, search upwards until
	 * an already allocated iommu table is found and use that.
	 */

	while (dn && PCI_DN(dn) && PCI_DN(dn)->iommu_table == NULL)
		dn = dn->parent;

	if (dn && PCI_DN(dn)) {
		set_iommu_table_base(&dev->dev, PCI_DN(dn)->iommu_table);
		iommu_add_device(&dev->dev);
	} else
		printk(KERN_WARNING "iommu: Device %s has no iommu table\n",
		       pci_name(dev));
}

static int __read_mostly disable_ddw;

static int __init disable_ddw_setup(char *str)
{
	disable_ddw = 1;
	printk(KERN_INFO "ppc iommu: disabling ddw.\n");

	return 0;
}

early_param("disable_ddw", disable_ddw_setup);

static void remove_ddw(struct device_node *np, bool remove_prop)
{
	struct dynamic_dma_window_prop *dwp;
	struct property *win64;
	u32 ddw_avail[3];
	u64 liobn;
	int ret = 0;

	ret = of_property_read_u32_array(np, "ibm,ddw-applicable",
					 &ddw_avail[0], 3);

	win64 = of_find_property(np, DIRECT64_PROPNAME, NULL);
	if (!win64)
		return;

	if (ret || win64->length < sizeof(*dwp))
		goto delprop;

	dwp = win64->value;
	liobn = (u64)be32_to_cpu(dwp->liobn);

	/* clear the whole window, note the arg is in kernel pages */
	ret = tce_clearrange_multi_pSeriesLP(0,
		1ULL << (be32_to_cpu(dwp->window_shift) - PAGE_SHIFT), dwp);
	if (ret)
		pr_warning("%s failed to clear tces in window.\n",
			   np->full_name);
	else
		pr_debug("%s successfully cleared tces in window.\n",
			 np->full_name);

	ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn);
	if (ret)
		pr_warning("%s: failed to remove direct window: rtas returned "
			   "%d to ibm,remove-pe-dma-window(%x) %llx\n",
			   np->full_name, ret, ddw_avail[2], liobn);
	else
		pr_debug("%s: successfully removed direct window: rtas returned "
			 "%d to ibm,remove-pe-dma-window(%x) %llx\n",
			 np->full_name, ret, ddw_avail[2], liobn);

delprop:
	if (remove_prop)
		ret = of_remove_property(np, win64);
	if (ret)
		pr_warning("%s: failed to remove direct window property: %d\n",
			   np->full_name, ret);
}

static u64 find_existing_ddw(struct device_node *pdn)
{
	struct direct_window *window;
	const struct dynamic_dma_window_prop *direct64;
	u64 dma_addr = 0;

	spin_lock(&direct_window_list_lock);
	/* check if we already created a window and dupe that config if so */
	list_for_each_entry(window, &direct_window_list, list) {
		if (window->device == pdn) {
			direct64 = window->prop;
			dma_addr = be64_to_cpu(direct64->dma_base);
			break;
		}
	}
	spin_unlock(&direct_window_list_lock);

	return dma_addr;
}

static int find_existing_ddw_windows(void)
{
	int len;
	struct device_node *pdn;
	struct direct_window *window;
	const struct dynamic_dma_window_prop *direct64;

	if (!firmware_has_feature(FW_FEATURE_LPAR))
		return 0;

	for_each_node_with_property(pdn, DIRECT64_PROPNAME) {
		direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len);
		if (!direct64)
			continue;

		window = kzalloc(sizeof(*window), GFP_KERNEL);
		if (!window || len < sizeof(struct dynamic_dma_window_prop)) {
			kfree(window);
			remove_ddw(pdn, true);
			continue;
		}

		window->device = pdn;
		window->prop = direct64;
		spin_lock(&direct_window_list_lock);
		list_add(&window->list, &direct_window_list);
		spin_unlock(&direct_window_list_lock);
	}

	return 0;
}
machine_arch_initcall(pseries, find_existing_ddw_windows);

static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
		     struct ddw_query_response *query)
{
	struct eeh_dev *edev;
	u32 cfg_addr;
	u64 buid;
	int ret;

	/*
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	 */
	edev = pci_dev_to_eeh_dev(dev);
	cfg_addr = edev->config_addr;
	if (edev->pe_config_addr)
		cfg_addr = edev->pe_config_addr;
	buid = edev->phb->buid;

	ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
			cfg_addr, BUID_HI(buid), BUID_LO(buid));
	dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x"
		 " returned %d\n", ddw_avail[0], cfg_addr, BUID_HI(buid),
		 BUID_LO(buid), ret);
	return ret;
}

static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
		      struct ddw_create_response *create, int page_shift,
		      int window_shift)
{
	struct eeh_dev *edev;
	u32 cfg_addr;
	u64 buid;
	int ret;

	/*
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	 */
	edev = pci_dev_to_eeh_dev(dev);
	cfg_addr = edev->config_addr;
	if (edev->pe_config_addr)
		cfg_addr = edev->pe_config_addr;
	buid = edev->phb->buid;

	do {
		/* extra outputs are LIOBN and dma-addr (hi, lo) */
		ret = rtas_call(ddw_avail[1], 5, 4, (u32 *)create,
				cfg_addr, BUID_HI(buid), BUID_LO(buid),
				page_shift, window_shift);
	} while (rtas_busy_delay(ret));
	dev_info(&dev->dev,
		 "ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d "
		 "(liobn = 0x%x starting addr = %x %x)\n", ddw_avail[1],
		 cfg_addr, BUID_HI(buid), BUID_LO(buid), page_shift,
		 window_shift, ret, create->liobn, create->addr_hi, create->addr_lo);

	return ret;
}

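/*
 * PEs for which DDW setup has failed are remembered here so that a later
 * function of the same device doesn't retry and race with in-flight DMA.
 */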
struct failed_ddw_pdn {
	struct device_node *pdn;
	struct list_head list;
};

static LIST_HEAD(failed_ddw_pdn_list);

/*
 * If the PE supports dynamic dma windows, and there is space for a table
 * that can map all pages in a linear offset, then set up such a table,
 * and record the dma-offset in the struct device.
 *
 * dev: the pci device we are checking
 * pdn: the parent pe node with the ibm,dma-window property
 * Future: also check if we can remap the base window for our base page size
 *
 * returns the dma offset for use by dma_set_mask
 */
static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
{
	int len, ret;
	struct ddw_query_response query;
	struct ddw_create_response create;
	int page_shift;
	u64 dma_addr, max_addr;
	struct device_node *dn;
	u32 ddw_avail[3];
	struct direct_window *window;
	struct property *win64;
	struct dynamic_dma_window_prop *ddwprop;
	struct failed_ddw_pdn *fpdn;

	mutex_lock(&direct_window_init_mutex);

	dma_addr = find_existing_ddw(pdn);
	if (dma_addr != 0)
		goto out_unlock;

	/*
	 * If we already went through this for a previous function of
	 * the same device and failed, we don't want to muck with the
	 * DMA window again, as it will race with in-flight operations
	 * and can lead to EEHs. The above mutex protects access to the
	 * list.
	 */
	list_for_each_entry(fpdn, &failed_ddw_pdn_list, list) {
		if (!strcmp(fpdn->pdn->full_name, pdn->full_name))
			goto out_unlock;
	}

	/*
	 * the ibm,ddw-applicable property holds the tokens for:
	 * ibm,query-pe-dma-window
	 * ibm,create-pe-dma-window
	 * ibm,remove-pe-dma-window
	 * for the given node in that order.
	 * the property is actually in the parent, not the PE
	 */
	ret = of_property_read_u32_array(pdn, "ibm,ddw-applicable",
					 &ddw_avail[0], 3);
	if (ret)
		goto out_failed;

	/*
	 * Query if there is a second window of size to map the
	 * whole partition. Query returns number of windows, largest
	 * block assigned to PE (partition endpoint), and two bitmasks
	 * of page sizes: supported and supported for migrate-dma.
	 */
	dn = pci_device_to_OF_node(dev);
	ret = query_ddw(dev, ddw_avail, &query);
	if (ret != 0)
		goto out_failed;

	if (query.windows_available == 0) {
		/*
		 * no additional windows are available for this device.
		 * We might be able to reallocate the existing window,
		 * trading in for a larger page size.
		 */
		dev_dbg(&dev->dev, "no free dynamic windows");
		goto out_failed;
	}
	if (query.page_size & 4) {
		page_shift = 24; /* 16MB */
	} else if (query.page_size & 2) {
		page_shift = 16; /* 64kB */
	} else if (query.page_size & 1) {
		page_shift = 12; /* 4kB */
	} else {
		dev_dbg(&dev->dev, "no supported direct page size in mask %x",
			query.page_size);
		goto out_failed;
	}
	/* verify the window * number of ptes will map the partition */
	/* check largest block * page size > max memory hotplug addr */
	max_addr = memory_hotplug_max();
	if (query.largest_available_block < (max_addr >> page_shift)) {
		dev_dbg(&dev->dev, "can't map partition max 0x%llx with %u "
			"%llu-sized pages\n", max_addr, query.largest_available_block,
			1ULL << page_shift);
		goto out_failed;
	}
	len = order_base_2(max_addr);
	win64 = kzalloc(sizeof(struct property), GFP_KERNEL);
	if (!win64) {
		dev_info(&dev->dev,
			 "couldn't allocate property for 64bit dma window\n");
		goto out_failed;
	}
	win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL);
	win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL);
	win64->length = sizeof(*ddwprop);
	if (!win64->name || !win64->value) {
		dev_info(&dev->dev,
			 "couldn't allocate property name and value\n");
		goto out_free_prop;
	}

	ret = create_ddw(dev, ddw_avail, &create, page_shift, len);
	if (ret != 0)
		goto out_free_prop;

	ddwprop->liobn = cpu_to_be32(create.liobn);
	ddwprop->dma_base = cpu_to_be64(((u64)create.addr_hi << 32) |
			create.addr_lo);
	ddwprop->tce_shift = cpu_to_be32(page_shift);
	ddwprop->window_shift = cpu_to_be32(len);

	dev_dbg(&dev->dev, "created tce table LIOBN 0x%x for %s\n",
		create.liobn, dn->full_name);

	window = kzalloc(sizeof(*window), GFP_KERNEL);
	if (!window)
		goto out_clear_window;

	ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT,
			win64->value, tce_setrange_multi_pSeriesLP_walk);
	if (ret) {
		dev_info(&dev->dev, "failed to map direct window for %s: %d\n",
			 dn->full_name, ret);
		goto out_free_window;
	}

	ret = of_add_property(pdn, win64);
	if (ret) {
		dev_err(&dev->dev, "unable to add dma window property for %s: %d",
			pdn->full_name, ret);
		goto out_free_window;
	}

	window->device = pdn;
	window->prop = ddwprop;
	spin_lock(&direct_window_list_lock);
	list_add(&window->list, &direct_window_list);
	spin_unlock(&direct_window_list_lock);

	dma_addr = be64_to_cpu(ddwprop->dma_base);
	goto out_unlock;

out_free_window:
	kfree(window);

out_clear_window:
	remove_ddw(pdn, true);

out_free_prop:
	kfree(win64->name);
	kfree(win64->value);
	kfree(win64);

out_failed:

	fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL);
	if (!fpdn)
		goto out_unlock;
	fpdn->pdn = pdn;
	list_add(&fpdn->list, &failed_ddw_pdn_list);

out_unlock:
	mutex_unlock(&direct_window_init_mutex);
	return dma_addr;
}

static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
{
	struct device_node *pdn, *dn;
	struct iommu_table *tbl;
	const __be32 *dma_window = NULL;
	struct pci_dn *pci;

	pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));

	/* dev setup for LPAR is a little tricky, since the device tree might
	 * contain the dma-window properties per-device and not necessarily
	 * for the bus. So we need to search upwards in the tree until we
	 * either hit a dma-window property, OR find a parent with a table
	 * already allocated.
	 */
	dn = pci_device_to_OF_node(dev);
	pr_debug("  node is %s\n", dn->full_name);

	for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
	     pdn = pdn->parent) {
		dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
		if (dma_window)
			break;
	}

	if (!pdn || !PCI_DN(pdn)) {
		printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: "
		       "no DMA window found for pci dev=%s dn=%s\n",
		       pci_name(dev), of_node_full_name(dn));
		return;
	}
	pr_debug("  parent is %s\n", pdn->full_name);

	pci = PCI_DN(pdn);
	if (!pci->iommu_table) {
		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
				   pci->phb->node);
		iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window);
		tbl->it_ops = &iommu_table_lpar_multi_ops;
		pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
		iommu_register_group(tbl, pci_domain_nr(pci->phb->bus), 0);
		pr_debug("  created table: %p\n", pci->iommu_table);
	} else {
		pr_debug("  found DMA window, table: %p\n", pci->iommu_table);
	}

	set_iommu_table_base(&dev->dev, pci->iommu_table);
	iommu_add_device(&dev->dev);
}

static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
{
	bool ddw_enabled = false;
	struct device_node *pdn, *dn;
	struct pci_dev *pdev;
	const __be32 *dma_window = NULL;
	u64 dma_offset;

	if (!dev->dma_mask)
		return -EIO;

	if (!dev_is_pci(dev))
		goto check_mask;

	pdev = to_pci_dev(dev);

	/* only attempt to use a new window if 64-bit DMA is requested */
	if (!disable_ddw && dma_mask == DMA_BIT_MASK(64)) {
		dn = pci_device_to_OF_node(pdev);
		dev_dbg(dev, "node is %s\n", dn->full_name);

		/*
		 * the device tree might contain the dma-window properties
		 * per-device and not necessarily for the bus. So we need to
		 * search upwards in the tree until we either hit a dma-window
		 * property, OR find a parent with a table already allocated.
		 */
		for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
		     pdn = pdn->parent) {
			dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
			if (dma_window)
				break;
		}
		if (pdn && PCI_DN(pdn)) {
			dma_offset = enable_ddw(pdev, pdn);
			if (dma_offset != 0) {
				dev_info(dev, "Using 64-bit direct DMA at offset %llx\n", dma_offset);
				set_dma_offset(dev, dma_offset);
				set_dma_ops(dev, &dma_direct_ops);
				ddw_enabled = true;
			}
		}
	}

	/* fall back on iommu ops, restore table pointer with ops */
	if (!ddw_enabled && get_dma_ops(dev) != &dma_iommu_ops) {
		dev_info(dev, "Restoring 32-bit DMA via iommu\n");
		set_dma_ops(dev, &dma_iommu_ops);
		pci_dma_dev_setup_pSeriesLP(pdev);
	}

check_mask:
	if (!dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;
	return 0;
}

static u64 dma_get_required_mask_pSeriesLP(struct device *dev)
{
	if (!dev->dma_mask)
		return 0;

	if (!disable_ddw && dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
		struct device_node *dn;

		dn = pci_device_to_OF_node(pdev);

		/* search upwards for ibm,dma-window */
		for (; dn && PCI_DN(dn) && !PCI_DN(dn)->iommu_table;
		     dn = dn->parent)
			if (of_get_property(dn, "ibm,dma-window", NULL))
				break;
		/* if there is an ibm,ddw-applicable property require 64 bits */
		if (dn && PCI_DN(dn) &&
		    of_get_property(dn, "ibm,ddw-applicable", NULL))
			return DMA_BIT_MASK(64);
	}

	return dma_iommu_ops.get_required_mask(dev);
}

#else  /* CONFIG_PCI */
#define pci_dma_bus_setup_pSeries	NULL
#define pci_dma_dev_setup_pSeries	NULL
#define pci_dma_bus_setup_pSeriesLP	NULL
#define pci_dma_dev_setup_pSeriesLP	NULL
#define dma_set_mask_pSeriesLP		NULL
#define dma_get_required_mask_pSeriesLP	NULL
#endif /* !CONFIG_PCI */

static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
			      void *data)
{
	struct direct_window *window;
	struct memory_notify *arg = data;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn,
					arg->nr_pages, window->prop);
			/* XXX log error */
		}
		spin_unlock(&direct_window_list_lock);
		break;
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE:
		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn,
					arg->nr_pages, window->prop);
			/* XXX log error */
		}
		spin_unlock(&direct_window_list_lock);
		break;
	default:
		break;
	}
	if (ret && action != MEM_CANCEL_ONLINE)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

static struct notifier_block iommu_mem_nb = {
	.notifier_call = iommu_mem_notifier,
};

static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
	int err = NOTIFY_OK;
	struct of_reconfig_data *rd = data;
	struct device_node *np = rd->dn;
	struct pci_dn *pci = PCI_DN(np);
	struct direct_window *window;

	switch (action) {
	case OF_RECONFIG_DETACH_NODE:
		/*
		 * Removing the property will invoke the reconfig
		 * notifier again, which causes a deadlock on the
		 * read-write semaphore of the notifier chain. So
		 * we have to remove the property when releasing
		 * the device node.
		 */
		remove_ddw(np, false);
		if (pci && pci->iommu_table)
			iommu_pseries_free_table(pci->iommu_table,
						 np->full_name);

		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			if (window->device == np) {
				list_del(&window->list);
				kfree(window);
				break;
			}
		}
		spin_unlock(&direct_window_list_lock);
		break;
	default:
		err = NOTIFY_DONE;
		break;
	}
	return err;
}

static struct notifier_block iommu_reconfig_nb = {
	.notifier_call = iommu_reconfig_notifier,
};

/* These are called very early. */
void iommu_init_early_pSeries(void)
{
	if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL))
		return;

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
		pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
		ppc_md.dma_set_mask = dma_set_mask_pSeriesLP;
		ppc_md.dma_get_required_mask = dma_get_required_mask_pSeriesLP;
	} else {
		pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeries;
		pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeries;
	}

	of_reconfig_notifier_register(&iommu_reconfig_nb);
	register_memory_notifier(&iommu_mem_nb);

	set_pci_dma_ops(&dma_iommu_ops);
}

static int __init disable_multitce(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_LPAR) &&
	    firmware_has_feature(FW_FEATURE_MULTITCE)) {
		printk(KERN_INFO "Disabling MULTITCE firmware feature\n");
		powerpc_firmware_features &= ~FW_FEATURE_MULTITCE;
	}
	return 1;
}

__setup("multitce=", disable_multitce);

machine_subsys_initcall_sync(pseries, tce_iommu_bus_notifier_init);