/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
 * Copyright (C) 2006 Olof Johansson <olof@lixom.net>
 *
 * Dynamic DMA mapping support, pSeries-specific parts, both SMP and LPAR.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/of.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/tce.h>
#include <asm/ppc-pci.h>
#include <asm/udbg.h>
#include <asm/mmzone.h>
#include <asm/plpar_wrappers.h>
static void tce_invalidate_pSeries_sw(struct iommu_table *tbl,
				      __be64 *startp, __be64 *endp)
{
	u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
	unsigned long start, end, inc;

	start = __pa(startp);
	end = __pa(endp);
	inc = L1_CACHE_BYTES; /* invalidate a cacheline of TCEs at a time */

	/* If this is non-zero, change the format.  We shift the
	 * address and or in the magic from the device tree. */
	if (tbl->it_busno) {
		start <<= 12;
		end <<= 12;
		inc <<= 12;
		start |= tbl->it_busno;
		end |= tbl->it_busno;
	}

	end |= inc - 1; /* round up end to be different than start */

	mb(); /* Make sure TCEs in memory are written */
	while (start <= end) {
		out_be64(invalidate, start);
		start += inc;
	}
}
static int tce_build_pSeries(struct iommu_table *tbl, long index,
			     long npages, unsigned long uaddr,
			     enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	u64 proto_tce;
	__be64 *tcep, *tces;
	u64 rpn;

	proto_tce = TCE_PCI_READ; /* Read allowed */

	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	tces = tcep = ((__be64 *)tbl->it_base) + index;

	while (npages--) {
		/* can't move this out since we might cross MEMBLOCK boundary */
		rpn = __pa(uaddr) >> TCE_SHIFT;
		*tcep = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);

		uaddr += TCE_PAGE_SIZE;
		tcep++;
	}

	if (tbl->it_type & TCE_PCI_SWINV_CREATE)
		tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
	return 0;
}
static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
{
	__be64 *tcep, *tces;

	tces = tcep = ((__be64 *)tbl->it_base) + index;

	while (npages--)
		*(tcep++) = 0;

	if (tbl->it_type & TCE_PCI_SWINV_FREE)
		tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
}
static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
{
	__be64 *tcep;

	tcep = ((__be64 *)tbl->it_base) + index;

	return be64_to_cpu(*tcep);
}
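/*
 * The helpers above run on bare-metal pSeries: the TCE table lives in
 * system memory at tbl->it_base and entries are written directly, with
 * an optional MMIO "software invalidate" poke afterwards.  The *LP
 * variants below are for LPARs, where the table is owned by the
 * hypervisor and every update goes through the H_PUT_TCE family of
 * hcalls (plpar_tce_put() and friends).
 */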
static void tce_free_pSeriesLP(struct iommu_table*, long, long);
static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long);
static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
			       long npages, unsigned long uaddr,
			       enum dma_data_direction direction,
			       struct dma_attrs *attrs)
{
	u64 rc = 0;
	u64 proto_tce, tce;
	u64 rpn;
	int ret = 0;
	long tcenum_start = tcenum, npages_start = npages;

	rpn = __pa(uaddr) >> TCE_SHIFT;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	while (npages--) {
		tce = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
		rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, tce);

		if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
			ret = (int)rc;
			tce_free_pSeriesLP(tbl, tcenum_start,
					   (npages_start - (npages + 1)));
			break;
		}

		if (rc && printk_ratelimit()) {
			printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
			printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
			printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
			printk("\ttce val = 0x%llx\n", tce);
		}

		tcenum++;
		rpn++;
	}
	return ret;
}
static DEFINE_PER_CPU(__be64 *, tce_page);
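/*
 * tce_page is a per-cpu staging buffer: tce_buildmulti_pSeriesLP() and
 * tce_setrange_multi_pSeriesLP() fill it with up to one page worth of
 * TCEs (4096/TCE_ENTRY_SIZE entries) and hand its physical address to
 * plpar_tce_put_indirect(), so a whole batch is installed with a single
 * hcall instead of one hcall per page.  Interrupts are kept disabled
 * around its use, which is what protects the buffer from reentrancy.
 */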
static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
				    long npages, unsigned long uaddr,
				    enum dma_data_direction direction,
				    struct dma_attrs *attrs)
{
	u64 rc = 0;
	u64 proto_tce;
	__be64 *tcep, *tces;
	u64 rpn;
	long l, limit;
	long tcenum_start = tcenum, npages_start = npages;
	int ret = 0;
	unsigned long flags;

	if (npages == 1) {
		return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
					   direction, attrs);
	}

	local_irq_save(flags);	/* to protect tcep and the page behind it */

	tcep = __this_cpu_read(tce_page);

	/* This is safe to do since interrupts are off when we're called
	 * from iommu_alloc{,_sg}()
	 */
	if (!tcep) {
		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
		/* If allocation fails, fall back to the loop implementation */
		if (!tcep) {
			local_irq_restore(flags);
			return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
						   direction, attrs);
		}
		__this_cpu_write(tce_page, tcep);
	}

	rpn = __pa(uaddr) >> TCE_SHIFT;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	tces = tcep;

	/* We can map max one pageful of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE);

		for (l = 0; l < limit; l++) {
			tcep[l] = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);
			rpn++;
		}

		rc = plpar_tce_put_indirect((u64)tbl->it_index,
					    (u64)tcenum << 12,
					    (u64)__pa(tcep),
					    limit);

		npages -= limit;
		tcenum += limit;
	} while (npages > 0 && !rc);

	local_irq_restore(flags);

	if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
		ret = (int)rc;
		tce_freemulti_pSeriesLP(tbl, tcenum_start,
					(npages_start - (npages + limit)));
		return ret;
	}

	if (rc && printk_ratelimit()) {
		printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages  = 0x%llx\n", (u64)npages);
		printk("\ttce[0] val = 0x%llx\n", tcep[0]);
	}
	return ret;
}
static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
	u64 rc;

	while (npages--) {
		rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0);

		if (rc && printk_ratelimit()) {
			printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
			printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
			printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
		}

		tcenum++;
	}
}
static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
	u64 rc;

	rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);

	if (rc && printk_ratelimit()) {
		printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
		printk("\trc      = %lld\n", rc);
		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages  = 0x%llx\n", (u64)npages);
	}
}
static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum)
{
	u64 rc;
	unsigned long tce_ret;

	rc = plpar_tce_get((u64)tbl->it_index, (u64)tcenum << 12, &tce_ret);

	if (rc && printk_ratelimit()) {
		printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%lld\n", rc);
		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
		printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
	}

	return tce_ret;
}
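/*
 * Everything below implements "dynamic DMA windows" (DDW): when firmware
 * allows it, a second, 64-bit DMA window large enough to map all of
 * partition memory linearly is created, and capable devices are switched
 * to the direct DMA ops so they bypass per-mapping TCE updates entirely.
 */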
/* this is compatible with cells for the device tree property */
struct dynamic_dma_window_prop {
	__be32	liobn;		/* tce table number */
	__be64	dma_base;	/* address hi,lo */
	__be32	tce_shift;	/* ilog2(tce_page_size) */
	__be32	window_shift;	/* ilog2(tce_window_size) */
};

struct direct_window {
	struct device_node *device;
	const struct dynamic_dma_window_prop *prop;
	struct list_head list;
};
/* Dynamic DMA Window support */
struct ddw_query_response {
	u32 windows_available;
	u32 largest_available_block;
	u32 page_size;
	u32 migration_capable;
};

struct ddw_create_response {
	u32 liobn;
	u32 addr_hi;
	u32 addr_lo;
};
static LIST_HEAD(direct_window_list);
/* prevents races between memory on/offline and window creation */
static DEFINE_SPINLOCK(direct_window_list_lock);
/* protects initializing window twice for same device */
static DEFINE_MUTEX(direct_window_init_mutex);
#define DIRECT64_PROPNAME "linux,direct64-ddr-window-info"
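/*
 * A window created by enable_ddw() is recorded in a DIRECT64_PROPNAME
 * property on the device node; find_existing_ddw_windows() scans for
 * that property at boot so a window that already exists (for example
 * one set up before a kexec) is reused rather than recreated.
 */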
static int tce_clearrange_multi_pSeriesLP(unsigned long start_pfn,
		unsigned long num_pfn, const void *arg)
{
	const struct dynamic_dma_window_prop *maprange = arg;
	int rc;
	u64 tce_size, num_tce, dma_offset, next;
	u32 tce_shift;
	long limit;

	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of tces */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, num_tce, 512);
		dma_offset = next + be64_to_cpu(maprange->dma_base);

		rc = plpar_tce_stuff((u64)be32_to_cpu(maprange->liobn),
				     dma_offset, 0, limit);
		next += limit * tce_size;
		num_tce -= limit;
	} while (num_tce > 0 && !rc);

	return rc;
}
static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
		unsigned long num_pfn, const void *arg)
{
	const struct dynamic_dma_window_prop *maprange = arg;
	u64 tce_size, num_tce, dma_offset, next, proto_tce, liobn;
	__be64 *tcep;
	u32 tce_shift;
	u64 rc = 0;
	long l, limit;

	local_irq_disable();	/* to protect tcep and the page behind it */
	tcep = __this_cpu_read(tce_page);

	if (!tcep) {
		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
		if (!tcep) {
			local_irq_enable();
			return -ENOMEM;
		}
		__this_cpu_write(tce_page, tcep);
	}

	proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;

	liobn = (u64)be32_to_cpu(maprange->liobn);
	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of tces */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

	/* We can map max one pageful of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, num_tce, 4096/TCE_ENTRY_SIZE);
		dma_offset = next + be64_to_cpu(maprange->dma_base);

		for (l = 0; l < limit; l++) {
			tcep[l] = cpu_to_be64(proto_tce | next);
			next += tce_size;
		}

		rc = plpar_tce_put_indirect(liobn,
					    dma_offset,
					    (u64)__pa(tcep),
					    limit);

		num_tce -= limit;
	} while (num_tce > 0 && !rc);

	/* error cleanup: caller will clear whole range */

	local_irq_enable();
	return rc;
}

static int tce_setrange_multi_pSeriesLP_walk(unsigned long start_pfn,
		unsigned long num_pfn, void *arg)
{
	return tce_setrange_multi_pSeriesLP(start_pfn, num_pfn, arg);
}
static void iommu_table_setparms(struct pci_controller *phb,
				 struct device_node *dn,
				 struct iommu_table *tbl)
{
	struct device_node *node;
	const unsigned long *basep, *sw_inval;
	const u32 *sizep;

	node = phb->dn;

	basep = of_get_property(node, "linux,tce-base", NULL);
	sizep = of_get_property(node, "linux,tce-size", NULL);
	if (basep == NULL || sizep == NULL) {
		printk(KERN_ERR "PCI_DMA: iommu_table_setparms: %s has "
				"missing tce entries !\n", dn->full_name);
		return;
	}

	tbl->it_base = (unsigned long)__va(*basep);

	if (!is_kdump_kernel())
		memset((void *)tbl->it_base, 0, *sizep);

	tbl->it_busno = phb->bus->number;
	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;

	/* Units of tce entries */
	tbl->it_offset = phb->dma_window_base_cur >> tbl->it_page_shift;

	/* Test if we are going over 2GB of DMA space */
	if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
		udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
		panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
	}

	phb->dma_window_base_cur += phb->dma_window_size;

	/* Set the tce table size - measured in entries */
	tbl->it_size = phb->dma_window_size >> tbl->it_page_shift;

	tbl->it_index = 0;
	tbl->it_blocksize = 16;
	tbl->it_type = TCE_PCI;

	sw_inval = of_get_property(node, "linux,tce-sw-invalidate-info", NULL);
	if (sw_inval) {
		/*
		 * This property contains information on how to
		 * invalidate the TCE entry.  The first property is
		 * the base MMIO address used to invalidate entries.
		 * The second property tells us the format of the TCE
		 * invalidate (whether it needs to be shifted) and
		 * some magic routing info to add to our invalidate
		 * command.
		 */
		tbl->it_index = (unsigned long) ioremap(sw_inval[0], 8);
		tbl->it_busno = sw_inval[1]; /* overload this with magic */
		tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
	}
}
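/*
 * Note how tbl->it_index is overloaded: on bare metal it holds the
 * ioremapped software-invalidate register set up above, while in the
 * LPAR variant below of_parse_dma_window() stores the LIOBN there,
 * which the tce_*_pSeriesLP() helpers pass to the H_PUT_TCE hcalls.
 */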
/*
 * iommu_table_setparms_lpar
 *
 * Function: On pSeries LPAR systems, return TCE table info, given a pci bus.
 */
static void iommu_table_setparms_lpar(struct pci_controller *phb,
				      struct device_node *dn,
				      struct iommu_table *tbl,
				      const __be32 *dma_window)
{
	unsigned long offset, size;

	of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size);

	tbl->it_busno = phb->bus->number;
	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
	tbl->it_base = 0;
	tbl->it_blocksize = 16;
	tbl->it_type = TCE_PCI;
	tbl->it_offset = offset >> tbl->it_page_shift;
	tbl->it_size = size >> tbl->it_page_shift;
}
#ifdef CONFIG_PCI
static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
{
	struct device_node *dn;
	struct iommu_table *tbl;
	struct device_node *isa_dn, *isa_dn_orig;
	struct device_node *tmp;
	struct pci_dn *pci;
	int children;

	dn = pci_bus_to_OF_node(bus);

	pr_debug("pci_dma_bus_setup_pSeries: setting up bus %s\n", dn->full_name);

	if (bus->self) {
		/* This is not a root bus, any setup will be done for the
		 * device-side of the bridge in iommu_dev_setup_pSeries().
		 */
		return;
	}
	pci = PCI_DN(dn);

	/* Check if the ISA bus on the system is under
	 * this PHB.
	 */
	isa_dn = isa_dn_orig = of_find_node_by_type(NULL, "isa");

	while (isa_dn && isa_dn != dn)
		isa_dn = isa_dn->parent;

	of_node_put(isa_dn_orig);

	/* Count number of direct PCI children of the PHB. */
	for (children = 0, tmp = dn->child; tmp; tmp = tmp->sibling)
		children++;

	pr_debug("Children: %d\n", children);

	/* Calculate amount of DMA window per slot. Each window must be
	 * a power of two (due to pci_alloc_consistent requirements).
	 *
	 * Keep 256MB aside for PHBs with ISA.
	 */

	if (!isa_dn) {
		/* No ISA/IDE - just set window size and return */
		pci->phb->dma_window_size = 0x80000000ul; /* To be divided */

		while (pci->phb->dma_window_size * children > 0x80000000ul)
			pci->phb->dma_window_size >>= 1;
		pr_debug("No ISA/IDE, window size is 0x%llx\n",
			 pci->phb->dma_window_size);
		pci->phb->dma_window_base_cur = 0;

		return;
	}

	/* If we have ISA, then we probably have an IDE
	 * controller too. Allocate a 128MB table but
	 * skip the first 128MB to avoid stepping on ISA
	 * space.
	 */
	pci->phb->dma_window_size = 0x8000000ul;
	pci->phb->dma_window_base_cur = 0x8000000ul;

	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
			   pci->phb->node);

	iommu_table_setparms(pci->phb, dn, tbl);
	pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
	iommu_register_group(tbl, pci_domain_nr(bus), 0);

	/* Divide the rest (1.75GB) among the children */
	pci->phb->dma_window_size = 0x80000000ul;
	while (pci->phb->dma_window_size * children > 0x70000000ul)
		pci->phb->dma_window_size >>= 1;

	pr_debug("ISA/IDE, window size is 0x%llx\n", pci->phb->dma_window_size);
}
static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
{
	struct iommu_table *tbl;
	struct device_node *dn, *pdn;
	struct pci_dn *ppci;
	const __be32 *dma_window = NULL;

	dn = pci_bus_to_OF_node(bus);

	pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %s\n",
		 dn->full_name);

	/* Find nearest ibm,dma-window, walking up the device tree */
	for (pdn = dn; pdn != NULL; pdn = pdn->parent) {
		dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
		if (dma_window != NULL)
			break;
	}

	if (dma_window == NULL) {
		pr_debug("  no ibm,dma-window property !\n");
		return;
	}

	ppci = PCI_DN(pdn);

	pr_debug("  parent is %s, iommu_table: 0x%p\n",
		 pdn->full_name, ppci->iommu_table);

	if (!ppci->iommu_table) {
		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
				   ppci->phb->node);
		iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window);
		ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node);
		iommu_register_group(tbl, pci_domain_nr(bus), 0);
		pr_debug("  created table: %p\n", ppci->iommu_table);
	}
}
static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
{
	struct device_node *dn;
	struct iommu_table *tbl;

	pr_debug("pci_dma_dev_setup_pSeries: %s\n", pci_name(dev));

	dn = dev->dev.of_node;

	/* If we're the direct child of a root bus, then we need to allocate
	 * an iommu table ourselves. The bus setup code should have setup
	 * the window sizes already.
	 */
	if (!dev->bus->self) {
		struct pci_controller *phb = PCI_DN(dn)->phb;

		pr_debug(" --> first child, no bridge. Allocating iommu table.\n");
		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
				   phb->node);
		iommu_table_setparms(phb, dn, tbl);
		PCI_DN(dn)->iommu_table = iommu_init_table(tbl, phb->node);
		iommu_register_group(tbl, pci_domain_nr(phb->bus), 0);
		set_iommu_table_base_and_group(&dev->dev,
					       PCI_DN(dn)->iommu_table);
		return;
	}

	/* If this device is further down the bus tree, search upwards until
	 * an already allocated iommu table is found and use that.
	 */
	while (dn && PCI_DN(dn) && PCI_DN(dn)->iommu_table == NULL)
		dn = dn->parent;

	if (dn && PCI_DN(dn))
		set_iommu_table_base_and_group(&dev->dev,
					       PCI_DN(dn)->iommu_table);
	else
		printk(KERN_WARNING "iommu: Device %s has no iommu table\n",
		       pci_name(dev));
}
static int __read_mostly disable_ddw;

static int __init disable_ddw_setup(char *str)
{
	disable_ddw = 1;
	printk(KERN_INFO "ppc iommu: disabling ddw.\n");

	return 0;
}

early_param("disable_ddw", disable_ddw_setup);
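/*
 * remove_ddw() below tears a window down in two steps: the TCEs are
 * cleared via tce_clearrange_multi_pSeriesLP() and the window itself is
 * then handed back to firmware through the ibm,remove-pe-dma-window RTAS
 * call, optionally deleting the DIRECT64_PROPNAME property as well.
 */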
static void remove_ddw(struct device_node *np, bool remove_prop)
{
	struct dynamic_dma_window_prop *dwp;
	struct property *win64;
	u32 ddw_avail[3];
	u64 liobn;
	int ret = 0;

	ret = of_property_read_u32_array(np, "ibm,ddw-applicable",
					 &ddw_avail[0], 3);

	win64 = of_find_property(np, DIRECT64_PROPNAME, NULL);
	if (!win64)
		return;

	if (ret || win64->length < sizeof(*dwp))
		goto delprop;

	dwp = win64->value;
	liobn = (u64)be32_to_cpu(dwp->liobn);

	/* clear the whole window, note the arg is in kernel pages */
	ret = tce_clearrange_multi_pSeriesLP(0,
		1ULL << (be32_to_cpu(dwp->window_shift) - PAGE_SHIFT), dwp);
	if (ret)
		pr_warning("%s failed to clear tces in window.\n",
			   np->full_name);
	else
		pr_debug("%s successfully cleared tces in window.\n",
			 np->full_name);

	ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn);
	if (ret)
		pr_warning("%s: failed to remove direct window: rtas returned "
			   "%d to ibm,remove-pe-dma-window(%x) %llx\n",
			   np->full_name, ret, ddw_avail[2], liobn);
	else
		pr_debug("%s: successfully removed direct window: rtas returned "
			 "%d to ibm,remove-pe-dma-window(%x) %llx\n",
			 np->full_name, ret, ddw_avail[2], liobn);

delprop:
	if (remove_prop)
		ret = of_remove_property(np, win64);
	if (ret)
		pr_warning("%s: failed to remove direct window property: %d\n",
			   np->full_name, ret);
}

static u64 find_existing_ddw(struct device_node *pdn)
{
	struct direct_window *window;
	const struct dynamic_dma_window_prop *direct64;
	u64 dma_addr = 0;

	spin_lock(&direct_window_list_lock);
	/* check if we already created a window and dupe that config if so */
	list_for_each_entry(window, &direct_window_list, list) {
		if (window->device == pdn) {
			direct64 = window->prop;
			dma_addr = be64_to_cpu(direct64->dma_base);
			break;
		}
	}
	spin_unlock(&direct_window_list_lock);

	return dma_addr;
}
static int find_existing_ddw_windows(void)
{
	int len;
	struct device_node *pdn;
	struct direct_window *window;
	const struct dynamic_dma_window_prop *direct64;

	if (!firmware_has_feature(FW_FEATURE_LPAR))
		return 0;

	for_each_node_with_property(pdn, DIRECT64_PROPNAME) {
		direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len);
		if (!direct64)
			continue;

		window = kzalloc(sizeof(*window), GFP_KERNEL);
		if (!window || len < sizeof(struct dynamic_dma_window_prop)) {
			kfree(window);
			remove_ddw(pdn, true);
			continue;
		}

		window->device = pdn;
		window->prop = direct64;
		spin_lock(&direct_window_list_lock);
		list_add(&window->list, &direct_window_list);
		spin_unlock(&direct_window_list_lock);
	}

	return 0;
}
machine_arch_initcall(pseries, find_existing_ddw_windows);
static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
		     struct ddw_query_response *query)
{
	struct eeh_dev *edev;
	u32 cfg_addr;
	u64 buid;
	int ret;

	/*
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	 */
	edev = pci_dev_to_eeh_dev(dev);
	cfg_addr = edev->config_addr;
	if (edev->pe_config_addr)
		cfg_addr = edev->pe_config_addr;
	buid = edev->phb->buid;

	ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
			cfg_addr, BUID_HI(buid), BUID_LO(buid));
	dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x"
		 " returned %d\n", ddw_avail[0], cfg_addr, BUID_HI(buid),
		 BUID_LO(buid), ret);
	return ret;
}

static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
		      struct ddw_create_response *create, int page_shift,
		      int window_shift)
{
	struct eeh_dev *edev;
	u32 cfg_addr;
	u64 buid;
	int ret;

	/*
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	 */
	edev = pci_dev_to_eeh_dev(dev);
	cfg_addr = edev->config_addr;
	if (edev->pe_config_addr)
		cfg_addr = edev->pe_config_addr;
	buid = edev->phb->buid;

	do {
		/* extra outputs are LIOBN and dma-addr (hi, lo) */
		ret = rtas_call(ddw_avail[1], 5, 4, (u32 *)create,
				cfg_addr, BUID_HI(buid), BUID_LO(buid),
				page_shift, window_shift);
	} while (rtas_busy_delay(ret));
	dev_info(&dev->dev,
		 "ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d "
		 "(liobn = 0x%x starting addr = %x %x)\n", ddw_avail[1],
		 cfg_addr, BUID_HI(buid), BUID_LO(buid), page_shift,
		 window_shift, ret, create->liobn, create->addr_hi, create->addr_lo);

	return ret;
}

struct failed_ddw_pdn {
	struct device_node *pdn;
	struct list_head list;
};

static LIST_HEAD(failed_ddw_pdn_list);
/*
 * If the PE supports dynamic dma windows, and there is space for a table
 * that can map all pages in a linear offset, then setup such a table,
 * and record the dma-offset in the struct device.
 *
 * dev: the pci device we are checking
 * pdn: the parent pe node with the ibm,dma_window property
 * Future: also check if we can remap the base window for our base page size
 *
 * returns the dma offset for use by dma_set_mask
 */
static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
{
	int len, ret;
	struct ddw_query_response query;
	struct ddw_create_response create;
	int page_shift;
	u64 dma_addr, max_addr;
	struct device_node *dn;
	u32 ddw_avail[3];
	struct direct_window *window;
	struct property *win64;
	struct dynamic_dma_window_prop *ddwprop;
	struct failed_ddw_pdn *fpdn;

	mutex_lock(&direct_window_init_mutex);

	dma_addr = find_existing_ddw(pdn);
	if (dma_addr != 0)
		goto out_unlock;

	/*
	 * If we already went through this for a previous function of
	 * the same device and failed, we don't want to muck with the
	 * DMA window again, as it will race with in-flight operations
	 * and can lead to EEHs. The above mutex protects access to the
	 * list.
	 */
	list_for_each_entry(fpdn, &failed_ddw_pdn_list, list) {
		if (!strcmp(fpdn->pdn->full_name, pdn->full_name))
			goto out_unlock;
	}

	/*
	 * the ibm,ddw-applicable property holds the tokens for:
	 * ibm,query-pe-dma-window
	 * ibm,create-pe-dma-window
	 * ibm,remove-pe-dma-window
	 * for the given node in that order.
	 * the property is actually in the parent, not the PE
	 */
	ret = of_property_read_u32_array(pdn, "ibm,ddw-applicable",
					 &ddw_avail[0], 3);
	if (ret)
		goto out_failed;

	/*
	 * Query if there is a second window of size to map the
	 * whole partition.  Query returns number of windows, largest
	 * block assigned to PE (partition endpoint), and two bitmasks
	 * of page sizes: supported and supported for migrate-dma.
	 */
	dn = pci_device_to_OF_node(dev);
	ret = query_ddw(dev, ddw_avail, &query);
	if (ret != 0)
		goto out_failed;

	if (query.windows_available == 0) {
		/*
		 * no additional windows are available for this device.
		 * We might be able to reallocate the existing window,
		 * trading in for a larger page size.
		 */
		dev_dbg(&dev->dev, "no free dynamic windows");
		goto out_failed;
	}
	if (query.page_size & 4) {
		page_shift = 24; /* 16MB */
	} else if (query.page_size & 2) {
		page_shift = 16; /* 64kB */
	} else if (query.page_size & 1) {
		page_shift = 12; /* 4kB */
	} else {
		dev_dbg(&dev->dev, "no supported direct page size in mask %x",
			query.page_size);
		goto out_failed;
	}
	/* verify the window * number of ptes will map the partition */
	/* check largest block * page size > max memory hotplug addr */
	max_addr = memory_hotplug_max();
	if (query.largest_available_block < (max_addr >> page_shift)) {
		dev_dbg(&dev->dev, "can't map partition max 0x%llx with %u "
			"%llu-sized pages\n", max_addr, query.largest_available_block,
			1ULL << page_shift);
		goto out_failed;
	}
	len = order_base_2(max_addr);
	win64 = kzalloc(sizeof(struct property), GFP_KERNEL);
	if (!win64) {
		dev_info(&dev->dev,
			 "couldn't allocate property for 64bit dma window\n");
		goto out_failed;
	}
	win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL);
	win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL);
	win64->length = sizeof(*ddwprop);
	if (!win64->name || !win64->value) {
		dev_info(&dev->dev,
			 "couldn't allocate property name and value\n");
		goto out_free_prop;
	}

	ret = create_ddw(dev, ddw_avail, &create, page_shift, len);
	if (ret != 0)
		goto out_free_prop;

	ddwprop->liobn = cpu_to_be32(create.liobn);
	ddwprop->dma_base = cpu_to_be64(((u64)create.addr_hi << 32) |
					create.addr_lo);
	ddwprop->tce_shift = cpu_to_be32(page_shift);
	ddwprop->window_shift = cpu_to_be32(len);

	dev_dbg(&dev->dev, "created tce table LIOBN 0x%x for %s\n",
		create.liobn, dn->full_name);

	window = kzalloc(sizeof(*window), GFP_KERNEL);
	if (!window)
		goto out_clear_window;

	ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT,
			win64->value, tce_setrange_multi_pSeriesLP_walk);
	if (ret) {
		dev_info(&dev->dev, "failed to map direct window for %s: %d\n",
			 dn->full_name, ret);
		goto out_free_window;
	}

	ret = of_add_property(pdn, win64);
	if (ret) {
		dev_err(&dev->dev, "unable to add dma window property for %s: %d",
			pdn->full_name, ret);
		goto out_free_window;
	}

	window->device = pdn;
	window->prop = ddwprop;
	spin_lock(&direct_window_list_lock);
	list_add(&window->list, &direct_window_list);
	spin_unlock(&direct_window_list_lock);

	dma_addr = be64_to_cpu(ddwprop->dma_base);
	goto out_unlock;

out_free_window:
	kfree(window);

out_clear_window:
	remove_ddw(pdn, true);

out_free_prop:
	kfree(win64->name);
	kfree(win64->value);
	kfree(win64);

out_failed:
	fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL);
	if (!fpdn)
		goto out_unlock;
	fpdn->pdn = pdn;
	list_add(&fpdn->list, &failed_ddw_pdn_list);

out_unlock:
	mutex_unlock(&direct_window_init_mutex);
	return dma_addr;
}
static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
{
	struct device_node *pdn, *dn;
	struct iommu_table *tbl;
	const __be32 *dma_window = NULL;
	struct pci_dn *pci;

	pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));

	/* dev setup for LPAR is a little tricky, since the device tree might
	 * contain the dma-window properties per-device and not necessarily
	 * for the bus. So we need to search upwards in the tree until we
	 * either hit a dma-window property, OR find a parent with a table
	 * already allocated.
	 */
	dn = pci_device_to_OF_node(dev);
	pr_debug("  node is %s\n", dn->full_name);

	for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
	     pdn = pdn->parent) {
		dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
		if (dma_window)
			break;
	}

	if (!pdn || !PCI_DN(pdn)) {
		printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: "
		       "no DMA window found for pci dev=%s dn=%s\n",
		       pci_name(dev), of_node_full_name(dn));
		return;
	}
	pr_debug("  parent is %s\n", pdn->full_name);

	pci = PCI_DN(pdn);
	if (!pci->iommu_table) {
		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
				   pci->phb->node);
		iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window);
		pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
		iommu_register_group(tbl, pci_domain_nr(pci->phb->bus), 0);
		pr_debug("  created table: %p\n", pci->iommu_table);
	} else {
		pr_debug("  found DMA window, table: %p\n", pci->iommu_table);
	}

	set_iommu_table_base_and_group(&dev->dev, pci->iommu_table);
}
static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
{
	bool ddw_enabled = false;
	struct device_node *pdn, *dn;
	struct pci_dev *pdev;
	const __be32 *dma_window = NULL;
	u64 dma_offset;

	if (!dev->dma_mask)
		return -EIO;

	if (!dev_is_pci(dev))
		goto check_mask;

	pdev = to_pci_dev(dev);

	/* only attempt to use a new window if 64-bit DMA is requested */
	if (!disable_ddw && dma_mask == DMA_BIT_MASK(64)) {
		dn = pci_device_to_OF_node(pdev);
		dev_dbg(dev, "node is %s\n", dn->full_name);

		/*
		 * the device tree might contain the dma-window properties
		 * per-device and not necessarily for the bus. So we need to
		 * search upwards in the tree until we either hit a dma-window
		 * property, OR find a parent with a table already allocated.
		 */
		for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
		     pdn = pdn->parent) {
			dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
			if (dma_window)
				break;
		}
		if (pdn && PCI_DN(pdn)) {
			dma_offset = enable_ddw(pdev, pdn);
			if (dma_offset != 0) {
				dev_info(dev, "Using 64-bit direct DMA at offset %llx\n", dma_offset);
				set_dma_offset(dev, dma_offset);
				set_dma_ops(dev, &dma_direct_ops);
				ddw_enabled = true;
			}
		}
	}

	/* fall back on iommu ops, restore table pointer with ops */
	if (!ddw_enabled && get_dma_ops(dev) != &dma_iommu_ops) {
		dev_info(dev, "Restoring 32-bit DMA via iommu\n");
		set_dma_ops(dev, &dma_iommu_ops);
		pci_dma_dev_setup_pSeriesLP(pdev);
	}

check_mask:
	if (!dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;
	return 0;
}
static u64 dma_get_required_mask_pSeriesLP(struct device *dev)
{
	if (!dev->dma_mask)
		return 0;

	if (!disable_ddw && dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
		struct device_node *dn;

		dn = pci_device_to_OF_node(pdev);

		/* search upwards for ibm,dma-window */
		for (; dn && PCI_DN(dn) && !PCI_DN(dn)->iommu_table;
		     dn = dn->parent)
			if (of_get_property(dn, "ibm,dma-window", NULL))
				break;
		/* if there is a ibm,ddw-applicable property require 64 bits */
		if (dn && PCI_DN(dn) &&
		    of_get_property(dn, "ibm,ddw-applicable", NULL))
			return DMA_BIT_MASK(64);
	}

	return dma_iommu_ops.get_required_mask(dev);
}
#else  /* CONFIG_PCI */
#define pci_dma_bus_setup_pSeries	NULL
#define pci_dma_dev_setup_pSeries	NULL
#define pci_dma_bus_setup_pSeriesLP	NULL
#define pci_dma_dev_setup_pSeriesLP	NULL
#define dma_set_mask_pSeriesLP		NULL
#define dma_get_required_mask_pSeriesLP	NULL
#endif /* !CONFIG_PCI */
static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
			      void *data)
{
	struct direct_window *window;
	struct memory_notify *arg = data;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn,
					arg->nr_pages, window->prop);
		}
		spin_unlock(&direct_window_list_lock);
		break;
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE:
		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn,
					arg->nr_pages, window->prop);
		}
		spin_unlock(&direct_window_list_lock);
		break;
	default:
		break;
	}
	if (ret && action != MEM_CANCEL_ONLINE)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

static struct notifier_block iommu_mem_nb = {
	.notifier_call = iommu_mem_notifier,
};
static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
	int err = NOTIFY_OK;
	struct of_reconfig_data *rd = data;
	struct device_node *np = rd->dn;
	struct pci_dn *pci = PCI_DN(np);
	struct direct_window *window;

	switch (action) {
	case OF_RECONFIG_DETACH_NODE:
		/*
		 * Removing the property will invoke the reconfig
		 * notifier again, which causes dead-lock on the
		 * read-write semaphore of the notifier chain. So
		 * we have to remove the property when releasing
		 * the device node.
		 */
		remove_ddw(np, false);
		if (pci && pci->iommu_table)
			iommu_free_table(pci->iommu_table, np->full_name);

		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			if (window->device == np) {
				list_del(&window->list);
				kfree(window);
				break;
			}
		}
		spin_unlock(&direct_window_list_lock);
		break;
	default:
		err = NOTIFY_DONE;
		break;
	}
	return err;
}

static struct notifier_block iommu_reconfig_nb = {
	.notifier_call = iommu_reconfig_notifier,
};
/* These are called very early. */
void iommu_init_early_pSeries(void)
{
	if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL))
		return;

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		if (firmware_has_feature(FW_FEATURE_MULTITCE)) {
			ppc_md.tce_build = tce_buildmulti_pSeriesLP;
			ppc_md.tce_free	 = tce_freemulti_pSeriesLP;
		} else {
			ppc_md.tce_build = tce_build_pSeriesLP;
			ppc_md.tce_free	 = tce_free_pSeriesLP;
		}
		ppc_md.tce_get   = tce_get_pSeriesLP;
		ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
		ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
		ppc_md.dma_set_mask = dma_set_mask_pSeriesLP;
		ppc_md.dma_get_required_mask = dma_get_required_mask_pSeriesLP;
	} else {
		ppc_md.tce_build = tce_build_pSeries;
		ppc_md.tce_free  = tce_free_pSeries;
		ppc_md.tce_get   = tce_get_pseries;
		ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeries;
		ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeries;
	}

	of_reconfig_notifier_register(&iommu_reconfig_nb);
	register_memory_notifier(&iommu_mem_nb);

	set_pci_dma_ops(&dma_iommu_ops);
}
static int __init disable_multitce(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_LPAR) &&
	    firmware_has_feature(FW_FEATURE_MULTITCE)) {
		printk(KERN_INFO "Disabling MULTITCE firmware feature\n");
		ppc_md.tce_build = tce_build_pSeriesLP;
		ppc_md.tce_free	 = tce_free_pSeriesLP;
		powerpc_firmware_features &= ~FW_FEATURE_MULTITCE;
	}
	return 1;
}

__setup("multitce=", disable_multitce);