// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
 * Copyright (C) 2006 Olof Johansson <olof@lixom.net>
 *
 * Dynamic DMA mapping support, pSeries-specific parts, both SMP and LPAR.
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/of.h>
#include <linux/iommu.h>
#include <linux/rculist.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/tce.h>
#include <asm/ppc-pci.h>
#include <asm/udbg.h>
#include <asm/mmzone.h>
#include <asm/plpar_wrappers.h>

#include "pseries.h"
static struct iommu_table_group *iommu_pseries_alloc_group(int node)
{
	struct iommu_table_group *table_group;
	struct iommu_table *tbl;

	table_group = kzalloc_node(sizeof(struct iommu_table_group), GFP_KERNEL,
				   node);
	if (!table_group)
		return NULL;

	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, node);
	if (!tbl)
		goto free_group;

	INIT_LIST_HEAD_RCU(&tbl->it_group_list);
	kref_init(&tbl->it_kref);

	table_group->tables[0] = tbl;

	return table_group;

free_group:
	kfree(table_group);
	return NULL;
}
static void iommu_pseries_free_group(struct iommu_table_group *table_group,
				     const char *node_name)
{
	struct iommu_table *tbl;

	if (!table_group)
		return;

	tbl = table_group->tables[0];
#ifdef CONFIG_IOMMU_API
	if (table_group->group) {
		iommu_group_put(table_group->group);
		BUG_ON(table_group->group);
	}
#endif
	iommu_tce_table_put(tbl);

	kfree(table_group);
}
static int tce_build_pSeries(struct iommu_table *tbl, long index,
			     long npages, unsigned long uaddr,
			     enum dma_data_direction direction,
			     unsigned long attrs)
{
	u64 proto_tce;
	__be64 *tcep;
	u64 rpn;

	proto_tce = TCE_PCI_READ; // Read allowed

	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	tcep = ((__be64 *)tbl->it_base) + index;

	while (npages--) {
		/* can't move this out since we might cross MEMBLOCK boundary */
		rpn = __pa(uaddr) >> TCE_SHIFT;
		*tcep = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);

		uaddr += TCE_PAGE_SIZE;
		tcep++;
	}
	return 0;
}
static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
{
	__be64 *tcep;

	tcep = ((__be64 *)tbl->it_base) + index;

	while (npages--)
		*(tcep++) = 0;
}
static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
{
	__be64 *tcep;

	tcep = ((__be64 *)tbl->it_base) + index;

	return be64_to_cpu(*tcep);
}
static void tce_free_pSeriesLP(struct iommu_table *, long, long);
static void tce_freemulti_pSeriesLP(struct iommu_table *, long, long);
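/*
 * LPAR variants: under PowerVM the TCE table belongs to the hypervisor, so
 * the routines below set and clear entries through hcalls (plpar_tce_put()
 * and friends) rather than by writing tbl->it_base directly as the pSeries
 * routines above do.
 */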
static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
			       long npages, unsigned long uaddr,
			       enum dma_data_direction direction,
			       unsigned long attrs)
{
	u64 rc = 0;
	u64 proto_tce, tce;
	u64 rpn;
	int ret = 0;
	long tcenum_start = tcenum, npages_start = npages;

	rpn = __pa(uaddr) >> TCE_SHIFT;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	while (npages--) {
		tce = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
		rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, tce);

		if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
			ret = (int)rc;
			tce_free_pSeriesLP(tbl, tcenum_start,
					   (npages_start - (npages + 1)));
			break;
		}

		if (rc && printk_ratelimit()) {
			printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
			printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
			printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
			printk("\ttce val = 0x%llx\n", tce);
		}

		tcenum++;
		rpn++;
	}
	return ret;
}
static DEFINE_PER_CPU(__be64 *, tce_page);
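/*
 * tce_page is a per-CPU scratch page, allocated on demand with GFP_ATOMIC
 * and then kept around.  The multi-TCE build path below fills it with up to
 * one page worth of TCEs and hands its physical address to the hypervisor
 * in a single indirect hcall instead of one hcall per page.
 */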
static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
				    long npages, unsigned long uaddr,
				    enum dma_data_direction direction,
				    unsigned long attrs)
{
	u64 rc = 0;
	u64 proto_tce;
	__be64 *tcep;
	u64 rpn;
	long l, limit;
	long tcenum_start = tcenum, npages_start = npages;
	int ret = 0;
	unsigned long flags;

	if ((npages == 1) || !firmware_has_feature(FW_FEATURE_MULTITCE)) {
		return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
					   direction, attrs);
	}

	local_irq_save(flags);	/* to protect tcep and the page behind it */

	tcep = __this_cpu_read(tce_page);

	/* This is safe to do since interrupts are off when we're called
	 * from iommu_alloc{,_sg}()
	 */
	if (!tcep) {
		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
		/* If allocation fails, fall back to the loop implementation */
		if (!tcep) {
			local_irq_restore(flags);
			return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
						   direction, attrs);
		}
		__this_cpu_write(tce_page, tcep);
	}

	rpn = __pa(uaddr) >> TCE_SHIFT;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	/* We can map max one pageful of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE);

		for (l = 0; l < limit; l++) {
			tcep[l] = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);
			rpn++;
		}

		rc = plpar_tce_put_indirect((u64)tbl->it_index,
					    (u64)tcenum << 12,
					    (u64)__pa(tcep),
					    limit);

		npages -= limit;
		tcenum += limit;
	} while (npages > 0 && !rc);

	local_irq_restore(flags);

	if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
		ret = (int)rc;
		tce_freemulti_pSeriesLP(tbl, tcenum_start,
					(npages_start - (npages + limit)));
		return ret;
	}

	if (rc && printk_ratelimit()) {
		printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages  = 0x%llx\n", (u64)npages);
		printk("\ttce[0] val = 0x%llx\n", tcep[0]);
	}
	return ret;
}
static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
	u64 rc;

	while (npages--) {
		rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0);

		if (rc && printk_ratelimit()) {
			printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
			printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
			printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
		}

		tcenum++;
	}
}
static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
	u64 rc;

	if (!firmware_has_feature(FW_FEATURE_MULTITCE))
		return tce_free_pSeriesLP(tbl, tcenum, npages);

	rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);

	if (rc && printk_ratelimit()) {
		printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
		printk("\trc      = %lld\n", rc);
		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages  = 0x%llx\n", (u64)npages);
	}
}
static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum)
{
	u64 rc;
	unsigned long tce_ret;

	rc = plpar_tce_get((u64)tbl->it_index, (u64)tcenum << 12, &tce_ret);

	if (rc && printk_ratelimit()) {
		printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%lld\n", rc);
		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
		printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
	}

	return tce_ret;
}
/* this is compatible with cells for the device tree property */
struct dynamic_dma_window_prop {
	__be32	liobn;		/* tce table number */
	__be64	dma_base;	/* address hi,lo */
	__be32	tce_shift;	/* ilog2(tce_page_size) */
	__be32	window_shift;	/* ilog2(tce_window_size) */
};

struct direct_window {
	struct device_node *device;
	const struct dynamic_dma_window_prop *prop;
	struct list_head list;
};

/* Dynamic DMA Window support */
struct ddw_query_response {
	u32 windows_available;
	u32 largest_available_block;
	u32 page_size;
	u32 migration_capable;
};

struct ddw_create_response {
	u32 liobn;
	u32 addr_hi;
	u32 addr_lo;
};

static LIST_HEAD(direct_window_list);
/* prevents races between memory on/offline and window creation */
static DEFINE_SPINLOCK(direct_window_list_lock);
/* protects initializing window twice for same device */
static DEFINE_MUTEX(direct_window_init_mutex);
#define DIRECT64_PROPNAME "linux,direct64-ddr-window-info"
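/*
 * Illustration only (the numbers below are made up, not read from real
 * firmware): a 1TB direct window of 16MB IOMMU pages starting at bus
 * address 0x0800000000000000 would be published as a DIRECT64_PROPNAME
 * property whose cells decode, via struct dynamic_dma_window_prop above,
 * to liobn = <firmware assigned>, dma_base = 0x0800000000000000,
 * tce_shift = 24 and window_shift = 40.
 */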
static int tce_clearrange_multi_pSeriesLP(unsigned long start_pfn,
					  unsigned long num_pfn, const void *arg)
{
	const struct dynamic_dma_window_prop *maprange = arg;
	int rc;
	u64 tce_size, num_tce, dma_offset, next;
	u32 tce_shift;
	long limit;

	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of tces */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, num_tce, 512);
		dma_offset = next + be64_to_cpu(maprange->dma_base);

		rc = plpar_tce_stuff((u64)be32_to_cpu(maprange->liobn),
				     dma_offset, 0, limit);
		next += limit * tce_size;
		num_tce -= limit;
	} while (num_tce > 0 && !rc);

	return rc;
}
static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
					unsigned long num_pfn, const void *arg)
{
	const struct dynamic_dma_window_prop *maprange = arg;
	u64 tce_size, num_tce, dma_offset, next, proto_tce, liobn;
	__be64 *tcep;
	u32 tce_shift;
	u64 rc = 0;
	long l, limit;

	local_irq_disable();	/* to protect tcep and the page behind it */
	tcep = __this_cpu_read(tce_page);

	if (!tcep) {
		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
		if (!tcep) {
			local_irq_enable();
			return -ENOMEM;
		}
		__this_cpu_write(tce_page, tcep);
	}

	proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;

	liobn = (u64)be32_to_cpu(maprange->liobn);
	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of tces */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

	/* We can map max one pageful of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, num_tce, 4096/TCE_ENTRY_SIZE);
		dma_offset = next + be64_to_cpu(maprange->dma_base);

		for (l = 0; l < limit; l++) {
			tcep[l] = cpu_to_be64(proto_tce | next);
			next += tce_size;
		}

		rc = plpar_tce_put_indirect(liobn,
					    dma_offset,
					    (u64)__pa(tcep),
					    limit);

		num_tce -= limit;
	} while (num_tce > 0 && !rc);

	/* error cleanup: caller will clear whole range */

	local_irq_enable();
	return rc;
}
static int tce_setrange_multi_pSeriesLP_walk(unsigned long start_pfn,
		unsigned long num_pfn, void *arg)
{
	return tce_setrange_multi_pSeriesLP(start_pfn, num_pfn, arg);
}
static void iommu_table_setparms(struct pci_controller *phb,
				 struct device_node *dn,
				 struct iommu_table *tbl)
{
	struct device_node *node;
	const unsigned long *basep;
	const u32 *sizep;

	node = phb->dn;

	basep = of_get_property(node, "linux,tce-base", NULL);
	sizep = of_get_property(node, "linux,tce-size", NULL);
	if (basep == NULL || sizep == NULL) {
		printk(KERN_ERR "PCI_DMA: iommu_table_setparms: %pOF has "
				"missing tce entries !\n", dn);
		return;
	}

	tbl->it_base = (unsigned long)__va(*basep);

	if (!is_kdump_kernel())
		memset((void *)tbl->it_base, 0, *sizep);

	tbl->it_busno = phb->bus->number;
	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;

	/* Units of tce entries */
	tbl->it_offset = phb->dma_window_base_cur >> tbl->it_page_shift;

	/* Test if we are going over 2GB of DMA space */
	if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
		udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
		panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
	}

	phb->dma_window_base_cur += phb->dma_window_size;

	/* Set the tce table size - measured in entries */
	tbl->it_size = phb->dma_window_size >> tbl->it_page_shift;

	tbl->it_blocksize = 16;
	tbl->it_type = TCE_PCI;
}
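/*
 * iommu_table_setparms() above covers the non-LPAR case: firmware has
 * already allocated the TCE table and published its real address and size
 * in the "linux,tce-base"/"linux,tce-size" properties, so the kernel can
 * zero it and write entries directly.  The LPAR variant below instead
 * parses the "ibm,dma-window" property and leaves all updates to hcalls.
 */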
/*
 * iommu_table_setparms_lpar
 *
 * Function: On pSeries LPAR systems, return TCE table info, given a pci bus.
 */
static void iommu_table_setparms_lpar(struct pci_controller *phb,
				      struct device_node *dn,
				      struct iommu_table *tbl,
				      struct iommu_table_group *table_group,
				      const __be32 *dma_window)
{
	unsigned long offset, size;

	of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size);

	tbl->it_busno = phb->bus->number;
	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
	tbl->it_base = 0;
	tbl->it_blocksize = 16;
	tbl->it_type = TCE_PCI;
	tbl->it_offset = offset >> tbl->it_page_shift;
	tbl->it_size = size >> tbl->it_page_shift;

	table_group->tce32_start = offset;
	table_group->tce32_size = size;
}
struct iommu_table_ops iommu_table_pseries_ops = {
	.set = tce_build_pSeries,
	.clear = tce_free_pSeries,
	.get = tce_get_pseries
};
static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
{
	struct device_node *dn;
	struct iommu_table *tbl;
	struct device_node *isa_dn, *isa_dn_orig;
	struct device_node *tmp;
	struct pci_dn *pci;
	int children;

	dn = pci_bus_to_OF_node(bus);

	pr_debug("pci_dma_bus_setup_pSeries: setting up bus %pOF\n", dn);

	if (bus->self) {
		/* This is not a root bus, any setup will be done for the
		 * device-side of the bridge in iommu_dev_setup_pSeries().
		 */
		return;
	}
	pci = PCI_DN(dn);

	/* Check if the ISA bus on the system is under
	 * this PHB.
	 */
	isa_dn = isa_dn_orig = of_find_node_by_type(NULL, "isa");

	while (isa_dn && isa_dn != dn)
		isa_dn = isa_dn->parent;

	of_node_put(isa_dn_orig);

	/* Count number of direct PCI children of the PHB. */
	for (children = 0, tmp = dn->child; tmp; tmp = tmp->sibling)
		children++;

	pr_debug("Children: %d\n", children);

	/* Calculate amount of DMA window per slot. Each window must be
	 * a power of two (due to pci_alloc_consistent requirements).
	 *
	 * Keep 256MB aside for PHBs with ISA.
	 */

	if (!isa_dn) {
		/* No ISA/IDE - just set window size and return */
		pci->phb->dma_window_size = 0x80000000ul; /* To be divided */

		while (pci->phb->dma_window_size * children > 0x80000000ul)
			pci->phb->dma_window_size >>= 1;
		pr_debug("No ISA/IDE, window size is 0x%llx\n",
			 pci->phb->dma_window_size);
		pci->phb->dma_window_base_cur = 0;

		return;
	}

	/* If we have ISA, then we probably have an IDE
	 * controller too. Allocate a 128MB table but
	 * skip the first 128MB to avoid stepping on ISA
	 * space.
	 */
	pci->phb->dma_window_size = 0x8000000ul;
	pci->phb->dma_window_base_cur = 0x8000000ul;

	pci->table_group = iommu_pseries_alloc_group(pci->phb->node);
	tbl = pci->table_group->tables[0];

	iommu_table_setparms(pci->phb, dn, tbl);
	tbl->it_ops = &iommu_table_pseries_ops;
	iommu_init_table(tbl, pci->phb->node);

	/* Divide the rest (1.75GB) among the children */
	pci->phb->dma_window_size = 0x80000000ul;
	while (pci->phb->dma_window_size * children > 0x70000000ul)
		pci->phb->dma_window_size >>= 1;

	pr_debug("ISA/IDE, window size is 0x%llx\n", pci->phb->dma_window_size);
}
#ifdef CONFIG_IOMMU_API
static int tce_exchange_pseries(struct iommu_table *tbl, long index, unsigned
				long *tce, enum dma_data_direction *direction)
{
	long rc;
	unsigned long ioba = (unsigned long) index << tbl->it_page_shift;
	unsigned long flags, oldtce = 0;
	u64 proto_tce = iommu_direction_to_tce_perm(*direction);
	unsigned long newtce = *tce | proto_tce;

	spin_lock_irqsave(&tbl->large_pool.lock, flags);

	rc = plpar_tce_get((u64)tbl->it_index, ioba, &oldtce);
	if (!rc)
		rc = plpar_tce_put((u64)tbl->it_index, ioba, newtce);

	if (!rc) {
		*direction = iommu_tce_direction(oldtce);
		*tce = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	}

	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);

	return rc;
}
#endif
struct iommu_table_ops iommu_table_lpar_multi_ops = {
	.set = tce_buildmulti_pSeriesLP,
#ifdef CONFIG_IOMMU_API
	.exchange = tce_exchange_pseries,
#endif
	.clear = tce_freemulti_pSeriesLP,
	.get = tce_get_pSeriesLP
};
*bus
)
660 struct iommu_table
*tbl
;
661 struct device_node
*dn
, *pdn
;
663 const __be32
*dma_window
= NULL
;
665 dn
= pci_bus_to_OF_node(bus
);
667 pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %pOF\n",
670 /* Find nearest ibm,dma-window, walking up the device tree */
671 for (pdn
= dn
; pdn
!= NULL
; pdn
= pdn
->parent
) {
672 dma_window
= of_get_property(pdn
, "ibm,dma-window", NULL
);
673 if (dma_window
!= NULL
)
677 if (dma_window
== NULL
) {
678 pr_debug(" no ibm,dma-window property !\n");
684 pr_debug(" parent is %pOF, iommu_table: 0x%p\n",
685 pdn
, ppci
->table_group
);
687 if (!ppci
->table_group
) {
688 ppci
->table_group
= iommu_pseries_alloc_group(ppci
->phb
->node
);
689 tbl
= ppci
->table_group
->tables
[0];
690 iommu_table_setparms_lpar(ppci
->phb
, pdn
, tbl
,
691 ppci
->table_group
, dma_window
);
692 tbl
->it_ops
= &iommu_table_lpar_multi_ops
;
693 iommu_init_table(tbl
, ppci
->phb
->node
);
694 iommu_register_group(ppci
->table_group
,
695 pci_domain_nr(bus
), 0);
696 pr_debug(" created table: %p\n", ppci
->table_group
);
static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
{
	struct device_node *dn;
	struct iommu_table *tbl;

	pr_debug("pci_dma_dev_setup_pSeries: %s\n", pci_name(dev));

	dn = dev->dev.of_node;

	/* If we're the direct child of a root bus, then we need to allocate
	 * an iommu table ourselves. The bus setup code should have setup
	 * the window sizes already.
	 */
	if (!dev->bus->self) {
		struct pci_controller *phb = PCI_DN(dn)->phb;

		pr_debug(" --> first child, no bridge. Allocating iommu table.\n");
		PCI_DN(dn)->table_group = iommu_pseries_alloc_group(phb->node);
		tbl = PCI_DN(dn)->table_group->tables[0];
		iommu_table_setparms(phb, dn, tbl);
		tbl->it_ops = &iommu_table_pseries_ops;
		iommu_init_table(tbl, phb->node);
		set_iommu_table_base(&dev->dev, tbl);
		return;
	}

	/* If this device is further down the bus tree, search upwards until
	 * an already allocated iommu table is found and use that.
	 */
	while (dn && PCI_DN(dn) && PCI_DN(dn)->table_group == NULL)
		dn = dn->parent;

	if (dn && PCI_DN(dn))
		set_iommu_table_base(&dev->dev,
				     PCI_DN(dn)->table_group->tables[0]);
	else
		printk(KERN_WARNING "iommu: Device %s has no iommu table\n",
		       pci_name(dev));
}
;
744 static int __init
disable_ddw_setup(char *str
)
747 printk(KERN_INFO
"ppc iommu: disabling ddw.\n");
752 early_param("disable_ddw", disable_ddw_setup
);
static void remove_ddw(struct device_node *np, bool remove_prop)
{
	struct dynamic_dma_window_prop *dwp;
	struct property *win64;
	u32 ddw_avail[3];
	u64 liobn;
	int ret = 0;

	ret = of_property_read_u32_array(np, "ibm,ddw-applicable",
					 &ddw_avail[0], 3);

	win64 = of_find_property(np, DIRECT64_PROPNAME, NULL);
	if (!win64)
		return;

	if (ret || win64->length < sizeof(*dwp))
		goto delprop;

	dwp = win64->value;
	liobn = (u64)be32_to_cpu(dwp->liobn);

	/* clear the whole window, note the arg is in kernel pages */
	ret = tce_clearrange_multi_pSeriesLP(0,
		1ULL << (be32_to_cpu(dwp->window_shift) - PAGE_SHIFT), dwp);
	if (ret)
		pr_warn("%pOF failed to clear tces in window.\n",
			np);
	else
		pr_debug("%pOF successfully cleared tces in window.\n",
			 np);

	ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn);
	if (ret)
		pr_warn("%pOF: failed to remove direct window: rtas returned "
			"%d to ibm,remove-pe-dma-window(%x) %llx\n",
			np, ret, ddw_avail[2], liobn);
	else
		pr_debug("%pOF: successfully removed direct window: rtas returned "
			"%d to ibm,remove-pe-dma-window(%x) %llx\n",
			np, ret, ddw_avail[2], liobn);

delprop:
	if (remove_prop)
		ret = of_remove_property(np, win64);
	if (ret)
		pr_warn("%pOF: failed to remove direct window property: %d\n",
			np, ret);
}
static u64 find_existing_ddw(struct device_node *pdn)
{
	struct direct_window *window;
	const struct dynamic_dma_window_prop *direct64;
	u64 dma_addr = 0;

	spin_lock(&direct_window_list_lock);
	/* check if we already created a window and dupe that config if so */
	list_for_each_entry(window, &direct_window_list, list) {
		if (window->device == pdn) {
			direct64 = window->prop;
			dma_addr = be64_to_cpu(direct64->dma_base);
			break;
		}
	}
	spin_unlock(&direct_window_list_lock);

	return dma_addr;
}
static int find_existing_ddw_windows(void)
{
	int len;
	struct device_node *pdn;
	struct direct_window *window;
	const struct dynamic_dma_window_prop *direct64;

	if (!firmware_has_feature(FW_FEATURE_LPAR))
		return 0;

	for_each_node_with_property(pdn, DIRECT64_PROPNAME) {
		direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len);
		if (!direct64)
			continue;

		window = kzalloc(sizeof(*window), GFP_KERNEL);
		if (!window || len < sizeof(struct dynamic_dma_window_prop)) {
			kfree(window);
			remove_ddw(pdn, true);
			continue;
		}

		window->device = pdn;
		window->prop = direct64;
		spin_lock(&direct_window_list_lock);
		list_add(&window->list, &direct_window_list);
		spin_unlock(&direct_window_list_lock);
	}

	return 0;
}
machine_arch_initcall(pseries, find_existing_ddw_windows);
static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
		     struct ddw_query_response *query)
{
	struct device_node *dn;
	struct pci_dn *pdn;
	u32 cfg_addr;
	u64 buid;
	int ret;

	/*
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	 */
	dn = pci_device_to_OF_node(dev);
	pdn = PCI_DN(dn);
	buid = pdn->phb->buid;
	cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));

	ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
			cfg_addr, BUID_HI(buid), BUID_LO(buid));
	dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x"
		 " returned %d\n", ddw_avail[0], cfg_addr, BUID_HI(buid),
		 BUID_LO(buid), ret);

	return ret;
}
*dev
, const u32
*ddw_avail
,
885 struct ddw_create_response
*create
, int page_shift
,
888 struct device_node
*dn
;
895 * Get the config address and phb buid of the PE window.
896 * Rely on eeh to retrieve this for us.
897 * Retrieve them from the pci device, not the node with the
898 * dma-window property
900 dn
= pci_device_to_OF_node(dev
);
902 buid
= pdn
->phb
->buid
;
903 cfg_addr
= ((pdn
->busno
<< 16) | (pdn
->devfn
<< 8));
906 /* extra outputs are LIOBN and dma-addr (hi, lo) */
907 ret
= rtas_call(ddw_avail
[1], 5, 4, (u32
*)create
,
908 cfg_addr
, BUID_HI(buid
), BUID_LO(buid
),
909 page_shift
, window_shift
);
910 } while (rtas_busy_delay(ret
));
912 "ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d "
913 "(liobn = 0x%x starting addr = %x %x)\n", ddw_avail
[1],
914 cfg_addr
, BUID_HI(buid
), BUID_LO(buid
), page_shift
,
915 window_shift
, ret
, create
->liobn
, create
->addr_hi
, create
->addr_lo
);
struct failed_ddw_pdn {
	struct device_node *pdn;
	struct list_head list;
};

static LIST_HEAD(failed_ddw_pdn_list);
static phys_addr_t ddw_memory_hotplug_max(void)
{
	phys_addr_t max_addr = memory_hotplug_max();
	struct device_node *memory;

	for_each_node_by_type(memory, "memory") {
		unsigned long start, size;
		int n_mem_addr_cells, n_mem_size_cells, len;
		const __be32 *memcell_buf;

		memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		n_mem_addr_cells = of_n_addr_cells(memory);
		n_mem_size_cells = of_n_size_cells(memory);

		start = of_read_number(memcell_buf, n_mem_addr_cells);
		memcell_buf += n_mem_addr_cells;
		size = of_read_number(memcell_buf, n_mem_size_cells);
		memcell_buf += n_mem_size_cells;

		max_addr = max_t(phys_addr_t, max_addr, start + size);
	}

	return max_addr;
}
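/*
 * ddw_memory_hotplug_max() above reports the highest address a direct
 * window has to cover: the larger of memory_hotplug_max() and the end of
 * every "memory" node found in the device tree, so a window sized from it
 * remains valid across memory hotplug.
 */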
/*
 * If the PE supports dynamic dma windows, and there is space for a table
 * that can map all pages in a linear offset, then setup such a table,
 * and record the dma-offset in the struct device.
 *
 * dev: the pci device we are checking
 * pdn: the parent pe node with the ibm,dma_window property
 * Future: also check if we can remap the base window for our base page size
 *
 * returns the dma offset for use by the direct mapped DMA code.
 */
static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
{
	int len, ret;
	struct ddw_query_response query;
	struct ddw_create_response create;
	int page_shift;
	u64 dma_addr, max_addr;
	struct device_node *dn;
	u32 ddw_avail[3];
	struct direct_window *window;
	struct property *win64;
	struct dynamic_dma_window_prop *ddwprop;
	struct failed_ddw_pdn *fpdn;

	mutex_lock(&direct_window_init_mutex);

	dma_addr = find_existing_ddw(pdn);
	if (dma_addr != 0)
		goto out_unlock;

	/*
	 * If we already went through this for a previous function of
	 * the same device and failed, we don't want to muck with the
	 * DMA window again, as it will race with in-flight operations
	 * and can lead to EEHs. The above mutex protects access to the
	 * list.
	 */
	list_for_each_entry(fpdn, &failed_ddw_pdn_list, list) {
		if (fpdn->pdn == pdn)
			goto out_unlock;
	}

	/*
	 * the ibm,ddw-applicable property holds the tokens for:
	 * ibm,query-pe-dma-window
	 * ibm,create-pe-dma-window
	 * ibm,remove-pe-dma-window
	 * for the given node in that order.
	 * the property is actually in the parent, not the PE
	 */
	ret = of_property_read_u32_array(pdn, "ibm,ddw-applicable",
					 &ddw_avail[0], 3);
	if (ret)
		goto out_failed;

	/*
	 * Query if there is a second window of size to map the
	 * whole partition. Query returns number of windows, largest
	 * block assigned to PE (partition endpoint), and two bitmasks
	 * of page sizes: supported and supported for migrate-dma.
	 */
	dn = pci_device_to_OF_node(dev);
	ret = query_ddw(dev, ddw_avail, &query);
	if (ret != 0)
		goto out_failed;

	if (query.windows_available == 0) {
		/*
		 * no additional windows are available for this device.
		 * We might be able to reallocate the existing window,
		 * trading in for a larger page size.
		 */
		dev_dbg(&dev->dev, "no free dynamic windows");
		goto out_failed;
	}
	if (query.page_size & 4) {
		page_shift = 24; /* 16MB */
	} else if (query.page_size & 2) {
		page_shift = 16; /* 64kB */
	} else if (query.page_size & 1) {
		page_shift = 12; /* 4kB */
	} else {
		dev_dbg(&dev->dev, "no supported direct page size in mask %x",
			query.page_size);
		goto out_failed;
	}
	/* verify the window * number of ptes will map the partition */
	/* check largest block * page size > max memory hotplug addr */
	max_addr = ddw_memory_hotplug_max();
	if (query.largest_available_block < (max_addr >> page_shift)) {
		dev_dbg(&dev->dev, "can't map partition max 0x%llx with %u "
			"%llu-sized pages\n", max_addr, query.largest_available_block,
			1ULL << page_shift);
		goto out_failed;
	}
	len = order_base_2(max_addr);
	win64 = kzalloc(sizeof(struct property), GFP_KERNEL);
	if (!win64) {
		dev_info(&dev->dev,
			 "couldn't allocate property for 64bit dma window\n");
		goto out_failed;
	}
	win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL);
	win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL);
	win64->length = sizeof(*ddwprop);
	if (!win64->name || !win64->value) {
		dev_info(&dev->dev,
			 "couldn't allocate property name and value\n");
		goto out_free_prop;
	}

	ret = create_ddw(dev, ddw_avail, &create, page_shift, len);
	if (ret != 0)
		goto out_free_prop;

	ddwprop->liobn = cpu_to_be32(create.liobn);
	ddwprop->dma_base = cpu_to_be64(((u64)create.addr_hi << 32) |
			create.addr_lo);
	ddwprop->tce_shift = cpu_to_be32(page_shift);
	ddwprop->window_shift = cpu_to_be32(len);

	dev_dbg(&dev->dev, "created tce table LIOBN 0x%x for %pOF\n",
		create.liobn, dn);

	window = kzalloc(sizeof(*window), GFP_KERNEL);
	if (!window)
		goto out_clear_window;

	ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT,
			win64->value, tce_setrange_multi_pSeriesLP_walk);
	if (ret) {
		dev_info(&dev->dev, "failed to map direct window for %pOF: %d\n",
			 dn, ret);
		goto out_free_window;
	}

	ret = of_add_property(pdn, win64);
	if (ret) {
		dev_err(&dev->dev, "unable to add dma window property for %pOF: %d",
			pdn, ret);
		goto out_free_window;
	}

	window->device = pdn;
	window->prop = ddwprop;
	spin_lock(&direct_window_list_lock);
	list_add(&window->list, &direct_window_list);
	spin_unlock(&direct_window_list_lock);

	dma_addr = be64_to_cpu(ddwprop->dma_base);
	goto out_unlock;

out_free_window:
	kfree(window);

out_clear_window:
	remove_ddw(pdn, true);

out_free_prop:
	kfree(win64->name);
	kfree(win64->value);
	kfree(win64);

out_failed:
	fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL);
	if (!fpdn)
		goto out_unlock;
	fpdn->pdn = pdn;
	list_add(&fpdn->list, &failed_ddw_pdn_list);

out_unlock:
	mutex_unlock(&direct_window_init_mutex);
	return dma_addr;
}
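/*
 * Taken together, enable_ddw() above implements the DDW sequence: reuse an
 * already-created window if one exists, query the PE for spare window
 * resources (query_ddw), create a 64-bit window large enough to map all RAM
 * at a fixed offset (create_ddw), populate its TCEs over every RAM range,
 * publish the window as a DIRECT64_PROPNAME property and remember it on
 * direct_window_list; any failure puts the node on failed_ddw_pdn_list so
 * later functions of the same device do not retry.
 */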
static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
{
	struct device_node *pdn, *dn;
	struct iommu_table *tbl;
	const __be32 *dma_window = NULL;
	struct pci_dn *pci;

	pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));

	/* dev setup for LPAR is a little tricky, since the device tree might
	 * contain the dma-window properties per-device and not necessarily
	 * for the bus. So we need to search upwards in the tree until we
	 * either hit a dma-window property, OR find a parent with a table
	 * already allocated.
	 */
	dn = pci_device_to_OF_node(dev);
	pr_debug("  node is %pOF\n", dn);

	for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->table_group;
	     pdn = pdn->parent) {
		dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
		if (dma_window)
			break;
	}

	if (!pdn || !PCI_DN(pdn)) {
		printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: "
		       "no DMA window found for pci dev=%s dn=%pOF\n",
		       pci_name(dev), dn);
		return;
	}
	pr_debug("  parent is %pOF\n", pdn);

	pci = PCI_DN(pdn);
	if (!pci->table_group) {
		pci->table_group = iommu_pseries_alloc_group(pci->phb->node);
		tbl = pci->table_group->tables[0];
		iommu_table_setparms_lpar(pci->phb, pdn, tbl,
					  pci->table_group, dma_window);
		tbl->it_ops = &iommu_table_lpar_multi_ops;
		iommu_init_table(tbl, pci->phb->node);
		iommu_register_group(pci->table_group,
				     pci_domain_nr(pci->phb->bus), 0);
		pr_debug("  created table: %p\n", pci->table_group);
	} else {
		pr_debug("  found DMA window, table: %p\n", pci->table_group);
	}

	set_iommu_table_base(&dev->dev, pci->table_group->tables[0]);
	iommu_add_device(pci->table_group, &dev->dev);
}
static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask)
{
	struct device_node *dn = pci_device_to_OF_node(pdev), *pdn;
	const __be32 *dma_window = NULL;

	/* only attempt to use a new window if 64-bit DMA is requested */
	if (dma_mask < DMA_BIT_MASK(64))
		return false;

	dev_dbg(&pdev->dev, "node is %pOF\n", dn);

	/*
	 * the device tree might contain the dma-window properties
	 * per-device and not necessarily for the bus. So we need to
	 * search upwards in the tree until we either hit a dma-window
	 * property, OR find a parent with a table already allocated.
	 */
	for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->table_group;
	     pdn = pdn->parent) {
		dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
		if (dma_window)
			break;
	}

	if (pdn && PCI_DN(pdn)) {
		pdev->dev.archdata.dma_offset = enable_ddw(pdev, pdn);
		if (pdev->dev.archdata.dma_offset)
			return true;
	}

	return false;
}
static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
		void *data)
{
	struct direct_window *window;
	struct memory_notify *arg = data;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn,
					arg->nr_pages, window->prop);
		}
		spin_unlock(&direct_window_list_lock);
		break;
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE:
		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn,
					arg->nr_pages, window->prop);
		}
		spin_unlock(&direct_window_list_lock);
		break;
	default:
		break;
	}
	if (ret && action != MEM_CANCEL_ONLINE)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}
static struct notifier_block iommu_mem_nb = {
	.notifier_call = iommu_mem_notifier,
};
static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
	int err = NOTIFY_OK;
	struct of_reconfig_data *rd = data;
	struct device_node *np = rd->dn;
	struct pci_dn *pci = PCI_DN(np);
	struct direct_window *window;

	switch (action) {
	case OF_RECONFIG_DETACH_NODE:
		/*
		 * Removing the property will invoke the reconfig
		 * notifier again, which causes dead-lock on the
		 * read-write semaphore of the notifier chain. So
		 * we have to remove the property when releasing
		 * the device node.
		 */
		remove_ddw(np, false);
		if (pci && pci->table_group)
			iommu_pseries_free_group(pci->table_group,
					np->full_name);

		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			if (window->device == np) {
				list_del(&window->list);
				kfree(window);
				break;
			}
		}
		spin_unlock(&direct_window_list_lock);
		break;
	default:
		err = NOTIFY_DONE;
		break;
	}
	return err;
}
static struct notifier_block iommu_reconfig_nb = {
	.notifier_call = iommu_reconfig_notifier,
};
/* These are called very early. */
void iommu_init_early_pSeries(void)
{
	if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL))
		return;

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
		pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
		if (!disable_ddw)
			pseries_pci_controller_ops.iommu_bypass_supported =
				iommu_bypass_supported_pSeriesLP;
	} else {
		pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeries;
		pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeries;
	}

	of_reconfig_notifier_register(&iommu_reconfig_nb);
	register_memory_notifier(&iommu_mem_nb);

	set_pci_dma_ops(&dma_iommu_ops);
}
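/*
 * The MULTITCE firmware feature can also be turned off from the kernel
 * command line: booting with "multitce=off" makes disable_multitce() below
 * clear FW_FEATURE_MULTITCE, so the TCE build/free paths fall back to one
 * H_PUT_TCE hcall per entry instead of the batched variants.
 */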
static int __init disable_multitce(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_LPAR) &&
	    firmware_has_feature(FW_FEATURE_MULTITCE)) {
		printk(KERN_INFO "Disabling MULTITCE firmware feature\n");
		powerpc_firmware_features &= ~FW_FEATURE_MULTITCE;
	}

	return 1;
}

__setup("multitce=", disable_multitce);
static int tce_iommu_bus_notifier(struct notifier_block *nb,
		unsigned long action, void *data)
{
	struct device *dev = data;

	switch (action) {
	case BUS_NOTIFY_DEL_DEVICE:
		iommu_del_device(dev);
		return 0;
	default:
		return 0;
	}
}

static struct notifier_block tce_iommu_bus_nb = {
	.notifier_call = tce_iommu_bus_notifier,
};

static int __init tce_iommu_bus_notifier_init(void)
{
	bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
	return 0;
}
machine_subsys_initcall_sync(pseries, tce_iommu_bus_notifier_init);