/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/crash_dump.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/xics.h>
#include <asm/debug.h>

#include "powernv.h"
#include "pci.h"
#define define_pe_printk_level(func, kern_level)			\
static int func(const struct pnv_ioda_pe *pe, const char *fmt, ...)	\
{									\
	struct va_format vaf;						\
	va_list args;							\
	char pfix[32];							\
	int r;								\
									\
	va_start(args, fmt);						\
									\
	vaf.fmt = fmt;							\
	vaf.va = &args;							\
									\
	if (pe->pdev)							\
		strlcpy(pfix, dev_name(&pe->pdev->dev),			\
			sizeof(pfix));					\
	else								\
		sprintf(pfix, "%04x:%02x ",				\
			pci_domain_nr(pe->pbus),			\
			pe->pbus->number);				\
									\
	r = printk(kern_level "pci %s: [PE# %.3d] %pV",			\
		   pfix, pe->pe_number, &vaf);				\
									\
	va_end(args);							\
									\
	return r;							\
}
define_pe_printk_level(pe_err, KERN_ERR);
define_pe_printk_level(pe_warn, KERN_WARNING);
define_pe_printk_level(pe_info, KERN_INFO);
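/*
 * Illustrative usage (editorial example, not from the original source):
 * for a PE numbered 2 whose device is 0001:03:00.0,
 *
 *	pe_info(pe, "Configured TCE table\n");
 *
 * prints "pci 0001:03:00.0: [PE# 002] Configured TCE table".
 */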
/*
 * stdcix is only supposed to be used in hypervisor real mode as per
 * the architecture spec.
 */
static inline void __raw_rm_writeq(u64 val, volatile void __iomem *paddr)
{
	__asm__ __volatile__("stdcix %0,0,%1"
		: : "r" (val), "r" (paddr) : "memory");
}
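/*
 * Editorial note: this real-mode variant exists because the TCE
 * invalidation helpers below may be called with rm == true, where the
 * normal cacheable store of __raw_writeq() cannot be used; stdcix does
 * a cache-inhibited store that is safe in that context.
 */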
static int pnv_ioda_alloc_pe(struct pnv_phb *phb)
{
	unsigned long pe;

	do {
		pe = find_next_zero_bit(phb->ioda.pe_alloc,
					phb->ioda.total_pe, 0);
		if (pe >= phb->ioda.total_pe)
			return IODA_INVALID_PE;
	} while (test_and_set_bit(pe, phb->ioda.pe_alloc));

	phb->ioda.pe_array[pe].phb = phb;
	phb->ioda.pe_array[pe].pe_number = pe;

	return pe;
}
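/*
 * Editorial note: the find/test_and_set loop above is lock-free. If two
 * callers race for the same bit, the loser of test_and_set_bit() simply
 * rescans from bit 0 until it claims a free PE or the space is exhausted.
 */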
static void pnv_ioda_free_pe(struct pnv_phb *phb, int pe)
{
	WARN_ON(phb->ioda.pe_array[pe].pdev);

	memset(&phb->ioda.pe_array[pe], 0, sizeof(struct pnv_ioda_pe));
	clear_bit(pe, phb->ioda.pe_alloc);
}
/* Currently those 2 are only used when MSIs are enabled, this will change
 * but in the meantime, we need to protect them to avoid warnings
 */
#ifdef CONFIG_PCI_MSI
static struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(dev);

	if (!pdn)
		return NULL;
	if (pdn->pe_number == IODA_INVALID_PE)
		return NULL;
	return &phb->ioda.pe_array[pdn->pe_number];
}
#endif /* CONFIG_PCI_MSI */
static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
	struct pci_dev *parent;
	uint8_t bcomp, dcomp, fcomp;
	long rc, rid_end, rid;

	/* Bus validation ? */
	if (pe->pbus) {
		int count;

		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
		parent = pe->pbus->self;
		if (pe->flags & PNV_IODA_PE_BUS_ALL)
			count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
		else
			count = 1;

		switch (count) {
		case  1: bcomp = OpalPciBusAll;		break;
		case  2: bcomp = OpalPciBus7Bits;	break;
		case  4: bcomp = OpalPciBus6Bits;	break;
		case  8: bcomp = OpalPciBus5Bits;	break;
		case 16: bcomp = OpalPciBus4Bits;	break;
		case 32: bcomp = OpalPciBus3Bits;	break;
		default:
			pr_err("%s: Number of subordinate buses %d unsupported\n",
			       pci_name(pe->pbus->self), count);
			/* Do an exact match only */
			bcomp = OpalPciBusAll;
		}
		rid_end = pe->rid + (count << 8);
	} else {
		parent = pe->pdev->bus->self;
		bcomp = OpalPciBusAll;
		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
		rid_end = pe->rid + 1;
	}

	/*
	 * Associate the PE in PELT. We also need to add the PE to the
	 * corresponding PELT-V; otherwise an error originating from the
	 * PE might contribute to other PEs.
	 */
	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
			     bcomp, dcomp, fcomp, OPAL_MAP_PE);
	if (rc) {
		pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
		return -ENXIO;
	}

	rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
				pe->pe_number, OPAL_ADD_PE_TO_DOMAIN);
	if (rc)
		pe_warn(pe, "OPAL error %d adding self to PELTV\n", rc);
	opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
				  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);

	/* Add to all parents' PELT-V */
	while (parent) {
		struct pci_dn *pdn = pci_get_pdn(parent);
		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
			rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
						pe->pe_number, OPAL_ADD_PE_TO_DOMAIN);
			/* XXX What to do in case of error ? */
		}
		parent = parent->bus->self;
	}
	/* Setup reverse map */
	for (rid = pe->rid; rid < rid_end; rid++)
		phb->ioda.pe_rmap[rid] = pe->pe_number;

	/* Setup one MVE on IODA1 */
	if (phb->type == PNV_PHB_IODA1) {
		pe->mve_number = pe->pe_number;
		rc = opal_pci_set_mve(phb->opal_id, pe->mve_number,
				      pe->pe_number);
		if (rc) {
			pe_err(pe, "OPAL error %ld setting up MVE %d\n",
			       rc, pe->mve_number);
			pe->mve_number = -1;
		} else {
			rc = opal_pci_set_mve_enable(phb->opal_id,
						     pe->mve_number, OPAL_ENABLE_MVE);
			if (rc) {
				pe_err(pe, "OPAL error %ld enabling MVE %d\n",
				       rc, pe->mve_number);
				pe->mve_number = -1;
			}
		}
	} else if (phb->type == PNV_PHB_IODA2)
		pe->mve_number = 0;

	return 0;
}
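/*
 * Worked example (editorial): a PNV_IODA_PE_BUS_ALL PE covering buses
 * 0x04..0x07 has count = 4, so bcomp = OpalPciBus6Bits (only the upper
 * six bus-number bits are compared) and rid_end = pe->rid + (4 << 8),
 * i.e. the reverse map above covers 1024 consecutive RIDs.
 */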
static void pnv_ioda_link_pe_by_weight(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe)
{
	struct pnv_ioda_pe *lpe;

	list_for_each_entry(lpe, &phb->ioda.pe_dma_list, dma_link) {
		if (lpe->dma_weight < pe->dma_weight) {
			list_add_tail(&pe->dma_link, &lpe->dma_link);
			return;
		}
	}
	list_add_tail(&pe->dma_link, &phb->ioda.pe_dma_list);
}
static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev)
{
	/* This is quite simplistic. The "base" weight of a device
	 * is 10. 0 means no DMA is to be accounted for it.
	 */

	/* If it's a bridge, no DMA */
	if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return 0;

	/* Reduce the weight of slow USB controllers */
	if (dev->class == PCI_CLASS_SERIAL_USB_UHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_OHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_EHCI)
		return 3;

	/* Increase the weight of RAID (includes Obsidian) */
	if ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)
		return 15;

	/* Default */
	return 10;
}
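/*
 * Example (editorial, assuming the reconstructed weights above of 3 for
 * USB, 15 for RAID and 10 otherwise): a PE containing one EHCI controller
 * and one RAID adapter accumulates dma_weight 18 and therefore receives a
 * proportionally larger share of DMA32 segments in pnv_ioda_setup_dma().
 */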
#if 0
static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(dev);
	struct pnv_ioda_pe *pe;
	int pe_num;

	if (!pdn) {
		pr_err("%s: Device tree node not associated properly\n",
		       pci_name(dev));
		return NULL;
	}
	if (pdn->pe_number != IODA_INVALID_PE)
		return NULL;

	/* PE#0 has been pre-set */
	if (dev->bus->number == 0)
		pe_num = 0;
	else
		pe_num = pnv_ioda_alloc_pe(phb);
	if (pe_num == IODA_INVALID_PE) {
		pr_warning("%s: Not enough PE# available, disabling device\n",
			   pci_name(dev));
		return NULL;
	}

	/* NOTE: We get only one ref to the pci_dev for the pdn, not for the
	 * pointer in the PE data structure, both should be destroyed at the
	 * same time. However, this needs to be looked at more closely again
	 * once we actually start removing things (Hotplug, SR-IOV, ...)
	 *
	 * At some point we want to remove the PDN completely anyways
	 */
	pe = &phb->ioda.pe_array[pe_num];
	pci_dev_get(dev);
	pdn->pcidev = dev;
	pdn->pe_number = pe_num;
	pe->pdev = dev;
	pe->pbus = NULL;
	pe->tce32_seg = -1;
	pe->mve_number = -1;
	pe->rid = dev->bus->number << 8 | pdn->devfn;
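	/*
	 * Editorial example: a device at 02:1f.3 has bus->number 0x02 and
	 * devfn 0xfb ((0x1f << 3) | 3), so pe->rid = 0x02fb, the RID the
	 * PHB sees on inbound transactions from this function.
	 */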
	pe_info(pe, "Associated device to PE\n");

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		if (pe_num)
			pnv_ioda_free_pe(phb, pe_num);
		pdn->pe_number = IODA_INVALID_PE;
		pe->pdev = NULL;
		pci_dev_put(dev);
		return NULL;
	}

	/* Assign a DMA weight to the device */
	pe->dma_weight = pnv_ioda_dma_weight(dev);
	if (pe->dma_weight != 0) {
		phb->ioda.dma_weight += pe->dma_weight;
		phb->ioda.dma_pe_count++;
	}

	/* Link the PE */
	pnv_ioda_link_pe_by_weight(phb, pe);

	return pe;
}
#endif /* Useful for SRIOV case */
static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_dn *pdn = pci_get_pdn(dev);

		if (pdn == NULL) {
			pr_warn("%s: No device node associated with device !\n",
				pci_name(dev));
			continue;
		}
		pdn->pcidev = dev;
		pdn->pe_number = pe->pe_number;
		pe->dma_weight += pnv_ioda_dma_weight(dev);
		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
			pnv_ioda_setup_same_PE(dev->subordinate, pe);
	}
}
/*
 * There are two types of PCI-bus-sensitive PEs: one comprises a single
 * PCI bus; the other contains the primary PCI bus together with its
 * subordinate PCI devices and buses. The second type of PE is normally
 * created for a PCIe-to-PCI bridge or a PLX switch downstream port.
 */
static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pe;
	int pe_num;

	pe_num = pnv_ioda_alloc_pe(phb);
	if (pe_num == IODA_INVALID_PE) {
		pr_warning("%s: Not enough PE# available for PCI bus %04x:%02x\n",
			   __func__, pci_domain_nr(bus), bus->number);
		return;
	}

	pe = &phb->ioda.pe_array[pe_num];
	pe->flags = (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS);
	pe->pbus = bus;
	pe->pdev = NULL;
	pe->tce32_seg = -1;
	pe->mve_number = -1;
	pe->rid = bus->busn_res.start << 8;
	pe->dma_weight = 0;

	if (all)
		pe_info(pe, "Secondary bus %d..%d associated with PE#%d\n",
			bus->busn_res.start, bus->busn_res.end, pe_num);
	else
		pe_info(pe, "Secondary bus %d associated with PE#%d\n",
			bus->busn_res.start, pe_num);

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		if (pe_num)
			pnv_ioda_free_pe(phb, pe_num);
		pe->pbus = NULL;
		return;
	}

	/* Associate it with all child devices */
	pnv_ioda_setup_same_PE(bus, pe);

	/* Put PE to the list */
	list_add_tail(&pe->list, &phb->ioda.pe_list);

	/* Account for one DMA PE if at least one DMA capable device exists
	 * below the bridge
	 */
	if (pe->dma_weight != 0) {
		phb->ioda.dma_weight += pe->dma_weight;
		phb->ioda.dma_pe_count++;
	}

	/* Link the PE */
	pnv_ioda_link_pe_by_weight(phb, pe);
}
static void pnv_ioda_setup_PEs(struct pci_bus *bus)
{
	struct pci_dev *dev;

	pnv_ioda_setup_bus_PE(bus, 0);

	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (dev->subordinate) {
			if (pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE)
				pnv_ioda_setup_bus_PE(dev->subordinate, 1);
			else
				pnv_ioda_setup_PEs(dev->subordinate);
		}
	}
}
/*
 * Configure PEs so that the downstream PCI buses and devices
 * get their associated PE#. Unfortunately, we haven't figured
 * out a way to identify PLX bridges yet, so we simply put the
 * PCI bus and the subordinates behind the root port into one
 * PE here. This scheme is expected to change as soon as we can
 * detect PLX bridges correctly.
 */
static void pnv_pci_ioda_setup_PEs(void)
{
	struct pci_controller *hose, *tmp;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		pnv_ioda_setup_PEs(hose->bus);
	}
}
static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev)
{
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;

	/*
	 * The function can be called while the PE#
	 * hasn't been assigned. Do nothing for that
	 * case.
	 */
	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return;

	pe = &phb->ioda.pe_array[pdn->pe_number];
	WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
	set_iommu_table_base(&pdev->dev, &pe->tce32_table);
}
static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
				     struct pci_dev *pdev, u64 dma_mask)
{
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;
	uint64_t top;
	bool bypass = false;

	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
		return -ENODEV;

	pe = &phb->ioda.pe_array[pdn->pe_number];
	if (pe->tce_bypass_enabled) {
		top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1;
		bypass = (dma_mask >= top);
	}

	if (bypass) {
		dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n");
		set_dma_ops(&pdev->dev, &dma_direct_ops);
		set_dma_offset(&pdev->dev, pe->tce_bypass_base);
	} else {
		dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
		set_dma_ops(&pdev->dev, &dma_iommu_ops);
		set_iommu_table_base(&pdev->dev, &pe->tce32_table);
	}
	*pdev->dev.dma_mask = dma_mask;
	return 0;
}
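/*
 * Editorial example: a driver calling dma_set_mask(dev, DMA_BIT_MASK(64))
 * on a bypass-capable PE ends up on dma_direct_ops with a fixed offset of
 * tce_bypass_base (PCI address bit 59 set), while a 32-bit mask keeps the
 * device behind the 32-bit TCE table.
 */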
static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		set_iommu_table_base_and_group(&dev->dev, &pe->tce32_table);
		if (dev->subordinate)
			pnv_ioda_setup_bus_dma(pe, dev->subordinate);
	}
}
static void pnv_pci_ioda1_tce_invalidate(struct pnv_ioda_pe *pe,
					 struct iommu_table *tbl,
					 __be64 *startp, __be64 *endp, bool rm)
{
	__be64 __iomem *invalidate = rm ?
		(__be64 __iomem *)pe->tce_inval_reg_phys :
		(__be64 __iomem *)tbl->it_index;
	unsigned long start, end, inc;
	const unsigned shift = tbl->it_page_shift;

	start = __pa(startp);
	end = __pa(endp);

	/* BML uses this case for p6/p7/galaxy2: Shift addr and put in node */
	if (tbl->it_busno) {
		start <<= shift;
		end <<= shift;
		inc = 128ull << shift;
		start |= tbl->it_busno;
		end |= tbl->it_busno;
	} else if (tbl->it_type & TCE_PCI_SWINV_PAIR) {
		/* p7ioc-style invalidation, 2 TCEs per write */
		start |= (1ull << 63);
		end |= (1ull << 63);
		inc = 16;
	} else {
		/* Default (older HW) */
		inc = 128;
	}

	end |= inc - 1;	/* round up end to be different than start */

	mb(); /* Ensure above stores are visible */
	while (start <= end) {
		if (rm)
			__raw_rm_writeq(cpu_to_be64(start), invalidate);
		else
			__raw_writeq(cpu_to_be64(start), invalidate);
		start += inc;
	}

	/*
	 * The iommu layer will do another mb() for us on build()
	 * and we don't care on free()
	 */
}
static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe,
					 struct iommu_table *tbl,
					 __be64 *startp, __be64 *endp, bool rm)
{
	unsigned long start, end, inc;
	__be64 __iomem *invalidate = rm ?
		(__be64 __iomem *)pe->tce_inval_reg_phys :
		(__be64 __iomem *)tbl->it_index;
	const unsigned shift = tbl->it_page_shift;

	/* We'll invalidate DMA address in PE scope */
	start = 0x2ull << 60;
	start |= (pe->pe_number & 0xFF);
	end = start;

	/* Figure out the start, end and step */
	inc = tbl->it_offset + (((u64)startp - tbl->it_base) / sizeof(u64));
	start |= (inc << shift);
	inc = tbl->it_offset + (((u64)endp - tbl->it_base) / sizeof(u64));
	end |= (inc << shift);
	inc = (0x1ull << shift);
	mb();

	while (start <= end) {
		if (rm)
			__raw_rm_writeq(cpu_to_be64(start), invalidate);
		else
			__raw_writeq(cpu_to_be64(start), invalidate);
		start += inc;
	}
}
void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
				 __be64 *startp, __be64 *endp, bool rm)
{
	struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
					      tce32_table);
	struct pnv_phb *phb = pe->phb;

	if (phb->type == PNV_PHB_IODA1)
		pnv_pci_ioda1_tce_invalidate(pe, tbl, startp, endp, rm);
	else
		pnv_pci_ioda2_tce_invalidate(pe, tbl, startp, endp, rm);
}
static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
				      struct pnv_ioda_pe *pe, unsigned int base,
				      unsigned int segs)
{
	struct page *tce_mem = NULL;
	const __be64 *swinvp;
	struct iommu_table *tbl;
	unsigned int i;
	int64_t rc;
	void *addr;

	/* 256M DMA window, 4K TCE pages, 8 bytes TCE */
#define TCE32_TABLE_SIZE	((0x10000000 / 0x1000) * 8)
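	/* That is (0x10000000 / 0x1000) * 8 = 512KB of TCEs per segment:
	 * one 8-byte TCE for each 4K page of the 256M window (editorial
	 * note).
	 */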
	/* XXX FIXME: Handle 64-bit only DMA devices */
	/* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */
	/* XXX FIXME: Allocate multi-level tables on PHB3 */

	/* We shouldn't already have a 32-bit DMA associated */
	if (WARN_ON(pe->tce32_seg >= 0))
		return;

	/* Grab a 32-bit TCE table */
	pe->tce32_seg = base;
	pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n",
		(base << 28), ((base + segs) << 28) - 1);

	/* XXX Currently, we allocate one big contiguous table for the
	 * TCEs. We only really need one chunk per 256M of TCE space
	 * (ie per segment) but that's an optimization for later, it
	 * requires some added smarts with our get/put_tce implementation
	 */
	tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
				   get_order(TCE32_TABLE_SIZE * segs));
	if (!tce_mem) {
		pe_err(pe, " Failed to allocate a 32-bit TCE memory\n");
		goto fail;
	}
	addr = page_address(tce_mem);
	memset(addr, 0, TCE32_TABLE_SIZE * segs);

	/* Configure HW */
	for (i = 0; i < segs; i++) {
		rc = opal_pci_map_pe_dma_window(phb->opal_id,
						pe->pe_number,
						base + i, 1,
						__pa(addr) + TCE32_TABLE_SIZE * i,
						TCE32_TABLE_SIZE, 0x1000);
		if (rc) {
			pe_err(pe, " Failed to configure 32-bit TCE table,"
			       " err %ld\n", rc);
			goto fail;
		}
	}

	/* Setup linux iommu table */
	tbl = &pe->tce32_table;
	pnv_pci_setup_iommu_table(tbl, addr, TCE32_TABLE_SIZE * segs,
				  base << 28);

	/* OPAL variant of P7IOC SW invalidated TCEs */
	swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
	if (swinvp) {
		/* We need a couple more fields -- an address and a data
		 * to or. Since the bus is only printed out on table free
		 * errors, and on the first pass the data will be a relative
		 * bus number, print that out instead.
		 */
		pe->tce_inval_reg_phys = be64_to_cpup(swinvp);
		tbl->it_index = (unsigned long)ioremap(pe->tce_inval_reg_phys,
				8);
		tbl->it_type |= (TCE_PCI_SWINV_CREATE |
				 TCE_PCI_SWINV_FREE   |
				 TCE_PCI_SWINV_PAIR);
	}
	iommu_init_table(tbl, phb->hose->node);
	iommu_register_group(tbl, phb->hose->global_number, pe->pe_number);

	if (pe->pdev)
		set_iommu_table_base_and_group(&pe->pdev->dev, tbl);
	else
		pnv_ioda_setup_bus_dma(pe, pe->pbus);

	return;
 fail:
	/* XXX Failure: Try to fallback to 64-bit only ? */
	if (pe->tce32_seg >= 0)
		pe->tce32_seg = -1;
	if (tce_mem)
		__free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));
}
static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable)
{
	struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
					      tce32_table);
	uint16_t window_id = (pe->pe_number << 1) + 1;
	int64_t rc;
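	/*
	 * Editorial note: each PE owns a pair of TVEs. The even index
	 * (pe_number << 1) holds the 32-bit TCE window and the odd one,
	 * computed here, holds the bypass window selected by PCI address
	 * bit 59 (see pnv_pci_ioda2_setup_bypass_pe() below).
	 */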
	pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis");
	if (enable) {
		phys_addr_t top = memblock_end_of_DRAM();

		top = roundup_pow_of_two(top);
		rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
						     pe->pe_number,
						     window_id,
						     pe->tce_bypass_base,
						     top);
	} else {
		rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
						     pe->pe_number,
						     window_id,
						     pe->tce_bypass_base,
						     0);

		/*
		 * We might want to reset the DMA ops of all devices on
		 * this PE. However in theory, that shouldn't be necessary
		 * as this is used for VFIO/KVM pass-through and the device
		 * hasn't yet been returned to its kernel driver
		 */
	}
	if (rc)
		pe_err(pe, "OPAL error %lld configuring bypass window\n", rc);
	else
		pe->tce_bypass_enabled = enable;
}
static void pnv_pci_ioda2_setup_bypass_pe(struct pnv_phb *phb,
					  struct pnv_ioda_pe *pe)
{
	/* TVE #1 is selected by PCI address bit 59 */
	pe->tce_bypass_base = 1ull << 59;

	/* Install set_bypass callback for VFIO */
	pe->tce32_table.set_bypass = pnv_pci_ioda2_set_bypass;

	/* Enable bypass by default */
	pnv_pci_ioda2_set_bypass(&pe->tce32_table, true);
}
static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe)
{
	struct page *tce_mem = NULL;
	void *addr;
	const __be64 *swinvp;
	struct iommu_table *tbl;
	unsigned int tce_table_size, end;
	int64_t rc;

	/* We shouldn't already have a 32-bit DMA associated */
	if (WARN_ON(pe->tce32_seg >= 0))
		return;

	/* The PE will reserve all possible 32-bits space */
	pe->tce32_seg = 0;
	end = (1 << ilog2(phb->ioda.m32_pci_base));
	tce_table_size = (end / 0x1000) * 8;
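	/* Editorial example: with a 2GB M32 space, end = 0x80000000 and
	 * tce_table_size = (0x80000000 / 0x1000) * 8 = 4MB of TCEs, enough
	 * to map the whole 32-bit DMA range with 4K pages.
	 */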
	pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n",
		end);

	/* Allocate TCE table */
	tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
				   get_order(tce_table_size));
	if (!tce_mem) {
		pe_err(pe, "Failed to allocate a 32-bit TCE memory\n");
		goto fail;
	}
	addr = page_address(tce_mem);
	memset(addr, 0, tce_table_size);

	/*
	 * Map TCE table through TVT. The TVE index is the PE number
	 * shifted by 1 bit for 32-bits DMA space.
	 */
	rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
					pe->pe_number << 1, 1, __pa(addr),
					tce_table_size, 0x1000);
	if (rc) {
		pe_err(pe, "Failed to configure 32-bit TCE table,"
		       " err %ld\n", rc);
		goto fail;
	}

	/* Setup linux iommu table */
	tbl = &pe->tce32_table;
	pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, 0);

	/* OPAL variant of PHB3 invalidated TCEs */
	swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
	if (swinvp) {
		/* We need a couple more fields -- an address and a data
		 * to or. Since the bus is only printed out on table free
		 * errors, and on the first pass the data will be a relative
		 * bus number, print that out instead.
		 */
		pe->tce_inval_reg_phys = be64_to_cpup(swinvp);
		tbl->it_index = (unsigned long)ioremap(pe->tce_inval_reg_phys,
				8);
		tbl->it_type |= (TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE);
	}
	iommu_init_table(tbl, phb->hose->node);
	iommu_register_group(tbl, phb->hose->global_number, pe->pe_number);

	if (pe->pdev)
		set_iommu_table_base_and_group(&pe->pdev->dev, tbl);
	else
		pnv_ioda_setup_bus_dma(pe, pe->pbus);

	/* Also create a bypass window */
	pnv_pci_ioda2_setup_bypass_pe(phb, pe);
	return;
fail:
	if (pe->tce32_seg >= 0)
		pe->tce32_seg = -1;
	if (tce_mem)
		__free_pages(tce_mem, get_order(tce_table_size));
}
static void pnv_ioda_setup_dma(struct pnv_phb *phb)
{
	struct pci_controller *hose = phb->hose;
	unsigned int residual, remaining, segs, tw, base;
	struct pnv_ioda_pe *pe;

	/* If we have more PE# than segments available, hand out one
	 * per PE until we run out and let the rest fail. If not,
	 * then we assign at least one segment per PE, plus more based
	 * on the amount of devices under that PE
	 */
	if (phb->ioda.dma_pe_count > phb->ioda.tce32_count)
		residual = 0;
	else
		residual = phb->ioda.tce32_count -
			phb->ioda.dma_pe_count;

	pr_info("PCI: Domain %04x has %ld available 32-bit DMA segments\n",
		hose->global_number, phb->ioda.tce32_count);
	pr_info("PCI: %d PE# for a total weight of %d\n",
		phb->ioda.dma_pe_count, phb->ioda.dma_weight);

	/* Walk our PE list and configure their DMA segments, hand them
	 * out one base segment plus any residual segments based on
	 * weight
	 */
	remaining = phb->ioda.tce32_count;
	tw = phb->ioda.dma_weight;
	base = 0;
	list_for_each_entry(pe, &phb->ioda.pe_dma_list, dma_link) {
		if (!pe->dma_weight)
			continue;
		if (!remaining) {
			pe_warn(pe, "No DMA32 resources available\n");
			continue;
		}
		segs = 1;
		if (residual) {
			segs += ((pe->dma_weight * residual) + (tw / 2)) / tw;
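			/* Editorial example of the rounded share: with
			 * residual = 14, tw = 28 and pe->dma_weight = 18,
			 * this adds ((18 * 14) + 14) / 28 = 9 extra segments
			 * on top of the base one, clamped below to what
			 * actually remains.
			 */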
			if (segs > remaining)
				segs = remaining;
		}

		/*
		 * For IODA2 compliant PHB3, we needn't care about the weight.
		 * All the available 32-bit DMA space will be assigned to
		 * the specific PE.
		 */
		if (phb->type == PNV_PHB_IODA1) {
			pe_info(pe, "DMA weight %d, assigned %d DMA32 segments\n",
				pe->dma_weight, segs);
			pnv_pci_ioda_setup_dma_pe(phb, pe, base, segs);
		} else {
			pe_info(pe, "Assign DMA32 space\n");
			segs = 0;
			pnv_pci_ioda2_setup_dma_pe(phb, pe);
		}

		remaining -= segs;
		base += segs;
	}
}
#ifdef CONFIG_PCI_MSI
static void pnv_ioda2_msi_eoi(struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct pnv_phb *phb = container_of(chip, struct pnv_phb,
					   ioda.irq_chip);
	int64_t rc;

	rc = opal_pci_msi_eoi(phb->opal_id, hw_irq);
	WARN_ON_ONCE(rc);

	icp_native_eoi(d);
}
static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
				  unsigned int hwirq, unsigned int virq,
				  unsigned int is_64, struct msi_msg *msg)
{
	struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
	struct pci_dn *pdn = pci_get_pdn(dev);
	struct irq_data *idata;
	struct irq_chip *ichip;
	unsigned int xive_num = hwirq - phb->msi_base;
	__be32 data;
	int rc;

	/* No PE assigned ? bail out ... no MSI for you ! */
	if (pe == NULL)
		return -ENXIO;

	/* Check if we have an MVE */
	if (pe->mve_number < 0)
		return -ENXIO;

	/* Force 32-bit MSI on some broken devices */
	if (pdn && pdn->force_32bit_msi)
		is_64 = 0;

	/* Assign XIVE to PE */
	rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
	if (rc) {
		pr_warn("%s: OPAL error %d setting XIVE %d PE\n",
			pci_name(dev), rc, xive_num);
		return -EIO;
	}

	if (is_64) {
		__be64 addr64;

		rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1,
				     &addr64, &data);
		if (rc) {
			pr_warn("%s: OPAL error %d getting 64-bit MSI data\n",
				pci_name(dev), rc);
			return -EIO;
		}
		msg->address_hi = be64_to_cpu(addr64) >> 32;
		msg->address_lo = be64_to_cpu(addr64) & 0xfffffffful;
	} else {
		__be32 addr32;

		rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1,
				     &addr32, &data);
		if (rc) {
			pr_warn("%s: OPAL error %d getting 32-bit MSI data\n",
				pci_name(dev), rc);
			return -EIO;
		}
		msg->address_hi = 0;
		msg->address_lo = be32_to_cpu(addr32);
	}
	msg->data = be32_to_cpu(data);

	/*
	 * Change the IRQ chip for the MSI interrupts on PHB3.
	 * The corresponding IRQ chip should be populated for
	 * the first time.
	 */
	if (phb->type == PNV_PHB_IODA2) {
		if (!phb->ioda.irq_chip_init) {
			idata = irq_get_irq_data(virq);
			ichip = irq_data_get_irq_chip(idata);
			phb->ioda.irq_chip_init = 1;
			phb->ioda.irq_chip = *ichip;
			phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi;
		}
		irq_set_chip(virq, &phb->ioda.irq_chip);
	}

	pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
		 " address=%x_%08x data=%x PE# %d\n",
		 pci_name(dev), is_64 ? "64" : "32", hwirq, xive_num,
		 msg->address_hi, msg->address_lo, data, pe->pe_number);

	return 0;
}
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
{
	unsigned int count;
	const __be32 *prop = of_get_property(phb->hose->dn,
					     "ibm,opal-msi-ranges", NULL);
	if (!prop) {
		/* BML Fallback */
		prop = of_get_property(phb->hose->dn, "msi-ranges", NULL);
	}
	if (!prop)
		return;

	phb->msi_base = be32_to_cpup(prop);
	count = be32_to_cpup(prop + 1);
	if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) {
		pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
		       phb->hose->global_number);
		return;
	}

	phb->msi_setup = pnv_pci_ioda_msi_setup;
	phb->msi32_support = 1;
	pr_info("  Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
		count, phb->msi_base);
}
#else
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
#endif /* CONFIG_PCI_MSI */
/*
 * This function is supposed to be called on a per-PE basis, from top
 * to bottom, so that the I/O or MMIO segment assigned to a parent PE
 * can be overridden by its child PEs if necessary.
 */
static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
				  struct pnv_ioda_pe *pe)
{
	struct pnv_phb *phb = hose->private_data;
	struct pci_bus_region region;
	struct resource *res;
	int i, index;
	int rc;

	/*
	 * NOTE: We only care about PCI-bus-based PEs for now. PCI-device-based
	 * PEs, e.g. SR-IOV sensitive VFs, will be figured out later.
	 */
	BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)));

	pci_bus_for_each_resource(pe->pbus, res, i) {
		if (!res || !res->flags ||
		    res->start > res->end)
			continue;

		if (res->flags & IORESOURCE_IO) {
			region.start = res->start - phb->ioda.io_pci_base;
			region.end   = res->end - phb->ioda.io_pci_base;
			index = region.start / phb->ioda.io_segsize;

			while (index < phb->ioda.total_pe &&
			       region.start <= region.end) {
				phb->ioda.io_segmap[index] = pe->pe_number;
				rc = opal_pci_map_pe_mmio_window(phb->opal_id,
					pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
				if (rc != OPAL_SUCCESS) {
					pr_err("%s: OPAL error %d when mapping IO "
					       "segment #%d to PE#%d\n",
					       __func__, rc, index, pe->pe_number);
					break;
				}

				region.start += phb->ioda.io_segsize;
				index++;
			}
		} else if (res->flags & IORESOURCE_MEM) {
			/* WARNING: Assumes M32 is mem region 0 in PHB. We need to
			 * harden that algorithm when we start supporting M64
			 */
			region.start = res->start -
				       hose->mem_offset[0] -
				       phb->ioda.m32_pci_base;
			region.end   = res->end -
				       hose->mem_offset[0] -
				       phb->ioda.m32_pci_base;
			index = region.start / phb->ioda.m32_segsize;

			while (index < phb->ioda.total_pe &&
			       region.start <= region.end) {
				phb->ioda.m32_segmap[index] = pe->pe_number;
				rc = opal_pci_map_pe_mmio_window(phb->opal_id,
					pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
				if (rc != OPAL_SUCCESS) {
					pr_err("%s: OPAL error %d when mapping M32 "
					       "segment#%d to PE#%d",
					       __func__, rc, index, pe->pe_number);
					break;
				}

				region.start += phb->ioda.m32_segsize;
				index++;
			}
		}
	}
}
static void pnv_pci_ioda_setup_seg(void)
{
	struct pci_controller *tmp, *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		phb = hose->private_data;
		list_for_each_entry(pe, &phb->ioda.pe_list, list) {
			pnv_ioda_setup_pe_seg(hose, pe);
		}
	}
}
static void pnv_pci_ioda_setup_DMA(void)
{
	struct pci_controller *hose, *tmp;
	struct pnv_phb *phb;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		pnv_ioda_setup_dma(hose->private_data);

		/* Mark the PHB initialization done */
		phb = hose->private_data;
		phb->initialized = 1;
	}
}
static void pnv_pci_ioda_create_dbgfs(void)
{
#ifdef CONFIG_DEBUG_FS
	struct pci_controller *hose, *tmp;
	struct pnv_phb *phb;
	char name[16];

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		phb = hose->private_data;

		sprintf(name, "PCI%04x", hose->global_number);
		phb->dbgfs = debugfs_create_dir(name, powerpc_debugfs_root);
		if (!phb->dbgfs)
			pr_warning("%s: Error on creating debugfs on PHB#%x\n",
				   __func__, hose->global_number);
	}
#endif /* CONFIG_DEBUG_FS */
}
static void pnv_pci_ioda_fixup(void)
{
	pnv_pci_ioda_setup_PEs();
	pnv_pci_ioda_setup_seg();
	pnv_pci_ioda_setup_DMA();

	pnv_pci_ioda_create_dbgfs();

#ifdef CONFIG_EEH
	eeh_probe_mode_set(EEH_PROBE_MODE_DEV);
	eeh_addr_cache_build();
	eeh_init();
#endif
}
/*
 * Returns the alignment for I/O or memory windows for P2P
 * bridges. That actually depends on how PEs are segmented.
 * For now, we return I/O or M32 segment size for PE sensitive
 * P2P bridges. Otherwise, the default values (4KiB for I/O,
 * 1MiB for memory) will be returned.
 *
 * The current PCI bus might be put into one PE, which was
 * created against the parent PCI bridge. In that case, we
 * needn't enlarge the alignment, which saves some resources.
 */
static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
						unsigned long type)
{
	struct pci_dev *bridge;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	int num_pci_bridges = 0;

	bridge = bus->self;
	while (bridge) {
		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) {
			num_pci_bridges++;
			if (num_pci_bridges >= 2)
				return 1;
		}

		bridge = bridge->bus->self;
	}

	/* We need to support prefetchable memory windows later */
	if (type & IORESOURCE_MEM)
		return phb->ioda.m32_segsize;

	return phb->ioda.io_segsize;
}
/* Prevent enabling devices for which we couldn't properly
 * assign a PE
 */
static int pnv_pci_enable_device_hook(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn;

	/* The function is probably called while the PEs have
	 * not been created yet. For example, resource reassignment
	 * during PCI probe period. We just skip the check if
	 * PEs are not ready.
	 */
	if (!phb->initialized)
		return 0;

	pdn = pci_get_pdn(dev);
	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return -EINVAL;

	return 0;
}
static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus,
			       u32 devfn)
{
	return phb->ioda.pe_rmap[(bus->number << 8) | devfn];
}
static void pnv_pci_ioda_shutdown(struct pnv_phb *phb)
{
	opal_pci_reset(phb->opal_id, OPAL_PCI_IODA_TABLE_RESET,
		       OPAL_ASSERT_RESET);
}
void __init pnv_pci_init_ioda_phb(struct device_node *np,
				  u64 hub_id, int ioda_type)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	unsigned long size, m32map_off, pemap_off, iomap_off = 0;
	const __be64 *prop64;
	const __be32 *prop32;
	int len;
	u64 phb_id;
	void *aux;
	long rc;

	pr_info("Initializing IODA%d OPAL PHB %s\n", ioda_type, np->full_name);

	prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
	if (!prop64) {
		pr_err("  Missing \"ibm,opal-phbid\" property !\n");
		return;
	}
	phb_id = be64_to_cpup(prop64);
	pr_debug("  PHB-ID  : 0x%016llx\n", phb_id);

	phb = alloc_bootmem(sizeof(struct pnv_phb));
	if (!phb) {
		pr_err("  Out of memory !\n");
		return;
	}

	/* Allocate PCI controller */
	memset(phb, 0, sizeof(struct pnv_phb));
	phb->hose = hose = pcibios_alloc_controller(np);
	if (!phb->hose) {
		pr_err("  Can't allocate PCI controller for %s\n",
		       np->full_name);
		free_bootmem((unsigned long)phb, sizeof(struct pnv_phb));
		return;
	}

	spin_lock_init(&phb->lock);
	prop32 = of_get_property(np, "bus-range", &len);
	if (prop32 && len == 8) {
		hose->first_busno = be32_to_cpu(prop32[0]);
		hose->last_busno = be32_to_cpu(prop32[1]);
	} else {
		pr_warn("  Broken <bus-range> on %s\n", np->full_name);
		hose->first_busno = 0;
		hose->last_busno = 0xff;
	}
	hose->private_data = phb;
	phb->hub_id = hub_id;
	phb->opal_id = phb_id;
	phb->type = ioda_type;

	/* Detect specific models for error handling */
	if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
		phb->model = PNV_PHB_MODEL_P7IOC;
	else if (of_device_is_compatible(np, "ibm,power8-pciex"))
		phb->model = PNV_PHB_MODEL_PHB3;
	else
		phb->model = PNV_PHB_MODEL_UNKNOWN;

	/* Parse 32-bit and IO ranges (if any) */
	pci_process_bridge_OF_ranges(hose, np, !hose->global_number);

	/* Get registers */
	phb->regs = of_iomap(np, 0);
	if (phb->regs == NULL)
		pr_err("  Failed to map registers !\n");

	/* Initialize more IODA stuff */
	phb->ioda.total_pe = 1;
	prop32 = of_get_property(np, "ibm,opal-num-pes", NULL);
	if (prop32)
		phb->ioda.total_pe = be32_to_cpup(prop32);
	prop32 = of_get_property(np, "ibm,opal-reserved-pe", NULL);
	if (prop32)
		phb->ioda.reserved_pe = be32_to_cpup(prop32);
	phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
	/* FW has already taken the top 64k of the M32 space (MSI space) off */
	phb->ioda.m32_size += 0x10000;

	phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe;
	phb->ioda.m32_pci_base = hose->mem_resources[0].start - hose->mem_offset[0];
	phb->ioda.io_size = hose->pci_io_size;
	phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe;
	phb->ioda.io_pci_base = 0; /* XXX calculate this ? */

	/* Allocate aux data & arrays. We don't have IO ports on PHB3 */
	size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
	m32map_off = size;
	size += phb->ioda.total_pe * sizeof(phb->ioda.m32_segmap[0]);
	if (phb->type == PNV_PHB_IODA1) {
		iomap_off = size;
		size += phb->ioda.total_pe * sizeof(phb->ioda.io_segmap[0]);
	}
	pemap_off = size;
	size += phb->ioda.total_pe * sizeof(struct pnv_ioda_pe);
	aux = alloc_bootmem(size);
	memset(aux, 0, size);
	phb->ioda.pe_alloc = aux;
	phb->ioda.m32_segmap = aux + m32map_off;
	if (phb->type == PNV_PHB_IODA1)
		phb->ioda.io_segmap = aux + iomap_off;
	phb->ioda.pe_array = aux + pemap_off;
	set_bit(phb->ioda.reserved_pe, phb->ioda.pe_alloc);

	INIT_LIST_HEAD(&phb->ioda.pe_dma_list);
	INIT_LIST_HEAD(&phb->ioda.pe_list);

	/* Calculate how many 32-bit TCE segments we have */
	phb->ioda.tce32_count = phb->ioda.m32_pci_base >> 28;
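	/* Editorial example: a 2GB m32_pci_base gives 0x80000000 >> 28 = 8
	 * segments of 256M each for IODA1 32-bit DMA windows.
	 */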
	/* Clear unusable m64 */
	hose->mem_resources[1].flags = 0;
	hose->mem_resources[1].start = 0;
	hose->mem_resources[1].end = 0;
	hose->mem_resources[2].flags = 0;
	hose->mem_resources[2].start = 0;
	hose->mem_resources[2].end = 0;

#if 0 /* We should really do that ... */
	rc = opal_pci_set_phb_mem_window(opal->phb_id,
					 window_type,
					 window_num,
					 starting_real_address,
					 starting_pci_address,
					 segment_size);
#endif

	pr_info("  %d (%d) PE's M32: 0x%x [segment=0x%x]"
		" IO: 0x%x [segment=0x%x]\n",
		phb->ioda.total_pe,
		phb->ioda.reserved_pe,
		phb->ioda.m32_size, phb->ioda.m32_segsize,
		phb->ioda.io_size, phb->ioda.io_segsize);

	phb->hose->ops = &pnv_pci_ops;
#ifdef CONFIG_EEH
	phb->eeh_ops = &ioda_eeh_ops;
#endif

	/* Setup RID -> PE mapping function */
	phb->bdfn_to_pe = pnv_ioda_bdfn_to_pe;

	/* Setup TCEs */
	phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
	phb->dma_set_mask = pnv_pci_ioda_dma_set_mask;

	/* Setup shutdown function for kexec */
	phb->shutdown = pnv_pci_ioda_shutdown;

	/* Setup MSI support */
	pnv_pci_init_ioda_msis(phb);

	/*
	 * We pass the PCI probe flag PCI_REASSIGN_ALL_RSRC here
	 * to let the PCI core do resource assignment. The PCI core
	 * is expected to do correct I/O and MMIO alignment for the
	 * P2P bridge BARs so that each PCI bus (excluding the child
	 * P2P bridges) can form an individual PE.
	 */
	ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
	ppc_md.pcibios_enable_device_hook = pnv_pci_enable_device_hook;
	ppc_md.pcibios_window_alignment = pnv_pci_window_alignment;
	ppc_md.pcibios_reset_secondary_bus = pnv_pci_reset_secondary_bus;
	pci_add_flags(PCI_REASSIGN_ALL_RSRC);

	/* Reset IODA tables to a clean state */
	rc = opal_pci_reset(phb_id, OPAL_PCI_IODA_TABLE_RESET, OPAL_ASSERT_RESET);
	if (rc)
		pr_warning("  OPAL Error %ld performing IODA table reset !\n", rc);

	/* If we're running in a kdump kernel, the previous kernel never
	 * shut down PCI devices correctly. We already got the IODA table
	 * cleaned out, so we have to issue a PHB reset to stop all PCI
	 * transactions from the previous kernel.
	 */
	if (is_kdump_kernel()) {
		pr_info("  Issue PHB reset ...\n");
		ioda_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL);
		ioda_eeh_phb_reset(hose, OPAL_DEASSERT_RESET);
	}
}
void __init pnv_pci_init_ioda2_phb(struct device_node *np)
{
	pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2);
}
void __init pnv_pci_init_ioda_hub(struct device_node *np)
{
	struct device_node *phbn;
	const __be64 *prop64;
	u64 hub_id;

	pr_info("Probing IODA IO-Hub %s\n", np->full_name);

	prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
	if (!prop64) {
		pr_err(" Missing \"ibm,opal-hubid\" property !\n");
		return;
	}
	hub_id = be64_to_cpup(prop64);
	pr_devel(" HUB-ID : 0x%016llx\n", hub_id);

	/* Count child PHBs */
	for_each_child_of_node(np, phbn) {
		/* Look for IODA1 PHBs */
		if (of_device_is_compatible(phbn, "ibm,ioda-phb"))
			pnv_pci_init_ioda_phb(phbn, hub_id, PNV_PHB_IODA1);
	}
}