/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Currently supports only P5IOC2
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/iommu.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/firmware.h>
#include <asm/eeh_event.h>
#include <asm/eeh.h>

#include "powernv.h"
#include "pci.h"
/* Delay in usec */
#define PCI_RESET_DELAY_US	3000000

#define cfg_dbg(fmt...)	do { } while(0)
//#define cfg_dbg(fmt...)	printk(fmt)
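/*
 * MSI setup walkthrough (a sketch of the allocation path implemented
 * below): each PHB owns a bitmap of hardware MSI numbers in
 * phb->msi_bmp. For every MSI descriptor on a device we grab one
 * hwirq from that bitmap, map it into the Linux irq space, then let
 * the PHB-specific msi_setup() hook compose the message:
 *
 *	hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, 1);
 *	virq  = irq_create_mapping(NULL, phb->msi_base + hwirq);
 *	phb->msi_setup(phb, pdev, phb->msi_base + hwirq, virq, is_64, &msg);
 *
 * pnv_teardown_msi_irqs() undoes the same steps in reverse order.
 */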
#ifdef CONFIG_PCI_MSI
static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct msi_desc *entry;
	struct msi_msg msg;
	int hwirq;
	unsigned int virq;
	int rc;

	if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
		return -ENODEV;

	if (pdn && pdn->force_32bit_msi && !phb->msi32_support)
		return -ENODEV;

	list_for_each_entry(entry, &pdev->msi_list, list) {
		if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
			pr_warn("%s: Supports only 64-bit MSIs\n",
				pci_name(pdev));
			return -ENXIO;
		}
		hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, 1);
		if (hwirq < 0) {
			pr_warn("%s: Failed to find a free MSI\n",
				pci_name(pdev));
			return -ENOSPC;
		}
		virq = irq_create_mapping(NULL, phb->msi_base + hwirq);
		if (virq == NO_IRQ) {
			pr_warn("%s: Failed to map MSI to linux irq\n",
				pci_name(pdev));
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return -ENOMEM;
		}
		rc = phb->msi_setup(phb, pdev, phb->msi_base + hwirq,
				    virq, entry->msi_attrib.is_64, &msg);
		if (rc) {
			pr_warn("%s: Failed to setup MSI\n", pci_name(pdev));
			irq_dispose_mapping(virq);
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return rc;
		}
		irq_set_msi_desc(virq, entry);
		write_msi_msg(virq, &msg);
	}
	return 0;
}
static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;

	if (WARN_ON(!phb))
		return;

	list_for_each_entry(entry, &pdev->msi_list, list) {
		if (entry->irq == NO_IRQ)
			continue;
		irq_set_msi_desc(entry->irq, NULL);
		msi_bitmap_free_hwirqs(&phb->msi_bmp,
			virq_to_hw(entry->irq) - phb->msi_base, 1);
		irq_dispose_mapping(entry->irq);
	}
}
#endif /* CONFIG_PCI_MSI */
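/*
 * The dumpers below decode the opaque diag-data blob returned by
 * opal_pci_get_phb_diag_data2(). The blob starts with a common header
 * (struct OpalIoPhbErrorCommon) whose ioType field selects the
 * chip-specific layout; every field arrives big-endian, hence the
 * be32_to_cpu()/be64_to_cpu() conversions before printing.
 */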
static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
					 struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoP7IOCPhbErrorData *data;
	int i;

	data = (struct OpalIoP7IOCPhbErrorData *)common;
	pr_info("P7IOC PHB#%d Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));

	if (data->brdgCtl)
		pr_info("brdgCtl:     %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts:      %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus   ||
	    data->linkStatus   || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts:     %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts:  %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog:  %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->p7iocPlssr || data->p7iocCsr)
		pr_info("PhbSts:      %016llx %016llx\n",
			be64_to_cpu(data->p7iocPlssr),
			be64_to_cpu(data->p7iocCsr));
	if (data->lemFir)
		pr_info("Lem:         %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
		if ((data->pestA[i] >> 63) == 0 &&
		    (data->pestB[i] >> 63) == 0)
			continue;

		pr_info("PE[%3d] A/B: %016llx %016llx\n",
			i, be64_to_cpu(data->pestA[i]),
			be64_to_cpu(data->pestB[i]));
	}
}
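/*
 * Note on the PEST loop above: the top bit of a pestA/pestB entry
 * appears to mark it as carrying valid freeze state, so entries with
 * both top bits clear are skipped to keep the log short.
 */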
static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
					struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoPhb3ErrorData *data;
	int i;

	data = (struct OpalIoPhb3ErrorData *)common;
	pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));

	if (data->brdgCtl)
		pr_info("brdgCtl:     %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts:      %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus   ||
	    data->linkStatus   || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts:     %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts:  %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog:  %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->nFir)
		pr_info("nFir:        %016llx %016llx %016llx\n",
			be64_to_cpu(data->nFir),
			be64_to_cpu(data->nFirMask),
			be64_to_cpu(data->nFirWOF));
	if (data->phbPlssr || data->phbCsr)
		pr_info("PhbSts:      %016llx %016llx\n",
			be64_to_cpu(data->phbPlssr),
			be64_to_cpu(data->phbCsr));
	if (data->lemFir)
		pr_info("Lem:         %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) {
		if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 &&
		    (be64_to_cpu(data->pestB[i]) >> 63) == 0)
			continue;

		pr_info("PE[%3d] A/B: %016llx %016llx\n",
			i, be64_to_cpu(data->pestA[i]),
			be64_to_cpu(data->pestB[i]));
	}
}
void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
				unsigned char *log_buff)
{
	struct OpalIoPhbErrorCommon *common;

	if (!hose || !log_buff)
		return;

	common = (struct OpalIoPhbErrorCommon *)log_buff;
	switch (be32_to_cpu(common->ioType)) {
	case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
		pnv_pci_dump_p7ioc_diag_data(hose, common);
		break;
	case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
		pnv_pci_dump_phb3_diag_data(hose, common);
		break;
	default:
		pr_warn("%s: Unrecognized ioType %d\n",
			__func__, be32_to_cpu(common->ioType));
	}
}
static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
{
	unsigned long flags, rc;
	int has_diag, ret = 0;

	spin_lock_irqsave(&phb->lock, flags);

	/* Fetch PHB diag-data */
	rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
					 PNV_PCI_DIAG_BUF_SIZE);
	has_diag = (rc == OPAL_SUCCESS);

	/* If the PHB supports compound PEs, let its hook do the unfreeze */
	if (phb->unfreeze_pe) {
		ret = phb->unfreeze_pe(phb,
				       pe_no,
				       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
	} else {
		rc = opal_pci_eeh_freeze_clear(phb->opal_id,
					       pe_no,
					       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		if (rc) {
			pr_warn("%s: Failure %ld clearing frozen "
				"PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				pe_no);
			ret = -EIO;
		}
	}

	/*
	 * For now, let's only display the diag buffer when we fail to clear
	 * the EEH status. We'll do more sensible things later when we have
	 * proper EEH support. We need to make sure we don't pollute ourselves
	 * with the normal errors generated when probing empty slots.
	 */
	if (has_diag && ret)
		pnv_pci_dump_phb_diag_data(phb->hose, phb->diag.blob);

	spin_unlock_irqrestore(&phb->lock, flags);
}
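/*
 * The diag buffer lives in the phb structure rather than on the stack
 * (it can be fairly large), which is why phb->lock stays held from the
 * opal_pci_get_phb_diag_data2() call until the buffer has been dumped.
 */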
static void pnv_pci_config_check_eeh(struct pnv_phb *phb,
				     struct device_node *dn)
{
	u8 fstate;
	__be16 pcierr;
	int pe_no;
	s64 rc;

	/*
	 * Get the PE#. During the PCI probe stage, we might not have
	 * set that up yet, so map all ER errors to the reserved PE.
	 */
	pe_no = PCI_DN(dn)->pe_number;
	if (pe_no == IODA_INVALID_PE) {
		if (phb->type == PNV_PHB_P5IOC2)
			pe_no = 0;
		else
			pe_no = phb->ioda.reserved_pe;
	}

	/*
	 * Fetch the frozen state. If the PHB supports compound PEs,
	 * we need to handle that case too.
	 */
	if (phb->get_pe_state) {
		fstate = phb->get_pe_state(phb, pe_no);
	} else {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						pe_no,
						&fstate,
						&pcierr,
						NULL);
		if (rc) {
			pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
				__func__, rc, phb->hose->global_number, pe_no);
			return;
		}
	}

	cfg_dbg(" -> EEH check, bdfn=%04x PE#%d fstate=%x\n",
		(PCI_DN(dn)->busno << 8) | (PCI_DN(dn)->devfn),
		pe_no, fstate);

	/* Clear the frozen state if applicable */
	if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE ||
	    fstate == OPAL_EEH_STOPPED_DMA_FREEZE  ||
	    fstate == OPAL_EEH_STOPPED_MMIO_DMA_FREEZE) {
		/*
		 * If the PHB supports compound PEs, freeze it
		 * for consistency.
		 */
		if (phb->freeze_pe)
			phb->freeze_pe(phb, pe_no);

		pnv_pci_handle_eeh_config(phb, pe_no);
	}
}
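/*
 * OPAL addresses config space with a "bdfn": the bus number in bits
 * 15:8 and devfn in bits 7:0. For example bus 0x02, device 3,
 * function 1 gives devfn = (3 << 3) | 1 = 0x19, so bdfn = 0x0219.
 * The accessors below build it exactly that way from the pci_dn, and
 * on any OPAL error they return the all-ones value for the access
 * size, matching what a master abort would read back.
 */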
int pnv_pci_cfg_read(struct device_node *dn,
		     int where, int size, u32 *val)
{
	struct pci_dn *pdn = PCI_DN(dn);
	struct pnv_phb *phb = pdn->phb->private_data;
	u32 bdfn = (pdn->busno << 8) | pdn->devfn;
	s64 rc;

	switch (size) {
	case 1: {
		u8 v8;
		rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8);
		*val = (rc == OPAL_SUCCESS) ? v8 : 0xff;
		break;
	}
	case 2: {
		__be16 v16;
		rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where,
						    &v16);
		*val = (rc == OPAL_SUCCESS) ? be16_to_cpu(v16) : 0xffff;
		break;
	}
	case 4: {
		__be32 v32;
		rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32);
		*val = (rc == OPAL_SUCCESS) ? be32_to_cpu(v32) : 0xffffffff;
		break;
	}
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	cfg_dbg("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
		__func__, pdn->busno, pdn->devfn, where, size, *val);
	return PCIBIOS_SUCCESSFUL;
}
int pnv_pci_cfg_write(struct device_node *dn,
		      int where, int size, u32 val)
{
	struct pci_dn *pdn = PCI_DN(dn);
	struct pnv_phb *phb = pdn->phb->private_data;
	u32 bdfn = (pdn->busno << 8) | pdn->devfn;

	cfg_dbg("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
		__func__, pdn->busno, pdn->devfn, where, size, val);

	switch (size) {
	case 1:
		opal_pci_config_write_byte(phb->opal_id, bdfn, where, val);
		break;
	case 2:
		opal_pci_config_write_half_word(phb->opal_id, bdfn, where,
						val);
		break;
	case 4:
		opal_pci_config_write_word(phb->opal_id, bdfn, where, val);
		break;
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	return PCIBIOS_SUCCESSFUL;
}
#ifdef CONFIG_EEH
static bool pnv_pci_cfg_check(struct pci_controller *hose,
			      struct device_node *dn)
{
	struct eeh_dev *edev = NULL;
	struct pnv_phb *phb = hose->private_data;

	/* EEH not enabled ? */
	if (!(phb->flags & PNV_PHB_FLAG_EEH))
		return true;

	/* PE reset or device removed ? */
	edev = of_node_to_eeh_dev(dn);
	if (edev) {
		if (edev->pe &&
		    (edev->pe->state & EEH_PE_CFG_BLOCKED))
			return false;

		if (edev->mode & EEH_DEV_REMOVED)
			return false;
	}

	return true;
}
#else
static inline bool pnv_pci_cfg_check(struct pci_controller *hose,
				     struct device_node *dn)
{
	return true;
}
#endif /* CONFIG_EEH */
static int pnv_pci_read_config(struct pci_bus *bus,
			       unsigned int devfn,
			       int where, int size, u32 *val)
{
	struct device_node *dn, *busdn = pci_bus_to_OF_node(bus);
	struct pci_dn *pdn;
	struct pnv_phb *phb;
	bool found = false;
	int ret;

	*val = 0xFFFFFFFF;
	for (dn = busdn->child; dn; dn = dn->sibling) {
		pdn = PCI_DN(dn);
		if (pdn && pdn->devfn == devfn) {
			phb = pdn->phb->private_data;
			found = true;
			break;
		}
	}

	if (!found || !pnv_pci_cfg_check(pdn->phb, dn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = pnv_pci_cfg_read(dn, where, size, val);
	if (phb->flags & PNV_PHB_FLAG_EEH) {
		if (*val == EEH_IO_ERROR_VALUE(size) &&
		    eeh_dev_check_failure(of_node_to_eeh_dev(dn)))
			return PCIBIOS_DEVICE_NOT_FOUND;
	} else {
		pnv_pci_config_check_eeh(phb, dn);
	}

	return ret;
}
static int pnv_pci_write_config(struct pci_bus *bus,
				unsigned int devfn,
				int where, int size, u32 val)
{
	struct device_node *dn, *busdn = pci_bus_to_OF_node(bus);
	struct pci_dn *pdn;
	struct pnv_phb *phb;
	bool found = false;
	int ret;

	for (dn = busdn->child; dn; dn = dn->sibling) {
		pdn = PCI_DN(dn);
		if (pdn && pdn->devfn == devfn) {
			phb = pdn->phb->private_data;
			found = true;
			break;
		}
	}

	if (!found || !pnv_pci_cfg_check(pdn->phb, dn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = pnv_pci_cfg_write(dn, where, size, val);
	if (!(phb->flags & PNV_PHB_FLAG_EEH))
		pnv_pci_config_check_eeh(phb, dn);

	return ret;
}
struct pci_ops pnv_pci_ops = {
	.read  = pnv_pci_read_config,
	.write = pnv_pci_write_config,
};
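/*
 * pnv_pci_ops is installed on every PHB. The wrappers above resolve
 * the OF node matching (bus, devfn), refuse access while a PE is
 * blocked for reset or the device was removed (pnv_pci_cfg_check),
 * and on PHBs without full EEH enabled they probe the freeze state by
 * hand after each access, since a frozen PE simply reads back
 * all-ones with no other notification.
 */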
static int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
			 unsigned long uaddr, enum dma_data_direction direction,
			 struct dma_attrs *attrs, bool rm)
{
	u64 proto_tce;
	__be64 *tcep, *tces;
	u64 rpn;

	proto_tce = TCE_PCI_READ; // Read allowed

	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	tces = tcep = ((__be64 *)tbl->it_base) + index - tbl->it_offset;
	rpn = __pa(uaddr) >> tbl->it_page_shift;

	while (npages--)
		*(tcep++) = cpu_to_be64(proto_tce |
				(rpn++ << tbl->it_page_shift));

	/* Some implementations won't cache invalid TCEs and thus may not
	 * need that flush. We'll probably turn it_type into a bit mask
	 * of flags if that becomes the case
	 */
	if (tbl->it_type & TCE_PCI_SWINV_CREATE)
		pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1, rm);

	return 0;
}
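/*
 * TCE encoding as built above: the real page number shifted back up
 * by it_page_shift, OR'ed with the permission bits in the low bits.
 * For example, with 4K pages, mapping the page at physical address
 * 0x2000 for bidirectional DMA yields the (big-endian) entry
 * 0x2000 | TCE_PCI_READ | TCE_PCI_WRITE.
 */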
static int pnv_tce_build_vm(struct iommu_table *tbl, long index, long npages,
			    unsigned long uaddr,
			    enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	return pnv_tce_build(tbl, index, npages, uaddr, direction, attrs,
			false);
}
static void pnv_tce_free(struct iommu_table *tbl, long index, long npages,
			 bool rm)
{
	__be64 *tcep, *tces;

	tces = tcep = ((__be64 *)tbl->it_base) + index - tbl->it_offset;

	while (npages--)
		*(tcep++) = cpu_to_be64(0);

	if (tbl->it_type & TCE_PCI_SWINV_FREE)
		pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1, rm);
}
static void pnv_tce_free_vm(struct iommu_table *tbl, long index, long npages)
{
	pnv_tce_free(tbl, index, npages, false);
}
static unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
{
	return ((u64 *)tbl->it_base)[index - tbl->it_offset];
}
static int pnv_tce_build_rm(struct iommu_table *tbl, long index, long npages,
			    unsigned long uaddr,
			    enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	return pnv_tce_build(tbl, index, npages, uaddr, direction, attrs, true);
}
static void pnv_tce_free_rm(struct iommu_table *tbl, long index, long npages)
{
	pnv_tce_free(tbl, index, npages, true);
}
void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
			       void *tce_mem, u64 tce_size,
			       u64 dma_offset, unsigned page_shift)
{
	tbl->it_blocksize = 16;
	tbl->it_base = (unsigned long)tce_mem;
	tbl->it_page_shift = page_shift;
	tbl->it_offset = dma_offset >> tbl->it_page_shift;
	tbl->it_index = 0;
	tbl->it_size = tce_size >> 3;
	tbl->it_busno = 0;
	tbl->it_type = TCE_PCI;
}
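/*
 * Each TCE entry is 8 bytes, so it_size = tce_size >> 3 converts the
 * table size in bytes into a number of entries, while it_offset
 * expresses the start of the DMA window in units of IOMMU pages.
 */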
static struct iommu_table *pnv_pci_setup_bml_iommu(struct pci_controller *hose)
{
	struct iommu_table *tbl;
	const __be64 *basep, *swinvp;
	const __be32 *sizep;

	basep = of_get_property(hose->dn, "linux,tce-base", NULL);
	sizep = of_get_property(hose->dn, "linux,tce-size", NULL);
	if (basep == NULL || sizep == NULL) {
		pr_err("PCI: %s has missing tce entries !\n",
		       hose->dn->full_name);
		return NULL;
	}
	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, hose->node);
	if (WARN_ON(!tbl))
		return NULL;
	pnv_pci_setup_iommu_table(tbl, __va(be64_to_cpup(basep)),
				  be32_to_cpup(sizep), 0, IOMMU_PAGE_SHIFT_4K);
	iommu_init_table(tbl, hose->node);
	iommu_register_group(tbl, pci_domain_nr(hose->bus), 0);

	/* Deal with SW invalidated TCEs when needed (BML way) */
	swinvp = of_get_property(hose->dn, "linux,tce-sw-invalidate-info",
				 NULL);
	if (swinvp) {
		tbl->it_busno = be64_to_cpu(swinvp[1]);
		tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
		tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
	}
	return tbl;
}
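/*
 * Layout assumed for the properties consumed above: "linux,tce-base"
 * is a 64-bit physical address and "linux,tce-size" a 32-bit byte
 * count (matching the be64/be32 accessors), while the optional
 * "linux,tce-sw-invalidate-info" carries the physical address of the
 * invalidate register (ioremap'ed into it_index) followed by a value
 * stored in it_busno.
 */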
static void pnv_pci_dma_fallback_setup(struct pci_controller *hose,
				       struct pci_dev *pdev)
{
	struct device_node *np = pci_bus_to_OF_node(hose->bus);
	struct pci_dn *pdn;

	if (np == NULL)
		return;
	pdn = PCI_DN(np);
	if (!pdn->iommu_table)
		pdn->iommu_table = pnv_pci_setup_bml_iommu(hose);
	if (!pdn->iommu_table)
		return;
	set_iommu_table_base_and_group(&pdev->dev, pdn->iommu_table);
}
static void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;

	/* If we have no phb structure, try to setup a fallback based on
	 * the device-tree (RTAS PCI for example)
	 */
	if (phb && phb->dma_dev_setup)
		phb->dma_dev_setup(phb, pdev);
	else
		pnv_pci_dma_fallback_setup(hose, pdev);
}
int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;

	if (phb && phb->dma_set_mask)
		return phb->dma_set_mask(phb, pdev, dma_mask);
	return __dma_set_mask(&pdev->dev, dma_mask);
}
u64 pnv_pci_dma_get_required_mask(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;

	if (phb && phb->dma_get_required_mask)
		return phb->dma_get_required_mask(phb, pdev);

	return __dma_get_required_mask(&pdev->dev);
}
void pnv_pci_shutdown(void)
{
	struct pci_controller *hose;

	list_for_each_entry(hose, &hose_list, list_node) {
		struct pnv_phb *phb = hose->private_data;

		if (phb && phb->shutdown)
			phb->shutdown(phb);
	}
}
/* Fixup wrong class code in p7ioc and p8 root complex */
static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);
static int pnv_pci_probe_mode(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	const __be64 *tstamp;
	u64 now, target;

	/* We hijack this as a way to ensure we have waited long
	 * enough since the reset was lifted on the PCI bus
	 */
	if (bus != hose->bus)
		return PCI_PROBE_NORMAL;
	tstamp = of_get_property(hose->dn, "reset-clear-timestamp", NULL);
	if (!tstamp || !*tstamp)
		return PCI_PROBE_NORMAL;

	now = mftb() / tb_ticks_per_usec;
	target = (be64_to_cpup(tstamp) / tb_ticks_per_usec)
		+ PCI_RESET_DELAY_US;

	pr_devel("pci %04d: Reset target: 0x%llx now: 0x%llx\n",
		 hose->global_number, target, now);

	/* If a sleep is needed, do it */
	if (now < target)
		msleep((target - now + 999) / 1000);

	return PCI_PROBE_NORMAL;
}
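/*
 * Firmware records the timebase value at which the bus reset was
 * deasserted. Dividing both that stamp and mftb() by
 * tb_ticks_per_usec puts them in microseconds, and the remaining
 * (target - now) microseconds are rounded up to whole milliseconds
 * for msleep().
 */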
void __init pnv_pci_init(void)
{
	struct device_node *np;

	pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN);

	/* OPAL absent, try POPAL first then RTAS detection of PHBs */
	if (!firmware_has_feature(FW_FEATURE_OPAL)) {
#ifdef CONFIG_PPC_POWERNV_RTAS
		init_pci_config_tokens();
		find_and_init_phbs();
#endif /* CONFIG_PPC_POWERNV_RTAS */
	} else {
		/* OPAL is here, do our normal stuff */
		int found_ioda = 0;

		/* Look for IODA IO-Hubs. We don't support mixing IODA
		 * and p5ioc2 due to the need to change some global
		 * probing flags
		 */
		for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
			pnv_pci_init_ioda_hub(np);
			found_ioda = 1;
		}

		/* Look for p5ioc2 IO-Hubs */
		if (!found_ioda)
			for_each_compatible_node(np, NULL, "ibm,p5ioc2")
				pnv_pci_init_p5ioc2_hub(np);

		/* Look for ioda2 built-in PHB3's */
		for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
			pnv_pci_init_ioda2_phb(np);
	}

	/* Setup the linkage between OF nodes and PHBs */
	pci_devs_phb_init();

	/* Configure IOMMU DMA hooks */
	ppc_md.pci_dma_dev_setup = pnv_pci_dma_dev_setup;
	ppc_md.tce_build = pnv_tce_build_vm;
	ppc_md.tce_free = pnv_tce_free_vm;
	ppc_md.tce_build_rm = pnv_tce_build_rm;
	ppc_md.tce_free_rm = pnv_tce_free_rm;
	ppc_md.tce_get = pnv_tce_get;
	ppc_md.pci_probe_mode = pnv_pci_probe_mode;
	set_pci_dma_ops(&dma_iommu_ops);

	/* Configure MSIs */
#ifdef CONFIG_PCI_MSI
	ppc_md.setup_msi_irqs = pnv_setup_msi_irqs;
	ppc_md.teardown_msi_irqs = pnv_teardown_msi_irqs;
#endif
}
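/*
 * pnv_pci_init() runs at boot. The bus notifier below is registered
 * from a subsys initcall so that devices added later still get
 * attached to (and detached from) their IOMMU group.
 */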
static int tce_iommu_bus_notifier(struct notifier_block *nb,
		unsigned long action, void *data)
{
	struct device *dev = data;

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		return iommu_add_device(dev);
	case BUS_NOTIFY_DEL_DEVICE:
		if (dev->iommu_group)
			iommu_del_device(dev);
		return 0;
	default:
		return 0;
	}
}

static struct notifier_block tce_iommu_bus_nb = {
	.notifier_call = tce_iommu_bus_notifier,
};

static int __init tce_iommu_bus_notifier_init(void)
{
	bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
	return 0;
}
machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init);