/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/xics.h>

#include "powernv.h"
#include "pci.h"

#define define_pe_printk_level(func, kern_level)			\
static int func(const struct pnv_ioda_pe *pe, const char *fmt, ...)	\
{									\
	struct va_format vaf;						\
	va_list args;							\
	char pfix[32];							\
	int r;								\
									\
	va_start(args, fmt);						\
									\
	vaf.fmt = fmt;							\
	vaf.va = &args;							\
									\
	if (pe->pdev)							\
		strlcpy(pfix, dev_name(&pe->pdev->dev),			\
			sizeof(pfix));					\
	else								\
		sprintf(pfix, "%04x:%02x ",				\
			pci_domain_nr(pe->pbus),			\
			pe->pbus->number);				\
	r = printk(kern_level "pci %s: [PE# %.3d] %pV",			\
		   pfix, pe->pe_number, &vaf);				\
									\
	va_end(args);							\
									\
	return r;							\
}

define_pe_printk_level(pe_err, KERN_ERR);
define_pe_printk_level(pe_warn, KERN_WARNING);
define_pe_printk_level(pe_info, KERN_INFO);

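/*
 * Usage sketch for the helpers above (output is illustrative): a call
 * such as pe_info(pe, "DMA weight %d\n", w) prints a line in the style
 * "pci 0000:04:00.0: [PE# 002] DMA weight 10", prefixed with the PE's
 * device name or, for bus PEs, its domain and bus numbers.
 */
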
static struct pci_dn *pnv_ioda_get_pdn(struct pci_dev *dev)
{
	struct device_node *np;

	np = pci_device_to_OF_node(dev);
	if (!np)
		return NULL;
	return PCI_DN(np);
}

static int pnv_ioda_alloc_pe(struct pnv_phb *phb)
{
	unsigned long pe;

	do {
		pe = find_next_zero_bit(phb->ioda.pe_alloc,
					phb->ioda.total_pe, 0);
		if (pe >= phb->ioda.total_pe)
			return IODA_INVALID_PE;
	} while (test_and_set_bit(pe, phb->ioda.pe_alloc));

	phb->ioda.pe_array[pe].phb = phb;
	phb->ioda.pe_array[pe].pe_number = pe;
	return pe;
}

static void pnv_ioda_free_pe(struct pnv_phb *phb, int pe)
{
	WARN_ON(phb->ioda.pe_array[pe].pdev);

	memset(&phb->ioda.pe_array[pe], 0, sizeof(struct pnv_ioda_pe));
	clear_bit(pe, phb->ioda.pe_alloc);
}

/* Currently this is only used when MSIs are enabled; that will change,
 * but in the meantime we need to guard it to avoid warnings.
 */
#ifdef CONFIG_PCI_MSI
static struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pnv_ioda_get_pdn(dev);

	if (!pdn)
		return NULL;
	if (pdn->pe_number == IODA_INVALID_PE)
		return NULL;
	return &phb->ioda.pe_array[pdn->pe_number];
}
#endif /* CONFIG_PCI_MSI */

static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
	struct pci_dev *parent;
	uint8_t bcomp, dcomp, fcomp;
	long rc, rid_end, rid;

	/* Bus validation ? */
	if (pe->pbus) {
		int count;

		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
		parent = pe->pbus->self;
		if (pe->flags & PNV_IODA_PE_BUS_ALL)
			count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
		else
			count = 1;

		switch (count) {
		case  1: bcomp = OpalPciBusAll;		break;
		case  2: bcomp = OpalPciBus7Bits;	break;
		case  4: bcomp = OpalPciBus6Bits;	break;
		case  8: bcomp = OpalPciBus5Bits;	break;
		case 16: bcomp = OpalPciBus4Bits;	break;
		case 32: bcomp = OpalPciBus3Bits;	break;
		default:
			pr_err("%s: Number of subordinate busses %d"
			       " unsupported\n",
			       pci_name(pe->pbus->self), count);
			/* Do an exact match only */
			bcomp = OpalPciBusAll;
		}
		rid_end = pe->rid + (count << 8);
	} else {
		parent = pe->pdev->bus->self;
		bcomp = OpalPciBusAll;
		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
		rid_end = pe->rid + 1;
	}

	/* Associate PE in PELT */
	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
			     bcomp, dcomp, fcomp, OPAL_MAP_PE);
	if (rc) {
		pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
		return -ENXIO;
	}
	opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
				  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);

	/* Add to all parents' PELT-V */
	while (parent) {
		struct pci_dn *pdn = pnv_ioda_get_pdn(parent);
		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
			rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
						pe->pe_number, OPAL_ADD_PE_TO_DOMAIN);
			/* XXX What to do in case of error ? */
		}
		parent = parent->bus->self;
	}
	/* Setup reverse map */
	for (rid = pe->rid; rid < rid_end; rid++)
		phb->ioda.pe_rmap[rid] = pe->pe_number;

	/* Setup one MVE on IODA1 */
	if (phb->type == PNV_PHB_IODA1) {
		pe->mve_number = pe->pe_number;
		rc = opal_pci_set_mve(phb->opal_id, pe->mve_number,
				      pe->pe_number);
		if (rc) {
			pe_err(pe, "OPAL error %ld setting up MVE %d\n",
			       rc, pe->mve_number);
			pe->mve_number = -1;
		} else {
			rc = opal_pci_set_mve_enable(phb->opal_id,
						     pe->mve_number, OPAL_ENABLE_MVE);
			if (rc) {
				pe_err(pe, "OPAL error %ld enabling MVE %d\n",
				       rc, pe->mve_number);
				pe->mve_number = -1;
			}
		}
	} else if (phb->type == PNV_PHB_IODA2)
		pe->mve_number = 0;

	return 0;
}

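/*
 * Worked example for the bus-number compare logic above (values are
 * hypothetical): a PNV_IODA_PE_BUS_ALL PE spanning 4 buses, say
 * 0x04..0x07, has count = 4 and therefore bcomp = OpalPciBus6Bits,
 * i.e. only the upper 6 bits of the bus number are compared. With
 * pe->rid = 0x04 << 8 = 0x0400, rid_end is 0x0400 + (4 << 8) = 0x0800,
 * so the reverse map covers the 1024 RIDs behind the bridge.
 */
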
static void pnv_ioda_link_pe_by_weight(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe)
{
	struct pnv_ioda_pe *lpe;

	list_for_each_entry(lpe, &phb->ioda.pe_dma_list, dma_link) {
		if (lpe->dma_weight < pe->dma_weight) {
			list_add_tail(&pe->dma_link, &lpe->dma_link);
			return;
		}
	}
	list_add_tail(&pe->dma_link, &phb->ioda.pe_dma_list);
}

static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev)
{
	/* This is quite simplistic. The "base" weight of a device
	 * is 10. 0 means no DMA is to be accounted for it.
	 */

	/* If it's a bridge, no DMA */
	if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return 0;

	/* Reduce the weight of slow USB controllers */
	if (dev->class == PCI_CLASS_SERIAL_USB_UHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_OHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_EHCI)
		return 3;

	/* Increase the weight of RAID (includes Obsidian) */
	if ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)
		return 15;

	/* Default */
	return 10;
}

#if 0
static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
	struct pnv_ioda_pe *pe;
	int pe_num;

	if (!pdn) {
		pr_err("%s: Device tree node not associated properly\n",
		       pci_name(dev));
		return NULL;
	}
	if (pdn->pe_number != IODA_INVALID_PE)
		return NULL;

	/* PE#0 has been pre-set */
	if (dev->bus->number == 0)
		pe_num = 0;
	else
		pe_num = pnv_ioda_alloc_pe(phb);
	if (pe_num == IODA_INVALID_PE) {
		pr_warning("%s: Not enough PE# available, disabling device\n",
			   pci_name(dev));
		return NULL;
	}

	/* NOTE: We get only one ref to the pci_dev for the pdn, not for the
	 * pointer in the PE data structure, both should be destroyed at the
	 * same time. However, this needs to be looked at more closely again
	 * once we actually start removing things (Hotplug, SR-IOV, ...)
	 *
	 * At some point we want to remove the PDN completely anyway
	 */
	pe = &phb->ioda.pe_array[pe_num];
	pci_dev_get(dev);
	pdn->pcidev = dev;
	pdn->pe_number = pe_num;
	pe->pdev = dev;
	pe->pbus = NULL;
	pe->tce32_seg = -1;
	pe->mve_number = -1;
	pe->rid = dev->bus->number << 8 | pdn->devfn;

	pe_info(pe, "Associated device to PE\n");

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		if (pe_num)
			pnv_ioda_free_pe(phb, pe_num);
		pdn->pe_number = IODA_INVALID_PE;
		pe->pdev = NULL;
		pci_dev_put(dev);
		return NULL;
	}

	/* Assign a DMA weight to the device */
	pe->dma_weight = pnv_ioda_dma_weight(dev);
	if (pe->dma_weight != 0) {
		phb->ioda.dma_weight += pe->dma_weight;
		phb->ioda.dma_pe_count++;
	}

	/* Link the PE */
	pnv_ioda_link_pe_by_weight(phb, pe);

	return pe;
}
#endif /* Useful for SRIOV case */

static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_dn *pdn = pnv_ioda_get_pdn(dev);

		if (pdn == NULL) {
			pr_warn("%s: No device node associated with device !\n",
				pci_name(dev));
			continue;
		}
		pci_dev_get(dev);
		pdn->pcidev = dev;
		pdn->pe_number = pe->pe_number;
		pe->dma_weight += pnv_ioda_dma_weight(dev);
		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
			pnv_ioda_setup_same_PE(dev->subordinate, pe);
	}
}

/*
 * There are 2 types of PCI-bus-sensitive PEs: one that comprises a
 * single PCI bus, and another that contains the primary PCI bus and
 * its subordinate PCI devices and buses. The second type of PE
 * normally originates from a PCIe-to-PCI bridge or a PLX switch
 * downstream port.
 */
static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pe;
	int pe_num;

	pe_num = pnv_ioda_alloc_pe(phb);
	if (pe_num == IODA_INVALID_PE) {
		pr_warning("%s: Not enough PE# available for PCI bus %04x:%02x\n",
			   __func__, pci_domain_nr(bus), bus->number);
		return;
	}

	pe = &phb->ioda.pe_array[pe_num];
	pe->flags = (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS);
	pe->pbus = bus;
	pe->pdev = NULL;
	pe->tce32_seg = -1;
	pe->mve_number = -1;
	pe->rid = bus->busn_res.start << 8;
	pe->dma_weight = 0;

	if (all)
		pe_info(pe, "Secondary bus %d..%d associated with PE#%d\n",
			bus->busn_res.start, bus->busn_res.end, pe_num);
	else
		pe_info(pe, "Secondary bus %d associated with PE#%d\n",
			bus->busn_res.start, pe_num);

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		if (pe_num)
			pnv_ioda_free_pe(phb, pe_num);
		pe->pbus = NULL;
		return;
	}

	/* Associate it with all child devices */
	pnv_ioda_setup_same_PE(bus, pe);

	/* Put PE on the list */
	list_add_tail(&pe->list, &phb->ioda.pe_list);

	/* Account for one DMA PE if at least one DMA capable device exists
	 * below the bridge
	 */
	if (pe->dma_weight != 0) {
		phb->ioda.dma_weight += pe->dma_weight;
		phb->ioda.dma_pe_count++;
	}

	/* Link the PE */
	pnv_ioda_link_pe_by_weight(phb, pe);
}

static void pnv_ioda_setup_PEs(struct pci_bus *bus)
{
	struct pci_dev *dev;

	pnv_ioda_setup_bus_PE(bus, 0);

	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (dev->subordinate) {
			if (pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE)
				pnv_ioda_setup_bus_PE(dev->subordinate, 1);
			else
				pnv_ioda_setup_PEs(dev->subordinate);
		}
	}
}

/*
 * Configure PEs so that the downstream PCI buses and devices
 * can have their associated PE#. Unfortunately, we haven't
 * figured out a way to identify PLX bridges yet, so we simply
 * assign the PCI bus and the subordinates behind the root
 * port to a PE# here. This scheme is expected to change as
 * soon as we can detect PLX bridges correctly.
 */
static void pnv_pci_ioda_setup_PEs(void)
{
	struct pci_controller *hose, *tmp;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		pnv_ioda_setup_PEs(hose->bus);
	}
}

static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *dev)
{
	/* We delay DMA setup until we have assigned all PE# */
}

static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		set_iommu_table_base(&dev->dev, &pe->tce32_table);
		if (dev->subordinate)
			pnv_ioda_setup_bus_dma(pe, dev->subordinate);
	}
}

static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
					 u64 *startp, u64 *endp)
{
	u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
	unsigned long start, end, inc;

	start = __pa(startp);
	end = __pa(endp);

	/* BML uses this case for p6/p7/galaxy2: Shift addr and put in node */
	if (tbl->it_busno) {
		start <<= 12;
		end <<= 12;
		inc = 128 << 12;
		start |= tbl->it_busno;
		end |= tbl->it_busno;
	} else if (tbl->it_type & TCE_PCI_SWINV_PAIR) {
		/* p7ioc-style invalidation, 2 TCEs per write */
		start |= (1ull << 63);
		end |= (1ull << 63);
		inc = 16;
	} else {
		/* Default (older HW) */
		inc = 128;
	}

	end |= inc - 1;	/* round up end to be different than start */

	mb(); /* Ensure above stores are visible */
	while (start <= end) {
		__raw_writeq(start, invalidate);
		start += inc;
	}

	/*
	 * The iommu layer will do another mb() for us on build()
	 * and we don't care on free()
	 */
}

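/*
 * Worked example for the p7ioc-style case above (addresses are
 * hypothetical): invalidating TCEs at physical addresses 0x1000..0x1038
 * sets bit 63 on both bounds, rounds end up to ...103f via end |= inc - 1,
 * and issues four MMIO writes, (1ull << 63) | 0x1000, | 0x1010, | 0x1020
 * and | 0x1030, each covering a pair of 8-byte TCEs.
 */
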
static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe,
					 struct iommu_table *tbl,
					 u64 *startp, u64 *endp)
{
	unsigned long start, end, inc;
	u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;

	/* We'll invalidate DMA address in PE scope */
	start = 0x2ul << 60;
	start |= (pe->pe_number & 0xFF);
	end = start;

	/* Figure out the start, end and step */
	inc = tbl->it_offset + (((u64)startp - tbl->it_base) / sizeof(u64));
	start |= (inc << 12);
	inc = tbl->it_offset + (((u64)endp - tbl->it_base) / sizeof(u64));
	end |= (inc << 12);
	inc = (0x1ul << 12);
	mb();

	while (start <= end) {
		__raw_writeq(start, invalidate);
		start += inc;
	}
}

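/*
 * Worked example for the PE-scoped form above (values are hypothetical):
 * for PE#5 and TCE indexes 0x10..0x12 (computed from it_offset plus the
 * offsets of startp/endp into the table), the values written are
 * (0x2ul << 60) | 5 | (0x10 << 12), then | (0x11 << 12), then
 * | (0x12 << 12), i.e. one 4K-sized index step per TCE entry.
 */
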
void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
				 u64 *startp, u64 *endp)
{
	struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
					      tce32_table);
	struct pnv_phb *phb = pe->phb;

	if (phb->type == PNV_PHB_IODA1)
		pnv_pci_ioda1_tce_invalidate(tbl, startp, endp);
	else
		pnv_pci_ioda2_tce_invalidate(pe, tbl, startp, endp);
}

static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
				      struct pnv_ioda_pe *pe, unsigned int base,
				      unsigned int segs)
{
	struct page *tce_mem = NULL;
	const __be64 *swinvp;
	struct iommu_table *tbl;
	unsigned int i;
	int64_t rc;
	void *addr;

	/* 256M DMA window, 4K TCE pages, 8 bytes TCE */
#define TCE32_TABLE_SIZE	((0x10000000 / 0x1000) * 8)

	/* XXX FIXME: Handle 64-bit only DMA devices */
	/* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */
	/* XXX FIXME: Allocate multi-level tables on PHB3 */

	/* We shouldn't already have a 32-bit DMA associated */
	if (WARN_ON(pe->tce32_seg >= 0))
		return;

	/* Grab a 32-bit TCE table */
	pe->tce32_seg = base;
	pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n",
		(base << 28), ((base + segs) << 28) - 1);

	/* XXX Currently, we allocate one big contiguous table for the
	 * TCEs. We only really need one chunk per 256M of TCE space
	 * (ie per segment) but that's an optimization for later, it
	 * requires some added smarts with our get/put_tce implementation
	 */
	tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
				   get_order(TCE32_TABLE_SIZE * segs));
	if (!tce_mem) {
		pe_err(pe, " Failed to allocate a 32-bit TCE memory\n");
		goto fail;
	}
	addr = page_address(tce_mem);
	memset(addr, 0, TCE32_TABLE_SIZE * segs);

	/* Configure HW */
	for (i = 0; i < segs; i++) {
		rc = opal_pci_map_pe_dma_window(phb->opal_id,
						pe->pe_number,
						base + i, 1,
						__pa(addr) + TCE32_TABLE_SIZE * i,
						TCE32_TABLE_SIZE, 0x1000);
		if (rc) {
			pe_err(pe, " Failed to configure 32-bit TCE table,"
			       " err %ld\n", rc);
			goto fail;
		}
	}

	/* Setup linux iommu table */
	tbl = &pe->tce32_table;
	pnv_pci_setup_iommu_table(tbl, addr, TCE32_TABLE_SIZE * segs,
				  base << 28);

	/* OPAL variant of P7IOC SW invalidated TCEs */
	swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
	if (swinvp) {
		/* We need a couple more fields -- an address and a data
		 * to or. Since the bus is only printed out on table free
		 * errors, and on the first pass the data will be a relative
		 * bus number, print that out instead.
		 */
		tbl->it_busno = 0;
		tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
		tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
		if (phb->type == PNV_PHB_IODA1)
			tbl->it_type |= TCE_PCI_SWINV_PAIR;
	}
	iommu_init_table(tbl, phb->hose->node);

	if (pe->pdev)
		set_iommu_table_base(&pe->pdev->dev, tbl);
	else
		pnv_ioda_setup_bus_dma(pe, pe->pbus);

	return;
 fail:
	/* XXX Failure: Try to fallback to 64-bit only ? */
	if (pe->tce32_seg >= 0)
		pe->tce32_seg = -1;
	if (tce_mem)
		__free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));
}

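/*
 * Size check for TCE32_TABLE_SIZE above: a 256M DMA window with 4K TCE
 * pages needs 0x10000000 / 0x1000 = 65536 entries of 8 bytes each,
 * i.e. 512K of TCE table per 256M segment.
 */
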
static void pnv_ioda_setup_dma(struct pnv_phb *phb)
{
	struct pci_controller *hose = phb->hose;
	unsigned int residual, remaining, segs, tw, base;
	struct pnv_ioda_pe *pe;

	/* If we have more PE# than segments available, hand out one
	 * per PE until we run out and let the rest fail. If not,
	 * then we assign at least one segment per PE, plus more based
	 * on the amount of devices under that PE
	 */
	if (phb->ioda.dma_pe_count > phb->ioda.tce32_count)
		residual = 0;
	else
		residual = phb->ioda.tce32_count -
			phb->ioda.dma_pe_count;

	pr_info("PCI: Domain %04x has %ld available 32-bit DMA segments\n",
		hose->global_number, phb->ioda.tce32_count);
	pr_info("PCI: %d PE# for a total weight of %d\n",
		phb->ioda.dma_pe_count, phb->ioda.dma_weight);

	/* Walk our PE list and configure their DMA segments, hand them
	 * out one base segment plus any residual segments based on
	 * weight
	 */
	remaining = phb->ioda.tce32_count;
	tw = phb->ioda.dma_weight;
	base = 0;
	list_for_each_entry(pe, &phb->ioda.pe_dma_list, dma_link) {
		if (!pe->dma_weight)
			continue;
		if (!remaining) {
			pe_warn(pe, "No DMA32 resources available\n");
			continue;
		}
		segs = 1;
		if (residual) {
			segs += ((pe->dma_weight * residual) + (tw / 2)) / tw;
			if (segs > remaining)
				segs = remaining;
		}
		pe_info(pe, "DMA weight %d, assigned %d DMA32 segments\n",
			pe->dma_weight, segs);
		pnv_pci_ioda_setup_dma_pe(phb, pe, base, segs);
		remaining -= segs;
		base += segs;
	}
}

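/*
 * Worked example for the residual distribution above (numbers are
 * hypothetical): with 16 TCE32 segments and two DMA-capable PEs of
 * weights 10 and 20 (tw = 30, residual = 14), the weight-20 PE gets
 * 1 + (20 * 14 + 15) / 30 = 10 segments and the weight-10 PE gets
 * 1 + (10 * 14 + 15) / 30 = 6, so all 16 are handed out, rounded to
 * the nearest segment.
 */
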
#ifdef CONFIG_PCI_MSI
static void pnv_ioda2_msi_eoi(struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct pnv_phb *phb = container_of(chip, struct pnv_phb,
					   ioda.irq_chip);
	int64_t rc;

	rc = opal_pci_msi_eoi(phb->opal_id, hw_irq);
	WARN_ON_ONCE(rc);

	icp_native_eoi(d);
}

static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
				  unsigned int hwirq, unsigned int virq,
				  unsigned int is_64, struct msi_msg *msg)
{
	struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
	struct irq_data *idata;
	struct irq_chip *ichip;
	unsigned int xive_num = hwirq - phb->msi_base;
	uint64_t addr64;
	uint32_t addr32, data;
	int rc;

	/* No PE assigned ? bail out ... no MSI for you ! */
	if (pe == NULL)
		return -ENXIO;

	/* Check if we have an MVE */
	if (pe->mve_number < 0)
		return -ENXIO;

	/* Assign XIVE to PE */
	rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
	if (rc) {
		pr_warn("%s: OPAL error %d setting XIVE %d PE\n",
			pci_name(dev), rc, xive_num);
		return -EIO;
	}

	if (is_64) {
		rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1,
				     &addr64, &data);
		if (rc) {
			pr_warn("%s: OPAL error %d getting 64-bit MSI data\n",
				pci_name(dev), rc);
			return -EIO;
		}
		msg->address_hi = addr64 >> 32;
		msg->address_lo = addr64 & 0xfffffffful;
	} else {
		rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1,
				     &addr32, &data);
		if (rc) {
			pr_warn("%s: OPAL error %d getting 32-bit MSI data\n",
				pci_name(dev), rc);
			return -EIO;
		}
		msg->address_hi = 0;
		msg->address_lo = addr32;
	}
	msg->data = data;

	/*
	 * Change the IRQ chip for the MSI interrupts on PHB3.
	 * The corresponding IRQ chip is populated the first
	 * time we get here.
	 */
	if (phb->type == PNV_PHB_IODA2) {
		if (!phb->ioda.irq_chip_init) {
			idata = irq_get_irq_data(virq);
			ichip = irq_data_get_irq_chip(idata);
			phb->ioda.irq_chip_init = 1;
			phb->ioda.irq_chip = *ichip;
			phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi;
		}

		irq_set_chip(virq, &phb->ioda.irq_chip);
	}

	pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
		 " address=%x_%08x data=%x PE# %d\n",
		 pci_name(dev), is_64 ? "64" : "32", hwirq, xive_num,
		 msg->address_hi, msg->address_lo, data, pe->pe_number);

	return 0;
}

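/*
 * Illustrative walk-through of the setup above (values are
 * hypothetical): with phb->msi_base = 0x1000 and hwirq = 0x1005,
 * xive_num is 5; OPAL binds that XIVE to the device's PE and returns
 * the address/data pair that ends up in the MSI message *msg.
 */
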
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
{
	unsigned int count;
	const __be32 *prop = of_get_property(phb->hose->dn,
					     "ibm,opal-msi-ranges", NULL);
	if (!prop) {
		/* BML Fallback */
		prop = of_get_property(phb->hose->dn, "msi-ranges", NULL);
	}
	if (!prop)
		return;

	phb->msi_base = be32_to_cpup(prop);
	count = be32_to_cpup(prop + 1);
	if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) {
		pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
		       phb->hose->global_number);
		return;
	}

	phb->msi_setup = pnv_pci_ioda_msi_setup;
	phb->msi32_support = 1;
	pr_info("  Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
		count, phb->msi_base);
}
#else
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
#endif /* CONFIG_PCI_MSI */

/*
 * This function is supposed to be called on a per-PE basis, from
 * top to bottom, so the I/O or MMIO segment assigned to a parent
 * PE can be overridden by its child PEs if necessary.
 */
static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
				  struct pnv_ioda_pe *pe)
{
	struct pnv_phb *phb = hose->private_data;
	struct pci_bus_region region;
	struct resource *res;
	int i, index;
	int rc;

	/*
	 * NOTE: We only care about PCI-bus-based PEs for now.
	 * PCI-device-based PEs, for example SR-IOV VFs, will
	 * have to be figured out later.
	 */
	BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)));

	pci_bus_for_each_resource(pe->pbus, res, i) {
		if (!res || !res->flags ||
		    res->start > res->end)
			continue;

		if (res->flags & IORESOURCE_IO) {
			region.start = res->start - phb->ioda.io_pci_base;
			region.end = res->end - phb->ioda.io_pci_base;
			index = region.start / phb->ioda.io_segsize;

			while (index < phb->ioda.total_pe &&
			       region.start <= region.end) {
				phb->ioda.io_segmap[index] = pe->pe_number;
				rc = opal_pci_map_pe_mmio_window(phb->opal_id,
					pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
				if (rc != OPAL_SUCCESS) {
					pr_err("%s: OPAL error %d when mapping IO "
					       "segment #%d to PE#%d\n",
					       __func__, rc, index, pe->pe_number);
					break;
				}

				region.start += phb->ioda.io_segsize;
				index++;
			}
		} else if (res->flags & IORESOURCE_MEM) {
			region.start = res->start -
				       hose->pci_mem_offset -
				       phb->ioda.m32_pci_base;
			region.end = res->end -
				     hose->pci_mem_offset -
				     phb->ioda.m32_pci_base;
			index = region.start / phb->ioda.m32_segsize;

			while (index < phb->ioda.total_pe &&
			       region.start <= region.end) {
				phb->ioda.m32_segmap[index] = pe->pe_number;
				rc = opal_pci_map_pe_mmio_window(phb->opal_id,
					pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
				if (rc != OPAL_SUCCESS) {
					pr_err("%s: OPAL error %d when mapping M32 "
					       "segment#%d to PE#%d",
					       __func__, rc, index, pe->pe_number);
					break;
				}

				region.start += phb->ioda.m32_segsize;
				index++;
			}
		}
	}
}

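/*
 * Worked example for the M32 mapping above (numbers are hypothetical):
 * with an m32_segsize of 16M, a bus MMIO resource covering 32M that
 * starts 48M into the M32 window maps to segment indexes 3 and 4, each
 * handed to OPAL via opal_pci_map_pe_mmio_window() with
 * OPAL_M32_WINDOW_TYPE.
 */
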
static void pnv_pci_ioda_setup_seg(void)
{
	struct pci_controller *tmp, *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		phb = hose->private_data;
		list_for_each_entry(pe, &phb->ioda.pe_list, list) {
			pnv_ioda_setup_pe_seg(hose, pe);
		}
	}
}

static void pnv_pci_ioda_setup_DMA(void)
{
	struct pci_controller *hose, *tmp;
	struct pnv_phb *phb;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		pnv_ioda_setup_dma(hose->private_data);

		/* Mark the PHB initialization done */
		phb = hose->private_data;
		phb->initialized = 1;
	}
}

static void pnv_pci_ioda_fixup(void)
{
	pnv_pci_ioda_setup_PEs();
	pnv_pci_ioda_setup_seg();
	pnv_pci_ioda_setup_DMA();
}

/*
 * Returns the alignment for I/O or memory windows for P2P
 * bridges. That actually depends on how PEs are segmented.
 * For now, we return I/O or M32 segment size for PE sensitive
 * P2P bridges. Otherwise, the default values (4KiB for I/O,
 * 1MiB for memory) will be returned.
 *
 * The current PCI bus might be put into one PE, which was
 * created against the parent PCI bridge. In that case, we
 * don't need to enlarge the alignment, which saves some
 * resources.
 */
static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
						unsigned long type)
{
	struct pci_dev *bridge;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	int num_pci_bridges = 0;

	bridge = bus->self;
	while (bridge) {
		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) {
			num_pci_bridges++;
			if (num_pci_bridges >= 2)
				return 1;
		}

		bridge = bridge->bus->self;
	}

	/* We need to support prefetchable memory windows later */
	if (type & IORESOURCE_MEM)
		return phb->ioda.m32_segsize;

	return phb->ioda.io_segsize;
}

/* Prevent enabling devices for which we couldn't properly
 * assign a PE
 */
static int pnv_pci_enable_device_hook(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn;

	/* This function can be called while the PEs have not been
	 * created yet, for example during resource reassignment in
	 * the PCI probe period. Just skip the check if the PEs
	 * aren't ready.
	 */
	if (!phb->initialized)
		return 0;

	pdn = pnv_ioda_get_pdn(dev);
	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return -EINVAL;

	return 0;
}

static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus,
			       u32 devfn)
{
	return phb->ioda.pe_rmap[(bus->number << 8) | devfn];
}

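/*
 * Worked example for the RID reverse map above (values are
 * hypothetical): device 0000:04:01.2 has bus->number = 4 and
 * devfn = (1 << 3) | 2 = 0x0a, so pe_rmap[(4 << 8) | 0x0a], i.e.
 * pe_rmap[0x040a], yields its PE#.
 */
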
void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type)
{
	struct pci_controller *hose;
	static int primary = 1;
	struct pnv_phb *phb;
	unsigned long size, m32map_off, iomap_off, pemap_off;
	const u64 *prop64;
	const u32 *prop32;
	u64 phb_id;
	void *aux;
	long rc;

	pr_info(" Initializing IODA%d OPAL PHB %s\n", ioda_type, np->full_name);

	prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
	if (!prop64) {
		pr_err("  Missing \"ibm,opal-phbid\" property !\n");
		return;
	}
	phb_id = be64_to_cpup(prop64);
	pr_debug("  PHB-ID  : 0x%016llx\n", phb_id);

	phb = alloc_bootmem(sizeof(struct pnv_phb));
	if (phb) {
		memset(phb, 0, sizeof(struct pnv_phb));
		phb->hose = hose = pcibios_alloc_controller(np);
	}
	if (!phb || !phb->hose) {
		pr_err("PCI: Failed to allocate PCI controller for %s\n",
		       np->full_name);
		return;
	}

	spin_lock_init(&phb->lock);
	/* XXX Use device-tree */
	hose->first_busno = 0;
	hose->last_busno = 0xff;
	hose->private_data = phb;
	phb->opal_id = phb_id;
	phb->type = ioda_type;

	/* Detect specific models for error handling */
	if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
		phb->model = PNV_PHB_MODEL_P7IOC;
	else if (of_device_is_compatible(np, "ibm,p8-pciex"))
		phb->model = PNV_PHB_MODEL_PHB3;
	else
		phb->model = PNV_PHB_MODEL_UNKNOWN;

	/* Parse 32-bit and IO ranges (if any) */
	pci_process_bridge_OF_ranges(phb->hose, np, primary);
	primary = 0;

	/* Get registers */
	phb->regs = of_iomap(np, 0);
	if (phb->regs == NULL)
		pr_err(" Failed to map registers !\n");

	/* Initialize more IODA stuff */
	prop32 = of_get_property(np, "ibm,opal-num-pes", NULL);
	if (!prop32)
		phb->ioda.total_pe = 1;
	else
		phb->ioda.total_pe = *prop32;

	phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
	/* FW has already carved the top 64K off of M32 space (MSI space) */
	phb->ioda.m32_size += 0x10000;

	phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe;
	phb->ioda.m32_pci_base = hose->mem_resources[0].start -
		hose->pci_mem_offset;
	phb->ioda.io_size = hose->pci_io_size;
	phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe;
	phb->ioda.io_pci_base = 0; /* XXX calculate this ? */

	/* Allocate aux data & arrays
	 *
	 * XXX TODO: Don't allocate io segmap on PHB3
	 */
	size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
	m32map_off = size;
	size += phb->ioda.total_pe * sizeof(phb->ioda.m32_segmap[0]);
	iomap_off = size;
	size += phb->ioda.total_pe * sizeof(phb->ioda.io_segmap[0]);
	pemap_off = size;
	size += phb->ioda.total_pe * sizeof(struct pnv_ioda_pe);
	aux = alloc_bootmem(size);
	memset(aux, 0, size);
	phb->ioda.pe_alloc = aux;
	phb->ioda.m32_segmap = aux + m32map_off;
	phb->ioda.io_segmap = aux + iomap_off;
	phb->ioda.pe_array = aux + pemap_off;
	set_bit(0, phb->ioda.pe_alloc);

	INIT_LIST_HEAD(&phb->ioda.pe_dma_list);
	INIT_LIST_HEAD(&phb->ioda.pe_list);

	/* Calculate how many 32-bit TCE segments we have */
	phb->ioda.tce32_count = phb->ioda.m32_pci_base >> 28;

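	/*
	 * Illustrative check (numbers are hypothetical): an M32 window
	 * starting at PCI address 0x80000000 gives m32_pci_base >> 28 = 8,
	 * i.e. eight 256M TCE32 segments of 32-bit DMA space below the
	 * window.
	 */
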
	/* Clear unusable m64 */
	hose->mem_resources[1].flags = 0;
	hose->mem_resources[1].start = 0;
	hose->mem_resources[1].end = 0;
	hose->mem_resources[2].flags = 0;
	hose->mem_resources[2].start = 0;
	hose->mem_resources[2].end = 0;

#if 0 /* We should really do that ... */
	rc = opal_pci_set_phb_mem_window(opal->phb_id,
					 window_type,
					 window_num,
					 starting_real_address,
					 starting_pci_address,
					 segment_size);
#endif

	pr_info("  %d PEs M32: 0x%x [segment=0x%x] IO: 0x%x [segment=0x%x]\n",
		phb->ioda.total_pe,
		phb->ioda.m32_size, phb->ioda.m32_segsize,
		phb->ioda.io_size, phb->ioda.io_segsize);

	phb->hose->ops = &pnv_pci_ops;

	/* Setup RID -> PE mapping function */
	phb->bdfn_to_pe = pnv_ioda_bdfn_to_pe;

	/* Setup TCEs */
	phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;

	/* Setup MSI support */
	pnv_pci_init_ioda_msis(phb);

	/*
	 * We pass the PCI probe flag PCI_REASSIGN_ALL_RSRC here
	 * to let the PCI core do resource assignment. It's expected
	 * that the PCI core will do correct I/O and MMIO alignment
	 * for the P2P bridge BARs so that each PCI bus (excluding
	 * the child P2P bridges) can form an individual PE.
	 */
	ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
	ppc_md.pcibios_enable_device_hook = pnv_pci_enable_device_hook;
	ppc_md.pcibios_window_alignment = pnv_pci_window_alignment;
	pci_add_flags(PCI_REASSIGN_ALL_RSRC);

	/* Reset IODA tables to a clean state */
	rc = opal_pci_reset(phb_id, OPAL_PCI_IODA_TABLE_RESET, OPAL_ASSERT_RESET);
	if (rc)
		pr_warning("  OPAL Error %ld performing IODA table reset !\n", rc);

	/*
	 * On IODA1 map everything to PE#0, on IODA2 we assume the IODA reset
	 * has cleared the RTT which has the same effect
	 */
	if (ioda_type == PNV_PHB_IODA1)
		opal_pci_set_pe(phb_id, 0, 0, 7, 1, 1, OPAL_MAP_PE);
}

void pnv_pci_init_ioda2_phb(struct device_node *np)
{
	pnv_pci_init_ioda_phb(np, PNV_PHB_IODA2);
}

void __init pnv_pci_init_ioda_hub(struct device_node *np)
{
	struct device_node *phbn;
	const u64 *prop64;
	u64 hub_id;

	pr_info("Probing IODA IO-Hub %s\n", np->full_name);

	prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
	if (!prop64) {
		pr_err(" Missing \"ibm,opal-hubid\" property !\n");
		return;
	}
	hub_id = be64_to_cpup(prop64);
	pr_devel(" HUB-ID : 0x%016llx\n", hub_id);

	/* Count child PHBs */
	for_each_child_of_node(np, phbn) {
		/* Look for IODA1 PHBs */
		if (of_device_is_compatible(phbn, "ibm,ioda-phb"))
			pnv_pci_init_ioda_phb(phbn, PNV_PHB_IODA1);
	}
}