/* arch/powerpc/platforms/powernv/pci-ioda.c */

/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/crash_dump.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/memblock.h>
#include <linux/iommu.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/xics.h>
#include <asm/debug.h>
#include <asm/firmware.h>
#include <asm/pnv-pci.h>

#include <misc/cxl-base.h>

#include "powernv.h"
#include "pci.h"

/* 256M DMA window, 4K TCE pages, 8 bytes TCE */
#define TCE32_TABLE_SIZE	((0x10000000 / 0x1000) * 8)
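
/*
 * That works out to 0x10000000 / 0x1000 = 65536 TCEs per 256M window;
 * at 8 bytes each, every 32-bit DMA segment needs a 512K TCE table.
 */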

static void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
			    const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	char pfix[32];

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (pe->flags & PNV_IODA_PE_DEV)
		strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix));
	else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
		sprintf(pfix, "%04x:%02x ",
			pci_domain_nr(pe->pbus), pe->pbus->number);
#ifdef CONFIG_PCI_IOV
	else if (pe->flags & PNV_IODA_PE_VF)
		sprintf(pfix, "%04x:%02x:%2x.%d",
			pci_domain_nr(pe->parent_dev->bus),
			(pe->rid & 0xff00) >> 8,
			PCI_SLOT(pe->rid), PCI_FUNC(pe->rid));
#endif /* CONFIG_PCI_IOV */

	printk("%spci %s: [PE# %.3d] %pV",
	       level, pfix, pe->pe_number, &vaf);

	va_end(args);
}

#define pe_err(pe, fmt, ...)					\
	pe_level_printk(pe, KERN_ERR, fmt, ##__VA_ARGS__)
#define pe_warn(pe, fmt, ...)					\
	pe_level_printk(pe, KERN_WARNING, fmt, ##__VA_ARGS__)
#define pe_info(pe, fmt, ...)					\
	pe_level_printk(pe, KERN_INFO, fmt, ##__VA_ARGS__)

static bool pnv_iommu_bypass_disabled __read_mostly;

static int __init iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;

	while (*str) {
		if (!strncmp(str, "nobypass", 8)) {
			pnv_iommu_bypass_disabled = true;
			pr_info("PowerNV: IOMMU bypass window disabled.\n");
			break;
		}
		str += strcspn(str, ",");
		if (*str == ',')
			str++;
	}

	return 0;
}
early_param("iommu", iommu_setup);
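
/*
 * For example, booting with "iommu=nobypass" on the kernel command line
 * sets the flag above and keeps devices behind their 32-bit TCE tables
 * rather than the 64-bit direct-map bypass window.
 */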

/*
 * stdcix is only supposed to be used in hypervisor real mode as per
 * the architecture spec
 */
static inline void __raw_rm_writeq(u64 val, volatile void __iomem *paddr)
{
	__asm__ __volatile__("stdcix %0,0,%1"
		: : "r" (val), "r" (paddr) : "memory");
}

static inline bool pnv_pci_is_mem_pref_64(unsigned long flags)
{
	return ((flags & (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH)) ==
		(IORESOURCE_MEM_64 | IORESOURCE_PREFETCH));
}

static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no)
{
	if (!(pe_no >= 0 && pe_no < phb->ioda.total_pe)) {
		pr_warn("%s: Invalid PE %d on PHB#%x\n",
			__func__, pe_no, phb->hose->global_number);
		return;
	}

	if (test_and_set_bit(pe_no, phb->ioda.pe_alloc)) {
		pr_warn("%s: PE %d was already assigned on PHB#%x\n",
			__func__, pe_no, phb->hose->global_number);
		return;
	}

	phb->ioda.pe_array[pe_no].phb = phb;
	phb->ioda.pe_array[pe_no].pe_number = pe_no;
}

static int pnv_ioda_alloc_pe(struct pnv_phb *phb)
{
	unsigned long pe;

	do {
		pe = find_next_zero_bit(phb->ioda.pe_alloc,
					phb->ioda.total_pe, 0);
		if (pe >= phb->ioda.total_pe)
			return IODA_INVALID_PE;
	} while (test_and_set_bit(pe, phb->ioda.pe_alloc));

	phb->ioda.pe_array[pe].phb = phb;
	phb->ioda.pe_array[pe].pe_number = pe;
	return pe;
}

static void pnv_ioda_free_pe(struct pnv_phb *phb, int pe)
{
	WARN_ON(phb->ioda.pe_array[pe].pdev);

	memset(&phb->ioda.pe_array[pe], 0, sizeof(struct pnv_ioda_pe));
	clear_bit(pe, phb->ioda.pe_alloc);
}

/* The default M64 BAR is shared by all PEs */
static int pnv_ioda2_init_m64(struct pnv_phb *phb)
{
	const char *desc;
	struct resource *r;
	s64 rc;

	/* Configure the default M64 BAR */
	rc = opal_pci_set_phb_mem_window(phb->opal_id,
					 OPAL_M64_WINDOW_TYPE,
					 phb->ioda.m64_bar_idx,
					 phb->ioda.m64_base,
					 0, /* unused */
					 phb->ioda.m64_size);
	if (rc != OPAL_SUCCESS) {
		desc = "configuring";
		goto fail;
	}

	/* Enable the default M64 BAR */
	rc = opal_pci_phb_mmio_enable(phb->opal_id,
				      OPAL_M64_WINDOW_TYPE,
				      phb->ioda.m64_bar_idx,
				      OPAL_ENABLE_M64_SPLIT);
	if (rc != OPAL_SUCCESS) {
		desc = "enabling";
		goto fail;
	}

	/* Mark the M64 BAR assigned */
	set_bit(phb->ioda.m64_bar_idx, &phb->ioda.m64_bar_alloc);

	/*
	 * Strip off the segment used by the reserved PE, which is
	 * expected to be 0 or the last one of the PE capacity.
	 */
	r = &phb->hose->mem_resources[1];
	if (phb->ioda.reserved_pe == 0)
		r->start += phb->ioda.m64_segsize;
	else if (phb->ioda.reserved_pe == (phb->ioda.total_pe - 1))
		r->end -= phb->ioda.m64_segsize;
	else
		pr_warn("  Cannot strip M64 segment for reserved PE#%d\n",
			phb->ioda.reserved_pe);

	return 0;

fail:
	pr_warn("  Failure %lld %s M64 BAR#%d\n",
		rc, desc, phb->ioda.m64_bar_idx);
	opal_pci_phb_mmio_enable(phb->opal_id,
				 OPAL_M64_WINDOW_TYPE,
				 phb->ioda.m64_bar_idx,
				 OPAL_DISABLE_M64);
	return -EIO;
}

static void pnv_ioda2_reserve_m64_pe(struct pnv_phb *phb)
{
	resource_size_t sgsz = phb->ioda.m64_segsize;
	struct pci_dev *pdev;
	struct resource *r;
	int base, step, i;

	/*
	 * The root bus always claims the full M64 range, while the
	 * root ports own the ranges that are used in reality. So we
	 * check the root ports instead of the root bus.
	 */
	list_for_each_entry(pdev, &phb->hose->bus->devices, bus_list) {
		for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
			r = &pdev->resource[PCI_BRIDGE_RESOURCES + i];
			if (!r->parent ||
			    !pnv_pci_is_mem_pref_64(r->flags))
				continue;

			base = (r->start - phb->ioda.m64_base) / sgsz;
			for (step = 0; step < resource_size(r) / sgsz; step++)
				pnv_ioda_reserve_pe(phb, base + step);
		}
	}
}
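
/*
 * Illustration (sizes assumed, not read from firmware): with a 64G M64
 * window split across 256 PEs, m64_segsize is 256M; a root-port window
 * of 512M starting 1G into the M64 space yields base = 4 and reserves
 * PE#4 and PE#5 above.
 */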

static int pnv_ioda2_pick_m64_pe(struct pnv_phb *phb,
				 struct pci_bus *bus, int all)
{
	resource_size_t segsz = phb->ioda.m64_segsize;
	struct pci_dev *pdev;
	struct resource *r;
	struct pnv_ioda_pe *master_pe, *pe;
	unsigned long size, *pe_alloc;
	bool found;
	int start, i, j;

	/* Root bus shouldn't use M64 */
	if (pci_is_root_bus(bus))
		return IODA_INVALID_PE;

	/* We support only one M64 window on each bus */
	found = false;
	pci_bus_for_each_resource(bus, r, i) {
		if (r && r->parent &&
		    pnv_pci_is_mem_pref_64(r->flags)) {
			found = true;
			break;
		}
	}

	/* No M64 window found ? */
	if (!found)
		return IODA_INVALID_PE;

	/* Allocate bitmap */
	size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
	pe_alloc = kzalloc(size, GFP_KERNEL);
	if (!pe_alloc) {
		pr_warn("%s: Out of memory !\n",
			__func__);
		return IODA_INVALID_PE;
	}

	/*
	 * Figure out the PE numbers reserved by this PE
	 * and its child PEs.
	 */
	start = (r->start - phb->ioda.m64_base) / segsz;
	for (i = 0; i < resource_size(r) / segsz; i++)
		set_bit(start + i, pe_alloc);

	if (all)
		goto done;

	/*
	 * If the PE doesn't cover all subordinate buses, we need to
	 * subtract the segments used by the children from the
	 * reserved PEs.
	 */
	list_for_each_entry(pdev, &bus->devices, bus_list) {
		if (!pdev->subordinate)
			continue;

		pci_bus_for_each_resource(pdev->subordinate, r, i) {
			if (!r || !r->parent ||
			    !pnv_pci_is_mem_pref_64(r->flags))
				continue;

			start = (r->start - phb->ioda.m64_base) / segsz;
			for (j = 0; j < resource_size(r) / segsz; j++)
				clear_bit(start + j, pe_alloc);
		}
	}

	/*
	 * The current bus might not own any M64 window itself; it may
	 * all be contributed by its child buses. In that case, we
	 * needn't pick an M64 dependent PE#.
	 */
	if (bitmap_empty(pe_alloc, phb->ioda.total_pe)) {
		kfree(pe_alloc);
		return IODA_INVALID_PE;
	}

	/*
	 * Figure out the master PE and put all slave PEs on the
	 * master PE's list to form a compound PE.
	 */
done:
	master_pe = NULL;
	i = -1;
	while ((i = find_next_bit(pe_alloc, phb->ioda.total_pe, i + 1)) <
		phb->ioda.total_pe) {
		pe = &phb->ioda.pe_array[i];

		if (!master_pe) {
			pe->flags |= PNV_IODA_PE_MASTER;
			INIT_LIST_HEAD(&pe->slaves);
			master_pe = pe;
		} else {
			pe->flags |= PNV_IODA_PE_SLAVE;
			pe->master = master_pe;
			list_add_tail(&pe->list, &master_pe->slaves);
		}
	}

	kfree(pe_alloc);
	return master_pe->pe_number;
}

static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
{
	struct pci_controller *hose = phb->hose;
	struct device_node *dn = hose->dn;
	struct resource *res;
	const u32 *r;
	u64 pci_addr;

	/* FIXME: Support M64 for P7IOC */
	if (phb->type != PNV_PHB_IODA2) {
		pr_info("  M64 window not supported on this PHB type\n");
		return;
	}

	if (!firmware_has_feature(FW_FEATURE_OPALv3)) {
		pr_info("  Firmware too old to support M64 window\n");
		return;
	}

	r = of_get_property(dn, "ibm,opal-m64-window", NULL);
	if (!r) {
		pr_info("  No <ibm,opal-m64-window> on %s\n",
			dn->full_name);
		return;
	}

	res = &hose->mem_resources[1];
	res->start = of_translate_address(dn, r + 2);
	res->end = res->start + of_read_number(r + 4, 2) - 1;
	res->flags = (IORESOURCE_MEM | IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);
	pci_addr = of_read_number(r, 2);
	hose->mem_offset[1] = res->start - pci_addr;

	phb->ioda.m64_size = resource_size(res);
	phb->ioda.m64_segsize = phb->ioda.m64_size / phb->ioda.total_pe;
	phb->ioda.m64_base = pci_addr;

	pr_info(" MEM64 0x%016llx..0x%016llx -> 0x%016llx\n",
		res->start, res->end, pci_addr);

	/* Use the last M64 BAR to cover the M64 window */
	phb->ioda.m64_bar_idx = 15;
	phb->init_m64 = pnv_ioda2_init_m64;
	phb->reserve_m64_pe = pnv_ioda2_reserve_m64_pe;
	phb->pick_m64_pe = pnv_ioda2_pick_m64_pe;
}

static void pnv_ioda_freeze_pe(struct pnv_phb *phb, int pe_no)
{
	struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_no];
	struct pnv_ioda_pe *slave;
	s64 rc;

	/* Fetch master PE */
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		pe = pe->master;
		if (WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER)))
			return;

		pe_no = pe->pe_number;
	}

	/* Freeze master PE */
	rc = opal_pci_eeh_freeze_set(phb->opal_id,
				     pe_no,
				     OPAL_EEH_ACTION_SET_FREEZE_ALL);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
			__func__, rc, phb->hose->global_number, pe_no);
		return;
	}

	/* Freeze slave PEs */
	if (!(pe->flags & PNV_IODA_PE_MASTER))
		return;

	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_set(phb->opal_id,
					     slave->pe_number,
					     OPAL_EEH_ACTION_SET_FREEZE_ALL);
		if (rc != OPAL_SUCCESS)
			pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				slave->pe_number);
	}
}

static int pnv_ioda_unfreeze_pe(struct pnv_phb *phb, int pe_no, int opt)
{
	struct pnv_ioda_pe *pe, *slave;
	s64 rc;

	/* Find master PE */
	pe = &phb->ioda.pe_array[pe_no];
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		pe = pe->master;
		WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
		pe_no = pe->pe_number;
	}

	/* Clear frozen state for master PE */
	rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no, opt);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n",
			__func__, rc, opt, phb->hose->global_number, pe_no);
		return -EIO;
	}

	if (!(pe->flags & PNV_IODA_PE_MASTER))
		return 0;

	/* Clear frozen state for slave PEs */
	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_clear(phb->opal_id,
					       slave->pe_number,
					       opt);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n",
				__func__, rc, opt, phb->hose->global_number,
				slave->pe_number);
			return -EIO;
		}
	}

	return 0;
}

static int pnv_ioda_get_pe_state(struct pnv_phb *phb, int pe_no)
{
	struct pnv_ioda_pe *slave, *pe;
	u8 fstate, state;
	__be16 pcierr;
	s64 rc;

	/* Sanity check on PE number */
	if (pe_no < 0 || pe_no >= phb->ioda.total_pe)
		return OPAL_EEH_STOPPED_PERM_UNAVAIL;

	/*
	 * Fetch the master PE; note that the PE instance
	 * might not be initialized yet.
	 */
	pe = &phb->ioda.pe_array[pe_no];
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		pe = pe->master;
		WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
		pe_no = pe->pe_number;
	}

	/* Check the master PE */
	rc = opal_pci_eeh_freeze_status(phb->opal_id, pe_no,
					&state, &pcierr, NULL);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld getting "
			"PHB#%x-PE#%x state\n",
			__func__, rc,
			phb->hose->global_number, pe_no);
		return OPAL_EEH_STOPPED_TEMP_UNAVAIL;
	}

	/* Check the slave PE */
	if (!(pe->flags & PNV_IODA_PE_MASTER))
		return state;

	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						slave->pe_number,
						&fstate,
						&pcierr,
						NULL);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld getting "
				"PHB#%x-PE#%x state\n",
				__func__, rc,
				phb->hose->global_number, slave->pe_number);
			return OPAL_EEH_STOPPED_TEMP_UNAVAIL;
		}

		/*
		 * Override the result based on the ascending
		 * priority.
		 */
		if (fstate > state)
			state = fstate;
	}

	return state;
}

/* Currently those 2 are only used when MSIs are enabled, this will change
 * but in the meantime, we need to protect them to avoid warnings
 */
#ifdef CONFIG_PCI_MSI
static struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(dev);

	if (!pdn)
		return NULL;
	if (pdn->pe_number == IODA_INVALID_PE)
		return NULL;
	return &phb->ioda.pe_array[pdn->pe_number];
}
#endif /* CONFIG_PCI_MSI */

static int pnv_ioda_set_one_peltv(struct pnv_phb *phb,
				  struct pnv_ioda_pe *parent,
				  struct pnv_ioda_pe *child,
				  bool is_add)
{
	const char *desc = is_add ? "adding" : "removing";
	uint8_t op = is_add ? OPAL_ADD_PE_TO_DOMAIN :
			      OPAL_REMOVE_PE_FROM_DOMAIN;
	struct pnv_ioda_pe *slave;
	long rc;

	/* Parent PE affects child PE */
	rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
				child->pe_number, op);
	if (rc != OPAL_SUCCESS) {
		pe_warn(child, "OPAL error %ld %s to parent PELTV\n",
			rc, desc);
		return -ENXIO;
	}

	if (!(child->flags & PNV_IODA_PE_MASTER))
		return 0;

	/* Compound case: parent PE affects slave PEs */
	list_for_each_entry(slave, &child->slaves, list) {
		rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
					slave->pe_number, op);
		if (rc != OPAL_SUCCESS) {
			pe_warn(slave, "OPAL error %ld %s to parent PELTV\n",
				rc, desc);
			return -ENXIO;
		}
	}

	return 0;
}

static int pnv_ioda_set_peltv(struct pnv_phb *phb,
			      struct pnv_ioda_pe *pe,
			      bool is_add)
{
	struct pnv_ioda_pe *slave;
	struct pci_dev *pdev = NULL;
	int ret;

	/*
	 * Clear the PE frozen state. If it's a master PE, we need
	 * to clear the slave PEs' frozen state as well.
	 */
	if (is_add) {
		opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
					  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		if (pe->flags & PNV_IODA_PE_MASTER) {
			list_for_each_entry(slave, &pe->slaves, list)
				opal_pci_eeh_freeze_clear(phb->opal_id,
					slave->pe_number,
					OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		}
	}

	/*
	 * Associate the PE in the PELT. We need to add the PE to the
	 * corresponding PELT-V as well; otherwise, errors originating
	 * from the PE might contribute to other PEs.
	 */
	ret = pnv_ioda_set_one_peltv(phb, pe, pe, is_add);
	if (ret)
		return ret;

	/* For compound PEs, any one affects all of them */
	if (pe->flags & PNV_IODA_PE_MASTER) {
		list_for_each_entry(slave, &pe->slaves, list) {
			ret = pnv_ioda_set_one_peltv(phb, slave, pe, is_add);
			if (ret)
				return ret;
		}
	}

	if (pe->flags & (PNV_IODA_PE_BUS_ALL | PNV_IODA_PE_BUS))
		pdev = pe->pbus->self;
	else if (pe->flags & PNV_IODA_PE_DEV)
		pdev = pe->pdev->bus->self;
#ifdef CONFIG_PCI_IOV
	else if (pe->flags & PNV_IODA_PE_VF)
		pdev = pe->parent_dev->bus->self;
#endif /* CONFIG_PCI_IOV */
	while (pdev) {
		struct pci_dn *pdn = pci_get_pdn(pdev);
		struct pnv_ioda_pe *parent;

		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
			parent = &phb->ioda.pe_array[pdn->pe_number];
			ret = pnv_ioda_set_one_peltv(phb, parent, pe, is_add);
			if (ret)
				return ret;
		}

		pdev = pdev->bus->self;
	}

	return 0;
}
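
/*
 * Note on the upstream walk above: for a PE behind a chain of bridges,
 * the loop adds the PE to the PELT-V of every upstream bridge's PE, so
 * an error in any parent PE also freezes this PE.
 */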

#ifdef CONFIG_PCI_IOV
static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
	struct pci_dev *parent;
	uint8_t bcomp, dcomp, fcomp;
	int64_t rc;
	long rid_end, rid;

	/* Currently, we just deconfigure VF PEs. Bus PEs will always be there. */
	if (pe->pbus) {
		int count;

		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
		parent = pe->pbus->self;
		if (pe->flags & PNV_IODA_PE_BUS_ALL)
			count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
		else
			count = 1;

		switch (count) {
		case  1: bcomp = OpalPciBusAll;		break;
		case  2: bcomp = OpalPciBus7Bits;	break;
		case  4: bcomp = OpalPciBus6Bits;	break;
		case  8: bcomp = OpalPciBus5Bits;	break;
		case 16: bcomp = OpalPciBus4Bits;	break;
		case 32: bcomp = OpalPciBus3Bits;	break;
		default:
			dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
				count);
			/* Do an exact match only */
			bcomp = OpalPciBusAll;
		}
		rid_end = pe->rid + (count << 8);
	} else {
		if (pe->flags & PNV_IODA_PE_VF)
			parent = pe->parent_dev;
		else
			parent = pe->pdev->bus->self;
		bcomp = OpalPciBusAll;
		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
		rid_end = pe->rid + 1;
	}

	/* Clear the reverse map */
	for (rid = pe->rid; rid < rid_end; rid++)
		phb->ioda.pe_rmap[rid] = 0;

	/* Release from all parents' PELT-V */
	while (parent) {
		struct pci_dn *pdn = pci_get_pdn(parent);
		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
			rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
						pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
			/* XXX What to do in case of error ? */
		}
		parent = parent->bus->self;
	}

	opal_pci_eeh_freeze_set(phb->opal_id, pe->pe_number,
				OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);

	/* Disassociate PE in PELT */
	rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
				pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
	if (rc)
		pe_warn(pe, "OPAL error %ld remove self from PELTV\n", rc);
	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
			     bcomp, dcomp, fcomp, OPAL_UNMAP_PE);
	if (rc)
		pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);

	pe->pbus = NULL;
	pe->pdev = NULL;
	pe->parent_dev = NULL;

	return 0;
}
#endif /* CONFIG_PCI_IOV */

static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
	struct pci_dev *parent;
	uint8_t bcomp, dcomp, fcomp;
	long rc, rid_end, rid;

	/* Bus validation ? */
	if (pe->pbus) {
		int count;

		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
		parent = pe->pbus->self;
		if (pe->flags & PNV_IODA_PE_BUS_ALL)
			count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
		else
			count = 1;

		switch (count) {
		case  1: bcomp = OpalPciBusAll;		break;
		case  2: bcomp = OpalPciBus7Bits;	break;
		case  4: bcomp = OpalPciBus6Bits;	break;
		case  8: bcomp = OpalPciBus5Bits;	break;
		case 16: bcomp = OpalPciBus4Bits;	break;
		case 32: bcomp = OpalPciBus3Bits;	break;
		default:
			dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
				count);
			/* Do an exact match only */
			bcomp = OpalPciBusAll;
		}
		rid_end = pe->rid + (count << 8);
	} else {
#ifdef CONFIG_PCI_IOV
		if (pe->flags & PNV_IODA_PE_VF)
			parent = pe->parent_dev;
		else
#endif /* CONFIG_PCI_IOV */
			parent = pe->pdev->bus->self;
		bcomp = OpalPciBusAll;
		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
		rid_end = pe->rid + 1;
	}

	/*
	 * Associate the PE in the PELT. We need to add the PE to the
	 * corresponding PELT-V as well; otherwise, errors originating
	 * from the PE might contribute to other PEs.
	 */
	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
			     bcomp, dcomp, fcomp, OPAL_MAP_PE);
	if (rc) {
		pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
		return -ENXIO;
	}

	/* Configure PELTV */
	pnv_ioda_set_peltv(phb, pe, true);

	/* Setup reverse map */
	for (rid = pe->rid; rid < rid_end; rid++)
		phb->ioda.pe_rmap[rid] = pe->pe_number;

	/* Setup one MVE on IODA1 */
	if (phb->type != PNV_PHB_IODA1) {
		pe->mve_number = 0;
		goto out;
	}

	pe->mve_number = pe->pe_number;
	rc = opal_pci_set_mve(phb->opal_id, pe->mve_number, pe->pe_number);
	if (rc != OPAL_SUCCESS) {
		pe_err(pe, "OPAL error %ld setting up MVE %d\n",
		       rc, pe->mve_number);
		pe->mve_number = -1;
	} else {
		rc = opal_pci_set_mve_enable(phb->opal_id,
					     pe->mve_number, OPAL_ENABLE_MVE);
		if (rc) {
			pe_err(pe, "OPAL error %ld enabling MVE %d\n",
			       rc, pe->mve_number);
			pe->mve_number = -1;
		}
	}

out:
	return 0;
}

static void pnv_ioda_link_pe_by_weight(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe)
{
	struct pnv_ioda_pe *lpe;

	list_for_each_entry(lpe, &phb->ioda.pe_dma_list, dma_link) {
		if (lpe->dma_weight < pe->dma_weight) {
			list_add_tail(&pe->dma_link, &lpe->dma_link);
			return;
		}
	}
	list_add_tail(&pe->dma_link, &phb->ioda.pe_dma_list);
}

static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev)
{
	/* This is quite simplistic. The "base" weight of a device
	 * is 10. 0 means no DMA is to be accounted for it.
	 */

	/* If it's a bridge, no DMA */
	if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return 0;

	/* Reduce the weight of slow USB controllers */
	if (dev->class == PCI_CLASS_SERIAL_USB_UHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_OHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_EHCI)
		return 3;

	/* Increase the weight of RAID (includes Obsidian) */
	if ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)
		return 15;

	/* Default */
	return 10;
}

#ifdef CONFIG_PCI_IOV
static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset)
{
	struct pci_dn *pdn = pci_get_pdn(dev);
	int i;
	struct resource *res, res2;
	resource_size_t size;
	u16 num_vfs;

	if (!dev->is_physfn)
		return -EINVAL;

	/*
	 * "offset" is in VFs. The M64 windows are sized so that when they
	 * are segmented, each segment is the same size as the IOV BAR.
	 * Each segment is in a separate PE, and the high order bits of the
	 * address are the PE number. Therefore, each VF's BAR is in a
	 * separate PE, and changing the IOV BAR start address changes the
	 * range of PEs the VFs are in.
	 */
	num_vfs = pdn->num_vfs;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &dev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || !res->parent)
			continue;

		if (!pnv_pci_is_mem_pref_64(res->flags))
			continue;

		/*
		 * The actual IOV BAR range is determined by the start address
		 * and the actual size for num_vfs VFs BAR. This check is to
		 * make sure that after shifting, the range will not overlap
		 * with another device.
		 */
		size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
		res2.flags = res->flags;
		res2.start = res->start + (size * offset);
		res2.end = res2.start + (size * num_vfs) - 1;

		if (res2.end > res->end) {
			dev_err(&dev->dev, "VF BAR%d: %pR would extend past %pR (trying to enable %d VFs shifted by %d)\n",
				i, &res2, res, num_vfs, offset);
			return -EBUSY;
		}
	}

	/*
	 * After doing so, there will be a "hole" in /proc/iomem when
	 * offset is a positive value. It looks as if the device returned
	 * some MMIO range back to the system, although nobody can
	 * actually use it.
	 */
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &dev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || !res->parent)
			continue;

		if (!pnv_pci_is_mem_pref_64(res->flags))
			continue;

		size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
		res2 = *res;
		res->start += size * offset;

		dev_info(&dev->dev, "VF BAR%d: %pR shifted to %pR (enabling %d VFs shifted by %d)\n",
			 i, &res2, res, num_vfs, offset);
		pci_update_resource(dev, i + PCI_IOV_RESOURCES);
	}
	return 0;
}
#endif /* CONFIG_PCI_IOV */
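
/*
 * Illustration (sizes assumed): with a 2M per-VF segment and VF PE
 * numbers allocated starting at 8, a shift of offset = 8 moves the IOV
 * BAR start up by 16M, so VF0's BAR lands in the M64 segment owned by
 * PE#8, VF1's in PE#9, and so on, keeping PE number equal to segment
 * number.
 */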

#if 0
static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(dev);
	struct pnv_ioda_pe *pe;
	int pe_num;

	if (!pdn) {
		pr_err("%s: Device tree node not associated properly\n",
		       pci_name(dev));
		return NULL;
	}
	if (pdn->pe_number != IODA_INVALID_PE)
		return NULL;

	/* PE#0 has been pre-set */
	if (dev->bus->number == 0)
		pe_num = 0;
	else
		pe_num = pnv_ioda_alloc_pe(phb);
	if (pe_num == IODA_INVALID_PE) {
		pr_warning("%s: Not enough PE# available, disabling device\n",
			   pci_name(dev));
		return NULL;
	}

	/* NOTE: We get only one ref to the pci_dev for the pdn, not for the
	 * pointer in the PE data structure, both should be destroyed at the
	 * same time. However, this needs to be looked at more closely again
	 * once we actually start removing things (Hotplug, SR-IOV, ...)
	 *
	 * At some point we want to remove the PDN completely anyways
	 */
	pe = &phb->ioda.pe_array[pe_num];
	pci_dev_get(dev);
	pdn->pcidev = dev;
	pdn->pe_number = pe_num;
	pe->pdev = dev;
	pe->pbus = NULL;
	pe->tce32_seg = -1;
	pe->mve_number = -1;
	pe->rid = dev->bus->number << 8 | pdn->devfn;

	pe_info(pe, "Associated device to PE\n");

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		if (pe_num)
			pnv_ioda_free_pe(phb, pe_num);
		pdn->pe_number = IODA_INVALID_PE;
		pe->pdev = NULL;
		pci_dev_put(dev);
		return NULL;
	}

	/* Assign a DMA weight to the device */
	pe->dma_weight = pnv_ioda_dma_weight(dev);
	if (pe->dma_weight != 0) {
		phb->ioda.dma_weight += pe->dma_weight;
		phb->ioda.dma_pe_count++;
	}

	/* Link the PE */
	pnv_ioda_link_pe_by_weight(phb, pe);

	return pe;
}
#endif /* Useful for SRIOV case */

static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_dn *pdn = pci_get_pdn(dev);

		if (pdn == NULL) {
			pr_warn("%s: No device node associated with device !\n",
				pci_name(dev));
			continue;
		}
		pdn->pe_number = pe->pe_number;
		pe->dma_weight += pnv_ioda_dma_weight(dev);
		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
			pnv_ioda_setup_same_PE(dev->subordinate, pe);
	}
}

/*
 * There are 2 types of PCI-bus-sensitive PEs: one comprises a single
 * PCI bus; the other contains the primary PCI bus together with its
 * subordinate PCI devices and buses. The second type of PE normally
 * originates from a PCIe-to-PCI bridge or the downstream port of a
 * PLX switch.
 */
static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pe;
	int pe_num = IODA_INVALID_PE;

	/* Check if PE is determined by M64 */
	if (phb->pick_m64_pe)
		pe_num = phb->pick_m64_pe(phb, bus, all);

	/* The PE number isn't pinned by M64 */
	if (pe_num == IODA_INVALID_PE)
		pe_num = pnv_ioda_alloc_pe(phb);

	if (pe_num == IODA_INVALID_PE) {
		pr_warning("%s: Not enough PE# available for PCI bus %04x:%02x\n",
			   __func__, pci_domain_nr(bus), bus->number);
		return;
	}

	pe = &phb->ioda.pe_array[pe_num];
	pe->flags |= (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS);
	pe->pbus = bus;
	pe->pdev = NULL;
	pe->tce32_seg = -1;
	pe->mve_number = -1;
	pe->rid = bus->busn_res.start << 8;
	pe->dma_weight = 0;

	if (all)
		pe_info(pe, "Secondary bus %d..%d associated with PE#%d\n",
			bus->busn_res.start, bus->busn_res.end, pe_num);
	else
		pe_info(pe, "Secondary bus %d associated with PE#%d\n",
			bus->busn_res.start, pe_num);

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		if (pe_num)
			pnv_ioda_free_pe(phb, pe_num);
		pe->pbus = NULL;
		return;
	}

	pe->tce32_table = kzalloc_node(sizeof(struct iommu_table),
				       GFP_KERNEL, hose->node);
	pe->tce32_table->data = pe;

	/* Associate it with all child devices */
	pnv_ioda_setup_same_PE(bus, pe);

	/* Put PE to the list */
	list_add_tail(&pe->list, &phb->ioda.pe_list);

	/* Account for one DMA PE if at least one DMA capable device exists
	 * below the bridge
	 */
	if (pe->dma_weight != 0) {
		phb->ioda.dma_weight += pe->dma_weight;
		phb->ioda.dma_pe_count++;
	}

	/* Link the PE */
	pnv_ioda_link_pe_by_weight(phb, pe);
}

static void pnv_ioda_setup_PEs(struct pci_bus *bus)
{
	struct pci_dev *dev;

	pnv_ioda_setup_bus_PE(bus, 0);

	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (dev->subordinate) {
			if (pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE)
				pnv_ioda_setup_bus_PE(dev->subordinate, 1);
			else
				pnv_ioda_setup_PEs(dev->subordinate);
		}
	}
}

/*
 * Configure PEs so that the downstream PCI buses and devices
 * have their associated PE#. Unfortunately, we haven't figured
 * out a way to identify PLX bridges yet, so we simply assign
 * the PCI bus and everything subordinate behind the root port
 * to a PE# here. This rule is expected to change as soon as we
 * can detect PLX bridges correctly.
 */
static void pnv_pci_ioda_setup_PEs(void)
{
	struct pci_controller *hose, *tmp;
	struct pnv_phb *phb;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		phb = hose->private_data;

		/* M64 layout might affect PE allocation */
		if (phb->reserve_m64_pe)
			phb->reserve_m64_pe(phb);

		pnv_ioda_setup_PEs(hose->bus);
	}
}

#ifdef CONFIG_PCI_IOV
static int pnv_pci_vf_release_m64(struct pci_dev *pdev)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pci_dn *pdn;
	int i, j;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
		for (j = 0; j < M64_PER_IOV; j++) {
			if (pdn->m64_wins[i][j] == IODA_INVALID_M64)
				continue;
			opal_pci_phb_mmio_enable(phb->opal_id,
				OPAL_M64_WINDOW_TYPE, pdn->m64_wins[i][j], 0);
			clear_bit(pdn->m64_wins[i][j], &phb->ioda.m64_bar_alloc);
			pdn->m64_wins[i][j] = IODA_INVALID_M64;
		}

	return 0;
}

static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pci_dn *pdn;
	unsigned int win;
	struct resource *res;
	int i, j;
	int64_t rc;
	int total_vfs;
	resource_size_t size, start;
	int pe_num;
	int vf_groups;
	int vf_per_group;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);
	total_vfs = pci_sriov_get_totalvfs(pdev);

	/* Initialize the m64_wins to IODA_INVALID_M64 */
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
		for (j = 0; j < M64_PER_IOV; j++)
			pdn->m64_wins[i][j] = IODA_INVALID_M64;

	if (pdn->m64_per_iov == M64_PER_IOV) {
		vf_groups = (num_vfs <= M64_PER_IOV) ? num_vfs : M64_PER_IOV;
		vf_per_group = (num_vfs <= M64_PER_IOV) ? 1 :
			roundup_pow_of_two(num_vfs) / pdn->m64_per_iov;
	} else {
		vf_groups = 1;
		vf_per_group = 1;
	}

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &pdev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || !res->parent)
			continue;

		if (!pnv_pci_is_mem_pref_64(res->flags))
			continue;

		for (j = 0; j < vf_groups; j++) {
			do {
				win = find_next_zero_bit(&phb->ioda.m64_bar_alloc,
						phb->ioda.m64_bar_idx + 1, 0);

				if (win >= phb->ioda.m64_bar_idx + 1)
					goto m64_failed;
			} while (test_and_set_bit(win, &phb->ioda.m64_bar_alloc));

			pdn->m64_wins[i][j] = win;

			if (pdn->m64_per_iov == M64_PER_IOV) {
				size = pci_iov_resource_size(pdev,
							PCI_IOV_RESOURCES + i);
				size = size * vf_per_group;
				start = res->start + size * j;
			} else {
				size = resource_size(res);
				start = res->start;
			}

			/* Map the M64 here */
			if (pdn->m64_per_iov == M64_PER_IOV) {
				pe_num = pdn->offset + j;
				rc = opal_pci_map_pe_mmio_window(phb->opal_id,
						pe_num, OPAL_M64_WINDOW_TYPE,
						pdn->m64_wins[i][j], 0);
			}

			rc = opal_pci_set_phb_mem_window(phb->opal_id,
						 OPAL_M64_WINDOW_TYPE,
						 pdn->m64_wins[i][j],
						 start,
						 0, /* unused */
						 size);

			if (rc != OPAL_SUCCESS) {
				dev_err(&pdev->dev, "Failed to map M64 window #%d: %lld\n",
					win, rc);
				goto m64_failed;
			}

			if (pdn->m64_per_iov == M64_PER_IOV)
				rc = opal_pci_phb_mmio_enable(phb->opal_id,
				     OPAL_M64_WINDOW_TYPE, pdn->m64_wins[i][j], 2);
			else
				rc = opal_pci_phb_mmio_enable(phb->opal_id,
				     OPAL_M64_WINDOW_TYPE, pdn->m64_wins[i][j], 1);

			if (rc != OPAL_SUCCESS) {
				dev_err(&pdev->dev, "Failed to enable M64 window #%d: %llx\n",
					win, rc);
				goto m64_failed;
			}
		}
	}
	return 0;

m64_failed:
	pnv_pci_vf_release_m64(pdev);
	return -EBUSY;
}
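
/*
 * Note: when pdn->m64_per_iov == M64_PER_IOV, the IOV BAR is carved into
 * per-group windows above, one M64 BAR per group of VFs, with each
 * group's window mapped to a single PE; otherwise one M64 BAR covers the
 * whole IOV BAR and relies on hardware segmenting to separate the VFs
 * into PEs.
 */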

static void pnv_pci_ioda2_release_dma_pe(struct pci_dev *dev, struct pnv_ioda_pe *pe)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct iommu_table *tbl;
	unsigned long addr;
	int64_t rc;

	bus = dev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	tbl = pe->tce32_table;
	addr = tbl->it_base;

	opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
				   pe->pe_number << 1, 1, __pa(addr),
				   0, 0x1000);

	rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
					     pe->pe_number,
					     (pe->pe_number << 1) + 1,
					     pe->tce_bypass_base,
					     0);
	if (rc)
		pe_warn(pe, "OPAL error %ld release DMA window\n", rc);

	if (tbl->it_group) {
		iommu_group_put(tbl->it_group);
		BUG_ON(tbl->it_group);
	}
	iommu_free_table(tbl, of_node_full_name(dev->dev.of_node));
	free_pages(addr, get_order(TCE32_TABLE_SIZE));
	pe->tce32_table = NULL;
}

static void pnv_ioda_release_vf_PE(struct pci_dev *pdev, u16 num_vfs)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe, *pe_n;
	struct pci_dn *pdn;
	u16 vf_index;
	int64_t rc;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);

	if (!pdev->is_physfn)
		return;

	if (pdn->m64_per_iov == M64_PER_IOV && num_vfs > M64_PER_IOV) {
		int vf_group;
		int vf_per_group;
		int vf_index1;

		vf_per_group = roundup_pow_of_two(num_vfs) / pdn->m64_per_iov;

		for (vf_group = 0; vf_group < M64_PER_IOV; vf_group++)
			for (vf_index = vf_group * vf_per_group;
			     vf_index < (vf_group + 1) * vf_per_group &&
			     vf_index < num_vfs;
			     vf_index++)
				for (vf_index1 = vf_group * vf_per_group;
				     vf_index1 < (vf_group + 1) * vf_per_group &&
				     vf_index1 < num_vfs;
				     vf_index1++) {

					rc = opal_pci_set_peltv(phb->opal_id,
						pdn->offset + vf_index,
						pdn->offset + vf_index1,
						OPAL_REMOVE_PE_FROM_DOMAIN);

					if (rc)
						dev_warn(&pdev->dev, "%s: Failed to unlink same group PE#%d(%lld)\n",
							 __func__,
							 pdn->offset + vf_index1, rc);
				}
	}

	list_for_each_entry_safe(pe, pe_n, &phb->ioda.pe_list, list) {
		if (pe->parent_dev != pdev)
			continue;

		pnv_pci_ioda2_release_dma_pe(pdev, pe);

		/* Remove from list */
		mutex_lock(&phb->ioda.pe_list_mutex);
		list_del(&pe->list);
		mutex_unlock(&phb->ioda.pe_list_mutex);

		pnv_ioda_deconfigure_pe(phb, pe);

		pnv_ioda_free_pe(phb, pe->pe_number);
	}
}

void pnv_pci_sriov_disable(struct pci_dev *pdev)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pci_dn *pdn;
	struct pci_sriov *iov;
	u16 num_vfs;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);
	iov = pdev->sriov;
	num_vfs = pdn->num_vfs;

	/* Release VF PEs */
	pnv_ioda_release_vf_PE(pdev, num_vfs);

	if (phb->type == PNV_PHB_IODA2) {
		if (pdn->m64_per_iov == 1)
			pnv_pci_vf_resource_shift(pdev, -pdn->offset);

		/* Release M64 windows */
		pnv_pci_vf_release_m64(pdev);

		/* Release PE numbers */
		bitmap_clear(phb->ioda.pe_alloc, pdn->offset, num_vfs);
		pdn->offset = 0;
	}
}

static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe);
static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe;
	int pe_num;
	u16 vf_index;
	struct pci_dn *pdn;
	int64_t rc;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);

	if (!pdev->is_physfn)
		return;

	/* Reserve PE for each VF */
	for (vf_index = 0; vf_index < num_vfs; vf_index++) {
		pe_num = pdn->offset + vf_index;

		pe = &phb->ioda.pe_array[pe_num];
		pe->pe_number = pe_num;
		pe->phb = phb;
		pe->flags = PNV_IODA_PE_VF;
		pe->pbus = NULL;
		pe->parent_dev = pdev;
		pe->tce32_seg = -1;
		pe->mve_number = -1;
		pe->rid = (pci_iov_virtfn_bus(pdev, vf_index) << 8) |
			   pci_iov_virtfn_devfn(pdev, vf_index);

		pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%d\n",
			hose->global_number, pdev->bus->number,
			PCI_SLOT(pci_iov_virtfn_devfn(pdev, vf_index)),
			PCI_FUNC(pci_iov_virtfn_devfn(pdev, vf_index)), pe_num);

		if (pnv_ioda_configure_pe(phb, pe)) {
			/* XXX What do we do here ? */
			if (pe_num)
				pnv_ioda_free_pe(phb, pe_num);
			pe->pdev = NULL;
			continue;
		}

		pe->tce32_table = kzalloc_node(sizeof(struct iommu_table),
					       GFP_KERNEL, hose->node);
		pe->tce32_table->data = pe;

		/* Put PE to the list */
		mutex_lock(&phb->ioda.pe_list_mutex);
		list_add_tail(&pe->list, &phb->ioda.pe_list);
		mutex_unlock(&phb->ioda.pe_list_mutex);

		pnv_pci_ioda2_setup_dma_pe(phb, pe);
	}

	if (pdn->m64_per_iov == M64_PER_IOV && num_vfs > M64_PER_IOV) {
		int vf_group;
		int vf_per_group;
		int vf_index1;

		vf_per_group = roundup_pow_of_two(num_vfs) / pdn->m64_per_iov;

		for (vf_group = 0; vf_group < M64_PER_IOV; vf_group++) {
			for (vf_index = vf_group * vf_per_group;
			     vf_index < (vf_group + 1) * vf_per_group &&
			     vf_index < num_vfs;
			     vf_index++) {
				for (vf_index1 = vf_group * vf_per_group;
				     vf_index1 < (vf_group + 1) * vf_per_group &&
				     vf_index1 < num_vfs;
				     vf_index1++) {

					rc = opal_pci_set_peltv(phb->opal_id,
						pdn->offset + vf_index,
						pdn->offset + vf_index1,
						OPAL_ADD_PE_TO_DOMAIN);

					if (rc)
						dev_warn(&pdev->dev, "%s: Failed to link same group PE#%d(%lld)\n",
							 __func__,
							 pdn->offset + vf_index1, rc);
				}
			}
		}
	}
}

int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pci_dn *pdn;
	int ret;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);

	if (phb->type == PNV_PHB_IODA2) {
		/* Calculate available PEs for the required VFs */
		mutex_lock(&phb->ioda.pe_alloc_mutex);
		pdn->offset = bitmap_find_next_zero_area(
			phb->ioda.pe_alloc, phb->ioda.total_pe,
			0, num_vfs, 0);
		if (pdn->offset >= phb->ioda.total_pe) {
			mutex_unlock(&phb->ioda.pe_alloc_mutex);
			dev_info(&pdev->dev, "Failed to enable VF%d\n", num_vfs);
			pdn->offset = 0;
			return -EBUSY;
		}
		bitmap_set(phb->ioda.pe_alloc, pdn->offset, num_vfs);
		pdn->num_vfs = num_vfs;
		mutex_unlock(&phb->ioda.pe_alloc_mutex);

		/* Assign M64 window accordingly */
		ret = pnv_pci_vf_assign_m64(pdev, num_vfs);
		if (ret) {
			dev_info(&pdev->dev, "Not enough M64 window resources\n");
			goto m64_failed;
		}

		/*
		 * When using one M64 BAR to map one IOV BAR, we need to shift
		 * the IOV BAR according to the PE# allocated to the VFs.
		 * Otherwise, the PE# for the VF will conflict with others.
		 */
		if (pdn->m64_per_iov == 1) {
			ret = pnv_pci_vf_resource_shift(pdev, pdn->offset);
			if (ret)
				goto m64_failed;
		}
	}

	/* Setup VF PEs */
	pnv_ioda_setup_vf_PE(pdev, num_vfs);

	return 0;

m64_failed:
	bitmap_clear(phb->ioda.pe_alloc, pdn->offset, num_vfs);
	pdn->offset = 0;

	return ret;
}

int pcibios_sriov_disable(struct pci_dev *pdev)
{
	pnv_pci_sriov_disable(pdev);

	/* Release PCI data */
	remove_dev_pci_data(pdev);
	return 0;
}

int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
	/* Allocate PCI data */
	add_dev_pci_data(pdev);

	pnv_pci_sriov_enable(pdev, num_vfs);
	return 0;
}
#endif /* CONFIG_PCI_IOV */

static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev)
{
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;

	/*
	 * The function can be called while the PE#
	 * hasn't been assigned. Do nothing in that
	 * case.
	 */
	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return;

	pe = &phb->ioda.pe_array[pdn->pe_number];
	WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
	set_iommu_table_base(&pdev->dev, pe->tce32_table);
	/*
	 * Note: iommu_add_device() will fail here as
	 * for physical PE: the device is already added by now;
	 * for virtual PE: sysfs entries are not ready yet and
	 * tce_iommu_bus_notifier will add the device to a group later.
	 */
}

static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;
	uint64_t top;
	bool bypass = false;

	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
		return -ENODEV;

	pe = &phb->ioda.pe_array[pdn->pe_number];
	if (pe->tce_bypass_enabled) {
		top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1;
		bypass = (dma_mask >= top);
	}

	if (bypass) {
		dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n");
		set_dma_ops(&pdev->dev, &dma_direct_ops);
		set_dma_offset(&pdev->dev, pe->tce_bypass_base);
	} else {
		dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
		set_dma_ops(&pdev->dev, &dma_iommu_ops);
		set_iommu_table_base(&pdev->dev, pe->tce32_table);
	}
	*pdev->dev.dma_mask = dma_mask;
	return 0;
}
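
/*
 * In other words, a device whose DMA mask covers tce_bypass_base plus
 * the top of RAM can reach all memory through the bypass window, so it
 * is switched to direct DMA with a fixed offset; any other device stays
 * behind the 32-bit TCE table.
 */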

static u64 pnv_pci_ioda_dma_get_required_mask(struct pnv_phb *phb,
					      struct pci_dev *pdev)
{
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;
	u64 end, mask;

	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
		return 0;

	pe = &phb->ioda.pe_array[pdn->pe_number];
	if (!pe->tce_bypass_enabled)
		return __dma_get_required_mask(&pdev->dev);

	end = pe->tce_bypass_base + memblock_end_of_DRAM();
	mask = 1ULL << (fls64(end) - 1);
	mask += mask - 1;

	return mask;
}
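
/*
 * Worked example (RAM size assumed): with tce_bypass_base = 1ull << 59
 * and 16GB of RAM, end sits just above 2^59, fls64(end) is 60, and the
 * computed mask is 2^60 - 1, i.e. the smallest all-ones mask that can
 * address the whole bypass window.
 */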

static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
				   struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		set_iommu_table_base(&dev->dev, pe->tce32_table);
		iommu_add_device(&dev->dev);

		if (dev->subordinate)
			pnv_ioda_setup_bus_dma(pe, dev->subordinate);
	}
}

static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
		unsigned long index, unsigned long npages, bool rm)
{
	struct pnv_ioda_pe *pe = tbl->data;
	__be64 __iomem *invalidate = rm ?
		(__be64 __iomem *)pe->tce_inval_reg_phys :
		(__be64 __iomem *)tbl->it_index;
	unsigned long start, end, inc;
	const unsigned shift = tbl->it_page_shift;

	start = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset);
	end = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset +
			npages - 1);

	/* BML uses this case for p6/p7/galaxy2: Shift addr and put in node */
	if (tbl->it_busno) {
		start <<= shift;
		end <<= shift;
		inc = 128ull << shift;
		start |= tbl->it_busno;
		end |= tbl->it_busno;
	} else if (tbl->it_type & TCE_PCI_SWINV_PAIR) {
		/* p7ioc-style invalidation, 2 TCEs per write */
		start |= (1ull << 63);
		end |= (1ull << 63);
		inc = 16;
	} else {
		/* Default (older HW) */
		inc = 128;
	}

	end |= inc - 1;	/* round up end to be different than start */

	mb(); /* Ensure above stores are visible */
	while (start <= end) {
		if (rm)
			__raw_rm_writeq(cpu_to_be64(start), invalidate);
		else
			__raw_writeq(cpu_to_be64(start), invalidate);
		start += inc;
	}

	/*
	 * The iommu layer will do another mb() for us on build()
	 * and we don't care on free()
	 */
}

static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index,
		long npages, unsigned long uaddr,
		enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
			attrs);

	if (!ret && (tbl->it_type & TCE_PCI_SWINV_CREATE))
		pnv_pci_ioda1_tce_invalidate(tbl, index, npages, false);

	return ret;
}

static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index,
		long npages)
{
	pnv_tce_free(tbl, index, npages);

	if (tbl->it_type & TCE_PCI_SWINV_FREE)
		pnv_pci_ioda1_tce_invalidate(tbl, index, npages, false);
}

static struct iommu_table_ops pnv_ioda1_iommu_ops = {
	.set = pnv_ioda1_tce_build,
	.clear = pnv_ioda1_tce_free,
	.get = pnv_tce_get,
};

static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
		unsigned long index, unsigned long npages, bool rm)
{
	struct pnv_ioda_pe *pe = tbl->data;
	unsigned long start, end, inc;
	__be64 __iomem *invalidate = rm ?
		(__be64 __iomem *)pe->tce_inval_reg_phys :
		(__be64 __iomem *)tbl->it_index;
	const unsigned shift = tbl->it_page_shift;

	/* We'll invalidate DMA address in PE scope */
	start = 0x2ull << 60;
	start |= (pe->pe_number & 0xFF);
	end = start;

	/* Figure out the start, end and step */
	start |= (index << shift);
	end |= ((index + npages - 1) << shift);
	inc = (0x1ull << shift);
	mb();

	while (start <= end) {
		if (rm)
			__raw_rm_writeq(cpu_to_be64(start), invalidate);
		else
			__raw_writeq(cpu_to_be64(start), invalidate);
		start += inc;
	}
}
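
/*
 * Layout of the value written above, as constructed by the code: bits
 * 63:60 carry 0x2 to request a PE-scoped invalidation, bits 7:0 carry
 * the PE number, and the DMA address being invalidated (index shifted
 * by the page shift) sits in between; one store is issued per page in
 * [index, index + npages).
 */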

static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index,
		long npages, unsigned long uaddr,
		enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
			attrs);

	if (!ret && (tbl->it_type & TCE_PCI_SWINV_CREATE))
		pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);

	return ret;
}

static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
		long npages)
{
	pnv_tce_free(tbl, index, npages);

	if (tbl->it_type & TCE_PCI_SWINV_FREE)
		pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
}

static struct iommu_table_ops pnv_ioda2_iommu_ops = {
	.set = pnv_ioda2_tce_build,
	.clear = pnv_ioda2_tce_free,
	.get = pnv_tce_get,
};
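
/*
 * Both ops tables share pnv_tce_get; the set/clear hooks differ only in
 * which flavour of software-visible TCE invalidation they trigger after
 * updating the table.
 */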
cad5cef6
GKH
1818static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
1819 struct pnv_ioda_pe *pe, unsigned int base,
1820 unsigned int segs)
184cd4a3
BH
1821{
1822
1823 struct page *tce_mem = NULL;
1824 const __be64 *swinvp;
1825 struct iommu_table *tbl;
1826 unsigned int i;
1827 int64_t rc;
1828 void *addr;
1829
184cd4a3
BH
1830 /* XXX FIXME: Handle 64-bit only DMA devices */
1831 /* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */
1832 /* XXX FIXME: Allocate multi-level tables on PHB3 */
1833
1834 /* We shouldn't already have a 32-bit DMA associated */
1835 if (WARN_ON(pe->tce32_seg >= 0))
1836 return;
1837
c5773822
AK
1838 tbl = pe->tce32_table;
1839 iommu_register_group(tbl, phb->hose->global_number, pe->pe_number);
1840
184cd4a3
BH
1841 /* Grab a 32-bit TCE table */
1842 pe->tce32_seg = base;
1843 pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n",
1844 (base << 28), ((base + segs) << 28) - 1);
1845
1846 /* XXX Currently, we allocate one big contiguous table for the
1847 * TCEs. We only really need one chunk per 256M of TCE space
1848 * (ie per segment) but that's an optimization for later, it
1849 * requires some added smarts with our get/put_tce implementation
1850 */
1851 tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
1852 get_order(TCE32_TABLE_SIZE * segs));
1853 if (!tce_mem) {
1854 pe_err(pe, " Failed to allocate a 32-bit TCE memory\n");
1855 goto fail;
1856 }
1857 addr = page_address(tce_mem);
1858 memset(addr, 0, TCE32_TABLE_SIZE * segs);
1859
1860 /* Configure HW */
1861 for (i = 0; i < segs; i++) {
1862 rc = opal_pci_map_pe_dma_window(phb->opal_id,
1863 pe->pe_number,
1864 base + i, 1,
1865 __pa(addr) + TCE32_TABLE_SIZE * i,
1866 TCE32_TABLE_SIZE, 0x1000);
1867 if (rc) {
1868 pe_err(pe, " Failed to configure 32-bit TCE table,"
1869 " err %ld\n", rc);
1870 goto fail;
1871 }
1872 }
1873
1874 /* Setup linux iommu table */
184cd4a3 1875 pnv_pci_setup_iommu_table(tbl, addr, TCE32_TABLE_SIZE * segs,
8fa5d454 1876 base << 28, IOMMU_PAGE_SHIFT_4K);
184cd4a3
BH
1877
1878 /* OPAL variant of P7IOC SW invalidated TCEs */
1879 swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
1880 if (swinvp) {
1881 /* We need a couple more fields -- an address and a data
1882 * to or. Since the bus is only printed out on table free
1883 * errors, and on the first pass the data will be a relative
1884 * bus number, print that out instead.
1885 */
8e0a1611
AK
1886 pe->tce_inval_reg_phys = be64_to_cpup(swinvp);
1887 tbl->it_index = (unsigned long)ioremap(pe->tce_inval_reg_phys,
1888 8);
65fd766b
GS
1889 tbl->it_type |= (TCE_PCI_SWINV_CREATE |
1890 TCE_PCI_SWINV_FREE |
1891 TCE_PCI_SWINV_PAIR);
184cd4a3 1892 }
da004c36 1893 tbl->it_ops = &pnv_ioda1_iommu_ops;
184cd4a3
BH
1894 iommu_init_table(tbl, phb->hose->node);
1895
781a868f 1896 if (pe->flags & PNV_IODA_PE_DEV) {
4617082e
AK
1897 /*
1898 * Setting table base here only for carrying iommu_group
1899 * further down to let iommu_add_device() do the job.
1900 * pnv_pci_ioda_dma_dev_setup will override it later anyway.
1901 */
1902 set_iommu_table_base(&pe->pdev->dev, tbl);
1903 iommu_add_device(&pe->pdev->dev);
c5773822 1904 } else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
ea30e99e 1905 pnv_ioda_setup_bus_dma(pe, pe->pbus);
74251fe2 1906
184cd4a3
BH
1907 return;
1908 fail:
1909 /* XXX Failure: Try to fallback to 64-bit only ? */
1910 if (pe->tce32_seg >= 0)
1911 pe->tce32_seg = -1;
1912 if (tce_mem)
1913 __free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));
1914}
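
/*
 * Editor's note: a worked example of the sizing above.  Each 256MB
 * (1 << 28) DMA segment needs one TCE per 4K page and 8 bytes per TCE,
 * so TCE32_TABLE_SIZE = (0x10000000 / 0x1000) * 8 = 512KB per segment.
 * A PE handed 4 segments therefore gets a 2MB table covering the
 * 32-bit DMA range (base << 28) .. ((base + 4) << 28) - 1.
 */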

static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable)
{
	struct pnv_ioda_pe *pe = tbl->data;
	uint16_t window_id = (pe->pe_number << 1) + 1;
	int64_t rc;

	pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis");
	if (enable) {
		phys_addr_t top = memblock_end_of_DRAM();

		top = roundup_pow_of_two(top);
		rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
						     pe->pe_number,
						     window_id,
						     pe->tce_bypass_base,
						     top);
	} else {
		rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
						     pe->pe_number,
						     window_id,
						     pe->tce_bypass_base,
						     0);
	}
	if (rc)
		pe_err(pe, "OPAL error %lld configuring bypass window\n", rc);
	else
		pe->tce_bypass_enabled = enable;
}

static void pnv_pci_ioda2_setup_bypass_pe(struct pnv_phb *phb,
					  struct pnv_ioda_pe *pe)
{
	/* TVE #1 is selected by PCI address bit 59 */
	pe->tce_bypass_base = 1ull << 59;

	/* Install set_bypass callback for VFIO */
	pe->tce32_table->set_bypass = pnv_pci_ioda2_set_bypass;

	/* Enable bypass by default */
	pnv_pci_ioda2_set_bypass(pe->tce32_table, true);
}
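
/*
 * Editor's note: the bypass window works by address decode, not via
 * TCEs.  With tce_bypass_base = 1ull << 59, any DMA address with bit 59
 * set is routed through TVE #1 and passed straight through to memory.
 * E.g. a device targeting (hypothetical) physical address 0x1000 would
 * use DMA address (1ull << 59) | 0x1000 once bypass is enabled.
 */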

static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe)
{
	struct page *tce_mem = NULL;
	void *addr;
	const __be64 *swinvp;
	struct iommu_table *tbl;
	unsigned int tce_table_size, end;
	int64_t rc;

	/* We shouldn't already have a 32-bit DMA association */
	if (WARN_ON(pe->tce32_seg >= 0))
		return;

	tbl = pe->tce32_table;
	iommu_register_group(tbl, phb->hose->global_number, pe->pe_number);

	/* The PE will reserve all possible 32-bit space */
	pe->tce32_seg = 0;
	end = (1 << ilog2(phb->ioda.m32_pci_base));
	tce_table_size = (end / 0x1000) * 8;
	pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n",
		end);

	/* Allocate TCE table */
	tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
				   get_order(tce_table_size));
	if (!tce_mem) {
		pe_err(pe, "Failed to allocate a 32-bit TCE memory\n");
		goto fail;
	}
	addr = page_address(tce_mem);
	memset(addr, 0, tce_table_size);

	/*
	 * Map the TCE table through the TVT.  The TVE index is the PE
	 * number shifted left by 1 bit for the 32-bit DMA space.
	 */
	rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
					pe->pe_number << 1, 1, __pa(addr),
					tce_table_size, 0x1000);
	if (rc) {
		pe_err(pe, "Failed to configure 32-bit TCE table,"
		       " err %ld\n", rc);
		goto fail;
	}

	/* Setup linux iommu table */
	pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, 0,
			IOMMU_PAGE_SHIFT_4K);

	/* OPAL variant of PHB3 invalidated TCEs */
	swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
	if (swinvp) {
		/* Grab the physical address of the TCE "kill" register
		 * and map it, so that tce_build()/tce_free() can
		 * invalidate entries in software.
		 */
		pe->tce_inval_reg_phys = be64_to_cpup(swinvp);
		tbl->it_index = (unsigned long)ioremap(pe->tce_inval_reg_phys,
				8);
		tbl->it_type |= (TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE);
	}
	tbl->it_ops = &pnv_ioda2_iommu_ops;
	iommu_init_table(tbl, phb->hose->node);

	if (pe->flags & PNV_IODA_PE_DEV) {
		/*
		 * Setting the table base here only for carrying the
		 * iommu_group further down to let iommu_add_device() do
		 * the job.  pnv_pci_ioda_dma_dev_setup() will override
		 * it later anyway.
		 */
		set_iommu_table_base(&pe->pdev->dev, tbl);
		iommu_add_device(&pe->pdev->dev);
	} else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
		pnv_ioda_setup_bus_dma(pe, pe->pbus);

	/* Also create a bypass window */
	if (!pnv_iommu_bypass_disabled)
		pnv_pci_ioda2_setup_bypass_pe(phb, pe);

	return;
fail:
	if (pe->tce32_seg >= 0)
		pe->tce32_seg = -1;
	if (tce_mem)
		__free_pages(tce_mem, get_order(tce_table_size));
}
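
/*
 * Editor's note: a worked example of the sizing above, assuming a
 * hypothetical 2GB M32 PCI range.  end = 1 << ilog2(0x80000000) =
 * 0x80000000, and tce_table_size = (0x80000000 / 0x1000) * 8 = 4MB:
 * one 8-byte TCE per 4K page of the entire 32-bit DMA window.
 */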

static void pnv_ioda_setup_dma(struct pnv_phb *phb)
{
	struct pci_controller *hose = phb->hose;
	unsigned int residual, remaining, segs, tw, base;
	struct pnv_ioda_pe *pe;

	/* If we have more PE#s than segments available, hand out one
	 * segment per PE until we run out and let the rest fail.  If
	 * not, then we assign at least one segment per PE, plus more
	 * based on the number of devices under that PE.
	 */
	if (phb->ioda.dma_pe_count > phb->ioda.tce32_count)
		residual = 0;
	else
		residual = phb->ioda.tce32_count -
			phb->ioda.dma_pe_count;

	pr_info("PCI: Domain %04x has %ld available 32-bit DMA segments\n",
		hose->global_number, phb->ioda.tce32_count);
	pr_info("PCI: %d PE# for a total weight of %d\n",
		phb->ioda.dma_pe_count, phb->ioda.dma_weight);

	/* Walk our PE list and configure their DMA segments: hand each
	 * PE one base segment plus any residual segments based on its
	 * weight.
	 */
	remaining = phb->ioda.tce32_count;
	tw = phb->ioda.dma_weight;
	base = 0;
	list_for_each_entry(pe, &phb->ioda.pe_dma_list, dma_link) {
		if (!pe->dma_weight)
			continue;
		if (!remaining) {
			pe_warn(pe, "No DMA32 resources available\n");
			continue;
		}
		segs = 1;
		if (residual) {
			segs += ((pe->dma_weight * residual) + (tw / 2)) / tw;
			if (segs > remaining)
				segs = remaining;
		}

		/*
		 * For the IODA2 compliant PHB3 we needn't care about
		 * the weight; all of the available 32-bit DMA space is
		 * assigned to the PE.
		 */
		if (phb->type == PNV_PHB_IODA1) {
			pe_info(pe, "DMA weight %d, assigned %d DMA32 segments\n",
				pe->dma_weight, segs);
			pnv_pci_ioda_setup_dma_pe(phb, pe, base, segs);
		} else {
			pe_info(pe, "Assign DMA32 space\n");
			segs = 0;
			pnv_pci_ioda2_setup_dma_pe(phb, pe);
		}

		remaining -= segs;
		base += segs;
	}
}
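
/*
 * Editor's note: a worked example of the residual distribution above,
 * with hypothetical numbers.  Say tce32_count = 16 segments and two PEs
 * have dma_weight 3 and 1 (tw = 4, dma_pe_count = 2, residual = 14).
 * The first PE gets 1 + (3 * 14 + 2) / 4 = 12 segments; the second
 * computes 1 + (1 * 14 + 2) / 4 = 5 but is capped at the 4 segments
 * then remaining.
 */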

#ifdef CONFIG_PCI_MSI
static void pnv_ioda2_msi_eoi(struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct pnv_phb *phb = container_of(chip, struct pnv_phb,
					   ioda.irq_chip);
	int64_t rc;

	rc = opal_pci_msi_eoi(phb->opal_id, hw_irq);
	WARN_ON_ONCE(rc);

	icp_native_eoi(d);
}

static void set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq)
{
	struct irq_data *idata;
	struct irq_chip *ichip;

	if (phb->type != PNV_PHB_IODA2)
		return;

	if (!phb->ioda.irq_chip_init) {
		/*
		 * The first time we set up an MSI IRQ, we need to set
		 * up the corresponding IRQ chip so EOIs route
		 * correctly.
		 */
		idata = irq_get_irq_data(virq);
		ichip = irq_data_get_irq_chip(idata);
		phb->ioda.irq_chip_init = 1;
		phb->ioda.irq_chip = *ichip;
		phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi;
	}
	irq_set_chip(virq, &phb->ioda.irq_chip);
}
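
/*
 * Editor's note: set_msi_irq_chip() uses a clone-and-override pattern.
 * The PHB keeps one private copy of the generic irq_chip with only
 * .irq_eoi replaced, so every MSI on an IODA2 PHB issues the extra
 * opal_pci_msi_eoi() call before the normal ICP EOI.  The copy is made
 * once per PHB, on the first MSI setup.
 */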

#ifdef CONFIG_CXL_BASE

struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);

	return of_node_get(hose->dn);
}
EXPORT_SYMBOL(pnv_pci_get_phb_node);

int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pe;
	int rc;

	pe = pnv_ioda_get_pe(dev);
	if (!pe)
		return -ENODEV;

	pe_info(pe, "Switching PHB to CXL\n");

	rc = opal_pci_set_phb_cxl_mode(phb->opal_id, mode, pe->pe_number);
	if (rc)
		dev_err(&dev->dev, "opal_pci_set_phb_cxl_mode failed: %i\n", rc);

	return rc;
}
EXPORT_SYMBOL(pnv_phb_to_cxl_mode);

/* Find the PHB for the cxl dev and allocate MSI hwirqs.
 * Returns the absolute hardware IRQ number.
 */
int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	int hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, num);

	if (hwirq < 0) {
		dev_warn(&dev->dev, "Failed to find a free MSI\n");
		return -ENOSPC;
	}

	return phb->msi_base + hwirq;
}
EXPORT_SYMBOL(pnv_cxl_alloc_hwirqs);

void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;

	msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, num);
}
EXPORT_SYMBOL(pnv_cxl_release_hwirqs);

void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs,
				  struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	int i, hwirq;

	for (i = 1; i < CXL_IRQ_RANGES; i++) {
		if (!irqs->range[i])
			continue;
		pr_devel("cxl release irq range 0x%x: offset: 0x%lx limit: %ld\n",
			 i, irqs->offset[i],
			 irqs->range[i]);
		hwirq = irqs->offset[i] - phb->msi_base;
		msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq,
				       irqs->range[i]);
	}
}
EXPORT_SYMBOL(pnv_cxl_release_hwirq_ranges);

int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
			       struct pci_dev *dev, int num)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	int i, hwirq, try;

	memset(irqs, 0, sizeof(struct cxl_irq_ranges));

	/* 0 is reserved for the multiplexed PSL DSI interrupt */
	for (i = 1; i < CXL_IRQ_RANGES && num; i++) {
		try = num;
		while (try) {
			hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, try);
			if (hwirq >= 0)
				break;
			try /= 2;
		}
		if (!try)
			goto fail;

		irqs->offset[i] = phb->msi_base + hwirq;
		irqs->range[i] = try;
		pr_devel("cxl alloc irq range 0x%x: offset: 0x%lx limit: %li\n",
			 i, irqs->offset[i], irqs->range[i]);
		num -= try;
	}
	if (num)
		goto fail;

	return 0;
fail:
	pnv_cxl_release_hwirq_ranges(irqs, dev);
	return -ENOSPC;
}
EXPORT_SYMBOL(pnv_cxl_alloc_hwirq_ranges);
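
/*
 * Editor's note: the allocator above degrades gracefully when the MSI
 * bitmap is fragmented.  Asked for, say, num = 8 hwirqs, it first tries
 * a contiguous run of 8; failing that it halves to 4, then 2, then 1,
 * records whatever run it got in the next range slot, and repeats for
 * the remainder until num is satisfied or the range slots run out.
 */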

int pnv_cxl_get_irq_count(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;

	return phb->msi_bmp.irq_count;
}
EXPORT_SYMBOL(pnv_cxl_get_irq_count);

int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
			   unsigned int virq)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	unsigned int xive_num = hwirq - phb->msi_base;
	struct pnv_ioda_pe *pe;
	int rc;

	if (!(pe = pnv_ioda_get_pe(dev)))
		return -ENODEV;

	/* Assign XIVE to PE */
	rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
	if (rc) {
		pe_warn(pe, "%s: OPAL error %d setting msi_base 0x%x "
			"hwirq 0x%x XIVE 0x%x PE\n",
			pci_name(dev), rc, phb->msi_base, hwirq, xive_num);
		return -EIO;
	}
	set_msi_irq_chip(phb, virq);

	return 0;
}
EXPORT_SYMBOL(pnv_cxl_ioda_msi_setup);
#endif

static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
				  unsigned int hwirq, unsigned int virq,
				  unsigned int is_64, struct msi_msg *msg)
{
	struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
	unsigned int xive_num = hwirq - phb->msi_base;
	__be32 data;
	int rc;

	/* No PE assigned?  Bail out ... no MSI for you! */
	if (pe == NULL)
		return -ENXIO;

	/* Check if we have an MVE */
	if (pe->mve_number < 0)
		return -ENXIO;

	/* Force 32-bit MSI on some broken devices */
	if (dev->no_64bit_msi)
		is_64 = 0;

	/* Assign XIVE to PE */
	rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
	if (rc) {
		pr_warn("%s: OPAL error %d setting XIVE %d PE\n",
			pci_name(dev), rc, xive_num);
		return -EIO;
	}

	if (is_64) {
		__be64 addr64;

		rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1,
				     &addr64, &data);
		if (rc) {
			pr_warn("%s: OPAL error %d getting 64-bit MSI data\n",
				pci_name(dev), rc);
			return -EIO;
		}
		msg->address_hi = be64_to_cpu(addr64) >> 32;
		msg->address_lo = be64_to_cpu(addr64) & 0xfffffffful;
	} else {
		__be32 addr32;

		rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1,
				     &addr32, &data);
		if (rc) {
			pr_warn("%s: OPAL error %d getting 32-bit MSI data\n",
				pci_name(dev), rc);
			return -EIO;
		}
		msg->address_hi = 0;
		msg->address_lo = be32_to_cpu(addr32);
	}
	msg->data = be32_to_cpu(data);

	set_msi_irq_chip(phb, virq);

	pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
		 " address=%x_%08x data=%x PE# %d\n",
		 pci_name(dev), is_64 ? "64" : "32", hwirq, xive_num,
		 msg->address_hi, msg->address_lo, data, pe->pe_number);

	return 0;
}
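
/*
 * Editor's note: the XIVE number is simply the hwirq's offset into the
 * PHB's MSI range (hwirq - msi_base); OPAL then hands back the doorbell
 * address/data pair the device must write to raise that interrupt.
 * For example, a hypothetical hwirq 0x1f05 on a PHB with msi_base
 * 0x1f00 programs XIVE #5 for the owning PE.
 */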

static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
{
	unsigned int count;
	const __be32 *prop = of_get_property(phb->hose->dn,
					     "ibm,opal-msi-ranges", NULL);
	if (!prop) {
		/* BML Fallback */
		prop = of_get_property(phb->hose->dn, "msi-ranges", NULL);
	}
	if (!prop)
		return;

	phb->msi_base = be32_to_cpup(prop);
	count = be32_to_cpup(prop + 1);
	if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) {
		pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
		       phb->hose->global_number);
		return;
	}

	phb->msi_setup = pnv_pci_ioda_msi_setup;
	phb->msi32_support = 1;
	pr_info("  Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
		count, phb->msi_base);
}
#else
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
#endif /* CONFIG_PCI_MSI */

#ifdef CONFIG_PCI_IOV
static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct resource *res;
	int i;
	resource_size_t size;
	struct pci_dn *pdn;
	int mul, total_vfs;

	if (!pdev->is_physfn || pdev->is_added)
		return;

	hose = pci_bus_to_host(pdev->bus);
	phb = hose->private_data;

	pdn = pci_get_pdn(pdev);
	pdn->vfs_expanded = 0;

	total_vfs = pci_sriov_get_totalvfs(pdev);
	pdn->m64_per_iov = 1;
	mul = phb->ioda.total_pe;

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &pdev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || res->parent)
			continue;
		if (!pnv_pci_is_mem_pref_64(res->flags)) {
			dev_warn(&pdev->dev, " non M64 VF BAR%d: %pR\n",
				 i, res);
			continue;
		}

		size = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES);

		/* bigger than 64M */
		if (size > (1 << 26)) {
			dev_info(&pdev->dev, "PowerNV: VF BAR%d: %pR IOV size is bigger than 64M, round up to power of two\n",
				 i, res);
			pdn->m64_per_iov = M64_PER_IOV;
			mul = roundup_pow_of_two(total_vfs);
			break;
		}
	}

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &pdev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || res->parent)
			continue;
		if (!pnv_pci_is_mem_pref_64(res->flags)) {
			dev_warn(&pdev->dev, "Skipping expanding VF BAR%d: %pR\n",
				 i, res);
			continue;
		}

		dev_dbg(&pdev->dev, " Fixing VF BAR%d: %pR to\n", i, res);
		size = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES);
		res->end = res->start + size * mul - 1;
		dev_dbg(&pdev->dev, "                       %pR\n", res);
		dev_info(&pdev->dev, "VF BAR%d: %pR (expanded to %d VFs for PE alignment)\n",
			 i, res, mul);
	}
	pdn->vfs_expanded = mul;
}
#endif /* CONFIG_PCI_IOV */
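
/*
 * Editor's note: a worked example of the expansion above, with
 * hypothetical numbers.  On a PHB with total_pe = 256, a VF BAR whose
 * per-VF size is 1MB is grown to 256MB (size * mul), so each VF's slice
 * of the BAR lands in its own M64 segment and hence its own PE.  Only
 * when a per-VF size exceeds 64MB does mul drop back to
 * roundup_pow_of_two(total_vfs).
 */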

/*
 * This function is supposed to be called on the basis of PE from top
 * to bottom style, so the I/O or MMIO segment assigned to a parent PE
 * can be overridden by its child PEs if necessary.
 */
static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
				  struct pnv_ioda_pe *pe)
{
	struct pnv_phb *phb = hose->private_data;
	struct pci_bus_region region;
	struct resource *res;
	int i, index;
	int rc;

	/*
	 * NOTE: We only care about PCI-bus based PEs for now.
	 * PCI-device based PEs, for example SRIOV-sensitive VFs, should
	 * be figured out later.
	 */
	BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)));

	pci_bus_for_each_resource(pe->pbus, res, i) {
		if (!res || !res->flags ||
		    res->start > res->end)
			continue;

		if (res->flags & IORESOURCE_IO) {
			region.start = res->start - phb->ioda.io_pci_base;
			region.end = res->end - phb->ioda.io_pci_base;
			index = region.start / phb->ioda.io_segsize;

			while (index < phb->ioda.total_pe &&
			       region.start <= region.end) {
				phb->ioda.io_segmap[index] = pe->pe_number;
				rc = opal_pci_map_pe_mmio_window(phb->opal_id,
					pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
				if (rc != OPAL_SUCCESS) {
					pr_err("%s: OPAL error %d when mapping IO "
					       "segment #%d to PE#%d\n",
					       __func__, rc, index, pe->pe_number);
					break;
				}

				region.start += phb->ioda.io_segsize;
				index++;
			}
		} else if ((res->flags & IORESOURCE_MEM) &&
			   !pnv_pci_is_mem_pref_64(res->flags)) {
			region.start = res->start -
				       hose->mem_offset[0] -
				       phb->ioda.m32_pci_base;
			region.end = res->end -
				     hose->mem_offset[0] -
				     phb->ioda.m32_pci_base;
			index = region.start / phb->ioda.m32_segsize;

			while (index < phb->ioda.total_pe &&
			       region.start <= region.end) {
				phb->ioda.m32_segmap[index] = pe->pe_number;
				rc = opal_pci_map_pe_mmio_window(phb->opal_id,
					pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
				if (rc != OPAL_SUCCESS) {
					pr_err("%s: OPAL error %d when mapping M32 "
					       "segment #%d to PE#%d\n",
					       __func__, rc, index, pe->pe_number);
					break;
				}

				region.start += phb->ioda.m32_segsize;
				index++;
			}
		}
	}
}
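
/*
 * Editor's note: a worked example of the index arithmetic above, with
 * hypothetical numbers.  With m32_segsize = 16MB, a bridge window
 * covering 32MB..96MB of PCI M32 space maps to segments 2..5, and each
 * of those m32_segmap slots (and the matching OPAL MMIO windows) is
 * pointed at this PE.
 */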

static void pnv_pci_ioda_setup_seg(void)
{
	struct pci_controller *tmp, *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		phb = hose->private_data;
		list_for_each_entry(pe, &phb->ioda.pe_list, list) {
			pnv_ioda_setup_pe_seg(hose, pe);
		}
	}
}

static void pnv_pci_ioda_setup_DMA(void)
{
	struct pci_controller *hose, *tmp;
	struct pnv_phb *phb;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		pnv_ioda_setup_dma(hose->private_data);

		/* Mark the PHB initialization done */
		phb = hose->private_data;
		phb->initialized = 1;
	}
}

static void pnv_pci_ioda_create_dbgfs(void)
{
#ifdef CONFIG_DEBUG_FS
	struct pci_controller *hose, *tmp;
	struct pnv_phb *phb;
	char name[16];

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		phb = hose->private_data;

		sprintf(name, "PCI%04x", hose->global_number);
		phb->dbgfs = debugfs_create_dir(name, powerpc_debugfs_root);
		if (!phb->dbgfs)
			pr_warning("%s: Error on creating debugfs on PHB#%x\n",
				   __func__, hose->global_number);
	}
#endif /* CONFIG_DEBUG_FS */
}

static void pnv_pci_ioda_fixup(void)
{
	pnv_pci_ioda_setup_PEs();
	pnv_pci_ioda_setup_seg();
	pnv_pci_ioda_setup_DMA();

	pnv_pci_ioda_create_dbgfs();

#ifdef CONFIG_EEH
	eeh_init();
	eeh_addr_cache_build();
#endif
}

/*
 * Returns the alignment for I/O or memory windows of P2P bridges.
 * That actually depends on how PEs are segmented.  For now, we return
 * the I/O or M32 segment size for PE-sensitive P2P bridges; otherwise
 * the default values (4KiB for I/O, 1MiB for memory) are returned.
 *
 * The current PCI bus might be put into one PE, which was created
 * against the parent PCI bridge.  In that case we needn't enlarge the
 * alignment, which saves some resources.
 */
static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
						unsigned long type)
{
	struct pci_dev *bridge;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	int num_pci_bridges = 0;

	bridge = bus->self;
	while (bridge) {
		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) {
			num_pci_bridges++;
			if (num_pci_bridges >= 2)
				return 1;
		}

		bridge = bridge->bus->self;
	}

	/* We fall back to M32 if M64 isn't supported */
	if (phb->ioda.m64_segsize &&
	    pnv_pci_is_mem_pref_64(type))
		return phb->ioda.m64_segsize;
	if (type & IORESOURCE_MEM)
		return phb->ioda.m32_segsize;

	return phb->ioda.io_segsize;
}
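
/*
 * Editor's note: returning the segment size as the alignment guarantees
 * that a bridge window starts on a PE segment boundary.  E.g. with a
 * hypothetical m32_segsize of 16MB, a bridge requesting 5MB of
 * non-prefetchable memory still gets a window aligned to 16MB, so its
 * segment is never shared with another PE.
 */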

#ifdef CONFIG_PCI_IOV
static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,
						      int resno)
{
	struct pci_dn *pdn = pci_get_pdn(pdev);
	resource_size_t align, iov_align;

	iov_align = resource_size(&pdev->resource[resno]);
	if (iov_align)
		return iov_align;

	align = pci_iov_resource_size(pdev, resno);
	if (pdn->vfs_expanded)
		return pdn->vfs_expanded * align;

	return align;
}
#endif /* CONFIG_PCI_IOV */

/* Prevent enabling devices for which we couldn't properly
 * assign a PE
 */
static bool pnv_pci_enable_device_hook(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn;

	/* The function can be called while the PEs have not been
	 * created yet, for example during resource reassignment in the
	 * PCI probe period.  We just skip the check if the PEs aren't
	 * ready.
	 */
	if (!phb->initialized)
		return true;

	pdn = pci_get_pdn(dev);
	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return false;

	return true;
}

static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus,
			       u32 devfn)
{
	return phb->ioda.pe_rmap[(bus->number << 8) | devfn];
}
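
/*
 * Editor's note: the reverse map is indexed by the 16-bit PCI RID
 * (bus << 8 | devfn).  For example, a hypothetical device 01:02.3 has
 * devfn (2 << 3) | 3 = 0x13, so its PE number lives at pe_rmap[0x0113].
 */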

static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
{
	struct pnv_phb *phb = hose->private_data;

	opal_pci_reset(phb->opal_id, OPAL_RESET_PCI_IODA_TABLE,
		       OPAL_ASSERT_RESET);
}

static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
	.dma_dev_setup = pnv_pci_dma_dev_setup,
#ifdef CONFIG_PCI_MSI
	.setup_msi_irqs = pnv_setup_msi_irqs,
	.teardown_msi_irqs = pnv_teardown_msi_irqs,
#endif
	.enable_device_hook = pnv_pci_enable_device_hook,
	.window_alignment = pnv_pci_window_alignment,
	.reset_secondary_bus = pnv_pci_reset_secondary_bus,
	.dma_set_mask = pnv_pci_ioda_dma_set_mask,
	.shutdown = pnv_pci_ioda_shutdown,
};

static void __init pnv_pci_init_ioda_phb(struct device_node *np,
					 u64 hub_id, int ioda_type)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	unsigned long size, m32map_off, pemap_off, iomap_off = 0;
	const __be64 *prop64;
	const __be32 *prop32;
	int len;
	u64 phb_id;
	void *aux;
	long rc;

	pr_info("Initializing IODA%d OPAL PHB %s\n", ioda_type, np->full_name);

	prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
	if (!prop64) {
		pr_err("  Missing \"ibm,opal-phbid\" property !\n");
		return;
	}
	phb_id = be64_to_cpup(prop64);
	pr_debug("  PHB-ID  : 0x%016llx\n", phb_id);

	phb = memblock_virt_alloc(sizeof(struct pnv_phb), 0);

	/* Allocate PCI controller */
	phb->hose = hose = pcibios_alloc_controller(np);
	if (!phb->hose) {
		pr_err("  Can't allocate PCI controller for %s\n",
		       np->full_name);
		memblock_free(__pa(phb), sizeof(struct pnv_phb));
		return;
	}

	spin_lock_init(&phb->lock);
	prop32 = of_get_property(np, "bus-range", &len);
	if (prop32 && len == 8) {
		hose->first_busno = be32_to_cpu(prop32[0]);
		hose->last_busno = be32_to_cpu(prop32[1]);
	} else {
		pr_warn("  Broken <bus-range> on %s\n", np->full_name);
		hose->first_busno = 0;
		hose->last_busno = 0xff;
	}
	hose->private_data = phb;
	phb->hub_id = hub_id;
	phb->opal_id = phb_id;
	phb->type = ioda_type;
	mutex_init(&phb->ioda.pe_alloc_mutex);

	/* Detect specific models for error handling */
	if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
		phb->model = PNV_PHB_MODEL_P7IOC;
	else if (of_device_is_compatible(np, "ibm,power8-pciex"))
		phb->model = PNV_PHB_MODEL_PHB3;
	else
		phb->model = PNV_PHB_MODEL_UNKNOWN;

	/* Parse 32-bit and IO ranges (if any) */
	pci_process_bridge_OF_ranges(hose, np, !hose->global_number);

	/* Get registers */
	phb->regs = of_iomap(np, 0);
	if (phb->regs == NULL)
		pr_err("  Failed to map registers !\n");

	/* Initialize more IODA stuff */
	phb->ioda.total_pe = 1;
	prop32 = of_get_property(np, "ibm,opal-num-pes", NULL);
	if (prop32)
		phb->ioda.total_pe = be32_to_cpup(prop32);
	prop32 = of_get_property(np, "ibm,opal-reserved-pe", NULL);
	if (prop32)
		phb->ioda.reserved_pe = be32_to_cpup(prop32);

	/* Parse 64-bit MMIO range */
	pnv_ioda_parse_m64_window(phb);

	phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
	/* FW has already removed the top 64K of M32 space (MSI space);
	 * add it back.
	 */
	phb->ioda.m32_size += 0x10000;

	phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe;
	phb->ioda.m32_pci_base = hose->mem_resources[0].start - hose->mem_offset[0];
	phb->ioda.io_size = hose->pci_io_size;
	phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe;
	phb->ioda.io_pci_base = 0; /* XXX calculate this ? */

	/* Allocate aux data & arrays. We don't have IO ports on PHB3 */
	size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
	m32map_off = size;
	size += phb->ioda.total_pe * sizeof(phb->ioda.m32_segmap[0]);
	if (phb->type == PNV_PHB_IODA1) {
		iomap_off = size;
		size += phb->ioda.total_pe * sizeof(phb->ioda.io_segmap[0]);
	}
	pemap_off = size;
	size += phb->ioda.total_pe * sizeof(struct pnv_ioda_pe);
	aux = memblock_virt_alloc(size, 0);
	phb->ioda.pe_alloc = aux;
	phb->ioda.m32_segmap = aux + m32map_off;
	if (phb->type == PNV_PHB_IODA1)
		phb->ioda.io_segmap = aux + iomap_off;
	phb->ioda.pe_array = aux + pemap_off;
	set_bit(phb->ioda.reserved_pe, phb->ioda.pe_alloc);

	INIT_LIST_HEAD(&phb->ioda.pe_dma_list);
	INIT_LIST_HEAD(&phb->ioda.pe_list);
	mutex_init(&phb->ioda.pe_list_mutex);

	/* Calculate how many 32-bit TCE segments we have */
	phb->ioda.tce32_count = phb->ioda.m32_pci_base >> 28;

#if 0 /* We should really do that ... */
	rc = opal_pci_set_phb_mem_window(opal->phb_id,
					 window_type,
					 window_num,
					 starting_real_address,
					 starting_pci_address,
					 segment_size);
#endif

	pr_info("  %03d (%03d) PE's M32: 0x%x [segment=0x%x]\n",
		phb->ioda.total_pe, phb->ioda.reserved_pe,
		phb->ioda.m32_size, phb->ioda.m32_segsize);
	if (phb->ioda.m64_size)
		pr_info("                 M64: 0x%lx [segment=0x%lx]\n",
			phb->ioda.m64_size, phb->ioda.m64_segsize);
	if (phb->ioda.io_size)
		pr_info("                  IO: 0x%x [segment=0x%x]\n",
			phb->ioda.io_size, phb->ioda.io_segsize);

	phb->hose->ops = &pnv_pci_ops;
	phb->get_pe_state = pnv_ioda_get_pe_state;
	phb->freeze_pe = pnv_ioda_freeze_pe;
	phb->unfreeze_pe = pnv_ioda_unfreeze_pe;

	/* Setup RID -> PE mapping function */
	phb->bdfn_to_pe = pnv_ioda_bdfn_to_pe;

	/* Setup TCEs */
	phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
	phb->dma_get_required_mask = pnv_pci_ioda_dma_get_required_mask;

	/* Setup MSI support */
	pnv_pci_init_ioda_msis(phb);

	/*
	 * We pass the PCI probe flag PCI_REASSIGN_ALL_RSRC here
	 * to let the PCI core do resource assignment.  It's expected
	 * that the PCI core will do correct I/O and MMIO alignment
	 * for the P2P bridge BARs, so that each PCI bus (excluding
	 * the child P2P bridges) can form an individual PE.
	 */
	ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
	hose->controller_ops = pnv_pci_ioda_controller_ops;

#ifdef CONFIG_PCI_IOV
	ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources;
	ppc_md.pcibios_iov_resource_alignment = pnv_pci_iov_resource_alignment;
#endif

	pci_add_flags(PCI_REASSIGN_ALL_RSRC);

	/* Reset IODA tables to a clean state */
	rc = opal_pci_reset(phb_id, OPAL_RESET_PCI_IODA_TABLE, OPAL_ASSERT_RESET);
	if (rc)
		pr_warning("  OPAL Error %ld performing IODA table reset !\n", rc);

	/* If we're running in a kdump kernel, the previous kernel never
	 * shut down PCI devices correctly.  We already got the IODA
	 * tables cleaned out, so we have to issue a PHB reset to stop
	 * all PCI transactions from the previous kernel.
	 */
	if (is_kdump_kernel()) {
		pr_info("  Issue PHB reset ...\n");
		pnv_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL);
		pnv_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE);
	}

	/* Remove M64 resource if we can't configure it successfully */
	if (!phb->init_m64 || phb->init_m64(phb))
		hose->mem_resources[1].flags = 0;
}
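
/*
 * Editor's note: the single memblock allocation above packs four arrays
 * back to back: the PE allocation bitmap at offset 0, the M32 segment
 * map at m32map_off, the optional IODA1 IO segment map at iomap_off,
 * and the pnv_ioda_pe array at pemap_off.  One allocation keeps the
 * per-PHB IODA state contiguous and trivially sized from total_pe.
 */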

void __init pnv_pci_init_ioda2_phb(struct device_node *np)
{
	pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2);
}

void __init pnv_pci_init_ioda_hub(struct device_node *np)
{
	struct device_node *phbn;
	const __be64 *prop64;
	u64 hub_id;

	pr_info("Probing IODA IO-Hub %s\n", np->full_name);

	prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
	if (!prop64) {
		pr_err(" Missing \"ibm,opal-hubid\" property !\n");
		return;
	}
	hub_id = be64_to_cpup(prop64);
	pr_devel(" HUB-ID : 0x%016llx\n", hub_id);

	/* Count child PHBs */
	for_each_child_of_node(np, phbn) {
		/* Look for IODA1 PHBs */
		if (of_device_is_compatible(phbn, "ibm,ioda-phb"))
			pnv_pci_init_ioda_phb(phbn, hub_id, PNV_PHB_IODA1);
	}
}