/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/crash_dump.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/memblock.h>
#include <linux/iommu.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/xics.h>
#include <asm/debug.h>
#include <asm/firmware.h>
#include <asm/pnv-pci.h>

#include <misc/cxl-base.h>

#include "powernv.h"
#include "pci.h"

/* 256M DMA window, 4K TCE pages, 8 bytes TCE */
#define TCE32_TABLE_SIZE	((0x10000000 / 0x1000) * 8)
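/*
 * Worked example for the arithmetic above: a 256M (0x10000000) window
 * of 4K (0x1000) TCE pages needs 0x10000 (65536) entries; at 8 bytes
 * per TCE the table occupies 65536 * 8 = 512K per 256M segment.
 */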

static void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
			    const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	char pfix[32];

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (pe->flags & PNV_IODA_PE_DEV)
		strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix));
	else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
		sprintf(pfix, "%04x:%02x ",
			pci_domain_nr(pe->pbus), pe->pbus->number);
#ifdef CONFIG_PCI_IOV
	else if (pe->flags & PNV_IODA_PE_VF)
		sprintf(pfix, "%04x:%02x:%2x.%d",
			pci_domain_nr(pe->parent_dev->bus),
			(pe->rid & 0xff00) >> 8,
			PCI_SLOT(pe->rid), PCI_FUNC(pe->rid));
#endif /* CONFIG_PCI_IOV */

	printk("%spci %s: [PE# %.3d] %pV",
	       level, pfix, pe->pe_number, &vaf);

	va_end(args);
}

#define pe_err(pe, fmt, ...)					\
	pe_level_printk(pe, KERN_ERR, fmt, ##__VA_ARGS__)
#define pe_warn(pe, fmt, ...)					\
	pe_level_printk(pe, KERN_WARNING, fmt, ##__VA_ARGS__)
#define pe_info(pe, fmt, ...)					\
	pe_level_printk(pe, KERN_INFO, fmt, ##__VA_ARGS__)

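/*
 * Illustrative usage of the helpers above (a sketch; "pe" and the
 * message are from call sites later in this file):
 *
 *	pe_info(pe, "Secondary bus %d associated with PE#%d\n",
 *		bus->busn_res.start, pe_num);
 *
 * expands to pe_level_printk(pe, KERN_INFO, ...), which prefixes the
 * message with the owning device/bus address and "[PE# nnn]".
 */
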
static bool pnv_iommu_bypass_disabled __read_mostly;

static int __init iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;

	while (*str) {
		if (!strncmp(str, "nobypass", 8)) {
			pnv_iommu_bypass_disabled = true;
			pr_info("PowerNV: IOMMU bypass window disabled.\n");
			break;
		}
		str += strcspn(str, ",");
		if (*str == ',')
			str++;
	}

	return 0;
}
early_param("iommu", iommu_setup);
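
/*
 * The parser above walks a comma-separated "iommu=" option string from
 * the kernel command line. For example, booting with:
 *
 *	iommu=nobypass
 *
 * sets pnv_iommu_bypass_disabled, which is expected to keep the 64-bit
 * DMA bypass window (see pnv_pci_ioda2_set_bypass() below) disabled.
 */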

/*
 * stdcix is only supposed to be used in hypervisor real mode as per
 * the architecture spec
 */
static inline void __raw_rm_writeq(u64 val, volatile void __iomem *paddr)
{
	__asm__ __volatile__("stdcix %0,0,%1"
		: : "r" (val), "r" (paddr) : "memory");
}

static inline bool pnv_pci_is_mem_pref_64(unsigned long flags)
{
	return ((flags & (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH)) ==
		(IORESOURCE_MEM_64 | IORESOURCE_PREFETCH));
}

static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no)
{
	if (!(pe_no >= 0 && pe_no < phb->ioda.total_pe)) {
		pr_warn("%s: Invalid PE %d on PHB#%x\n",
			__func__, pe_no, phb->hose->global_number);
		return;
	}

	if (test_and_set_bit(pe_no, phb->ioda.pe_alloc)) {
		pr_warn("%s: PE %d was assigned on PHB#%x\n",
			__func__, pe_no, phb->hose->global_number);
		return;
	}

	phb->ioda.pe_array[pe_no].phb = phb;
	phb->ioda.pe_array[pe_no].pe_number = pe_no;
}

static int pnv_ioda_alloc_pe(struct pnv_phb *phb)
{
	unsigned long pe;

	do {
		pe = find_next_zero_bit(phb->ioda.pe_alloc,
					phb->ioda.total_pe, 0);
		if (pe >= phb->ioda.total_pe)
			return IODA_INVALID_PE;
	} while (test_and_set_bit(pe, phb->ioda.pe_alloc));

	phb->ioda.pe_array[pe].phb = phb;
	phb->ioda.pe_array[pe].pe_number = pe;
	return pe;
}

static void pnv_ioda_free_pe(struct pnv_phb *phb, int pe)
{
	WARN_ON(phb->ioda.pe_array[pe].pdev);

	memset(&phb->ioda.pe_array[pe], 0, sizeof(struct pnv_ioda_pe));
	clear_bit(pe, phb->ioda.pe_alloc);
}

/* The default M64 BAR is shared by all PEs */
static int pnv_ioda2_init_m64(struct pnv_phb *phb)
{
	const char *desc;
	struct resource *r;
	s64 rc;

	/* Configure the default M64 BAR */
	rc = opal_pci_set_phb_mem_window(phb->opal_id,
					 OPAL_M64_WINDOW_TYPE,
					 phb->ioda.m64_bar_idx,
					 phb->ioda.m64_base,
					 0, /* unused */
					 phb->ioda.m64_size);
	if (rc != OPAL_SUCCESS) {
		desc = "configuring";
		goto fail;
	}

	/* Enable the default M64 BAR */
	rc = opal_pci_phb_mmio_enable(phb->opal_id,
				      OPAL_M64_WINDOW_TYPE,
				      phb->ioda.m64_bar_idx,
				      OPAL_ENABLE_M64_SPLIT);
	if (rc != OPAL_SUCCESS) {
		desc = "enabling";
		goto fail;
	}

	/* Mark the M64 BAR assigned */
	set_bit(phb->ioda.m64_bar_idx, &phb->ioda.m64_bar_alloc);

	/*
	 * Strip off the segment used by the reserved PE, which is
	 * expected to be 0 or the last one of the PE capacity.
	 */
	r = &phb->hose->mem_resources[1];
	if (phb->ioda.reserved_pe == 0)
		r->start += phb->ioda.m64_segsize;
	else if (phb->ioda.reserved_pe == (phb->ioda.total_pe - 1))
		r->end -= phb->ioda.m64_segsize;
	else
		pr_warn("  Cannot strip M64 segment for reserved PE#%d\n",
			phb->ioda.reserved_pe);

	return 0;

fail:
	pr_warn("  Failure %lld %s M64 BAR#%d\n",
		rc, desc, phb->ioda.m64_bar_idx);
	opal_pci_phb_mmio_enable(phb->opal_id,
				 OPAL_M64_WINDOW_TYPE,
				 phb->ioda.m64_bar_idx,
				 OPAL_DISABLE_M64);
	return -EIO;
}

static void pnv_ioda2_reserve_m64_pe(struct pnv_phb *phb)
{
	resource_size_t sgsz = phb->ioda.m64_segsize;
	struct pci_dev *pdev;
	struct resource *r;
	int base, step, i;

	/*
	 * Root bus always has full M64 range and root port has
	 * M64 range used in reality. So we're checking root port
	 * instead of root bus.
	 */
	list_for_each_entry(pdev, &phb->hose->bus->devices, bus_list) {
		for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
			r = &pdev->resource[PCI_BRIDGE_RESOURCES + i];
			if (!r->parent ||
			    !pnv_pci_is_mem_pref_64(r->flags))
				continue;

			base = (r->start - phb->ioda.m64_base) / sgsz;
			for (step = 0; step < resource_size(r) / sgsz; step++)
				pnv_ioda_reserve_pe(phb, base + step);
		}
	}
}

static int pnv_ioda2_pick_m64_pe(struct pnv_phb *phb,
				 struct pci_bus *bus, int all)
{
	resource_size_t segsz = phb->ioda.m64_segsize;
	struct pci_dev *pdev;
	struct resource *r;
	struct pnv_ioda_pe *master_pe, *pe;
	unsigned long size, *pe_alloc;
	bool found;
	int start, i, j;

	/* Root bus shouldn't use M64 */
	if (pci_is_root_bus(bus))
		return IODA_INVALID_PE;

	/* We support only one M64 window on each bus */
	found = false;
	pci_bus_for_each_resource(bus, r, i) {
		if (r && r->parent &&
		    pnv_pci_is_mem_pref_64(r->flags)) {
			found = true;
			break;
		}
	}

	/* No M64 window found ? */
	if (!found)
		return IODA_INVALID_PE;

	/* Allocate bitmap */
	size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
	pe_alloc = kzalloc(size, GFP_KERNEL);
	if (!pe_alloc) {
		pr_warn("%s: Out of memory !\n",
			__func__);
		return IODA_INVALID_PE;
	}

	/*
	 * Figure out the reserved PE numbers covered by the PE
	 * and its child PEs.
	 */
	start = (r->start - phb->ioda.m64_base) / segsz;
	for (i = 0; i < resource_size(r) / segsz; i++)
		set_bit(start + i, pe_alloc);

	if (all)
		goto done;

	/*
	 * If the PE doesn't cover all subordinate buses,
	 * we need to subtract the ranges used by children
	 * from the reserved PEs.
	 */
	list_for_each_entry(pdev, &bus->devices, bus_list) {
		if (!pdev->subordinate)
			continue;

		pci_bus_for_each_resource(pdev->subordinate, r, i) {
			if (!r || !r->parent ||
			    !pnv_pci_is_mem_pref_64(r->flags))
				continue;

			start = (r->start - phb->ioda.m64_base) / segsz;
			for (j = 0; j < resource_size(r) / segsz; j++)
				clear_bit(start + j, pe_alloc);
		}
	}

	/*
	 * The current bus might not own the M64 window; it could be
	 * contributed entirely by its child buses. In that case, we
	 * needn't pick an M64-dependent PE#.
	 */
	if (bitmap_empty(pe_alloc, phb->ioda.total_pe)) {
		kfree(pe_alloc);
		return IODA_INVALID_PE;
	}

	/*
	 * Figure out the master PE and put all slave PEs to master
	 * PE's list to form compound PE.
	 */
done:
	master_pe = NULL;
	i = -1;
	while ((i = find_next_bit(pe_alloc, phb->ioda.total_pe, i + 1)) <
		phb->ioda.total_pe) {
		pe = &phb->ioda.pe_array[i];

		if (!master_pe) {
			pe->flags |= PNV_IODA_PE_MASTER;
			INIT_LIST_HEAD(&pe->slaves);
			master_pe = pe;
		} else {
			pe->flags |= PNV_IODA_PE_SLAVE;
			pe->master = master_pe;
			list_add_tail(&pe->list, &master_pe->slaves);
		}
	}

	kfree(pe_alloc);
	return master_pe->pe_number;
}

static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
{
	struct pci_controller *hose = phb->hose;
	struct device_node *dn = hose->dn;
	struct resource *res;
	const u32 *r;
	u64 pci_addr;

	/* FIXME: Support M64 for P7IOC */
	if (phb->type != PNV_PHB_IODA2) {
		pr_info("  Not support M64 window\n");
		return;
	}

	if (!firmware_has_feature(FW_FEATURE_OPALv3)) {
		pr_info("  Firmware too old to support M64 window\n");
		return;
	}

	r = of_get_property(dn, "ibm,opal-m64-window", NULL);
	if (!r) {
		pr_info("  No <ibm,opal-m64-window> on %s\n",
			dn->full_name);
		return;
	}

	res = &hose->mem_resources[1];
	res->start = of_translate_address(dn, r + 2);
	res->end = res->start + of_read_number(r + 4, 2) - 1;
	res->flags = (IORESOURCE_MEM | IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);
	pci_addr = of_read_number(r, 2);
	hose->mem_offset[1] = res->start - pci_addr;

	phb->ioda.m64_size = resource_size(res);
	phb->ioda.m64_segsize = phb->ioda.m64_size / phb->ioda.total_pe;
	phb->ioda.m64_base = pci_addr;

	pr_info(" MEM64 0x%016llx..0x%016llx -> 0x%016llx\n",
		res->start, res->end, pci_addr);

	/* Use last M64 BAR to cover M64 window */
	phb->ioda.m64_bar_idx = 15;
	phb->init_m64 = pnv_ioda2_init_m64;
	phb->reserve_m64_pe = pnv_ioda2_reserve_m64_pe;
	phb->pick_m64_pe = pnv_ioda2_pick_m64_pe;
}

static void pnv_ioda_freeze_pe(struct pnv_phb *phb, int pe_no)
{
	struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_no];
	struct pnv_ioda_pe *slave;
	s64 rc;

	/* Fetch master PE */
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		pe = pe->master;
		if (WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER)))
			return;

		pe_no = pe->pe_number;
	}

	/* Freeze master PE */
	rc = opal_pci_eeh_freeze_set(phb->opal_id,
				     pe_no,
				     OPAL_EEH_ACTION_SET_FREEZE_ALL);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
			__func__, rc, phb->hose->global_number, pe_no);
		return;
	}

	/* Freeze slave PEs */
	if (!(pe->flags & PNV_IODA_PE_MASTER))
		return;

	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_set(phb->opal_id,
					     slave->pe_number,
					     OPAL_EEH_ACTION_SET_FREEZE_ALL);
		if (rc != OPAL_SUCCESS)
			pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				slave->pe_number);
	}
}

static int pnv_ioda_unfreeze_pe(struct pnv_phb *phb, int pe_no, int opt)
{
	struct pnv_ioda_pe *pe, *slave;
	s64 rc;

	/* Find master PE */
	pe = &phb->ioda.pe_array[pe_no];
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		pe = pe->master;
		WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
		pe_no = pe->pe_number;
	}

	/* Clear frozen state for master PE */
	rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no, opt);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n",
			__func__, rc, opt, phb->hose->global_number, pe_no);
		return -EIO;
	}

	if (!(pe->flags & PNV_IODA_PE_MASTER))
		return 0;

	/* Clear frozen state for slave PEs */
	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_clear(phb->opal_id,
					       slave->pe_number,
					       opt);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n",
				__func__, rc, opt, phb->hose->global_number,
				slave->pe_number);
			return -EIO;
		}
	}

	return 0;
}

static int pnv_ioda_get_pe_state(struct pnv_phb *phb, int pe_no)
{
	struct pnv_ioda_pe *slave, *pe;
	u8 fstate, state;
	__be16 pcierr;
	s64 rc;

	/* Sanity check on PE number */
	if (pe_no < 0 || pe_no >= phb->ioda.total_pe)
		return OPAL_EEH_STOPPED_PERM_UNAVAIL;

	/*
	 * Fetch the master PE; note the PE instance might
	 * not be initialized yet.
	 */
	pe = &phb->ioda.pe_array[pe_no];
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		pe = pe->master;
		WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
		pe_no = pe->pe_number;
	}

	/* Check the master PE */
	rc = opal_pci_eeh_freeze_status(phb->opal_id, pe_no,
					&state, &pcierr, NULL);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld getting "
			"PHB#%x-PE#%x state\n",
			__func__, rc,
			phb->hose->global_number, pe_no);
		return OPAL_EEH_STOPPED_TEMP_UNAVAIL;
	}

	/* Check the slave PE */
	if (!(pe->flags & PNV_IODA_PE_MASTER))
		return state;

	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						slave->pe_number,
						&fstate,
						&pcierr,
						NULL);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld getting "
				"PHB#%x-PE#%x state\n",
				__func__, rc,
				phb->hose->global_number, slave->pe_number);
			return OPAL_EEH_STOPPED_TEMP_UNAVAIL;
		}

		/*
		 * Override the result based on the ascending
		 * priority.
		 */
		if (fstate > state)
			state = fstate;
	}

	return state;
}
544
184cd4a3
BH
545/* Currently those 2 are only used when MSIs are enabled, this will change
546 * but in the meantime, we need to protect them to avoid warnings
547 */
548#ifdef CONFIG_PCI_MSI
cad5cef6 549static struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
184cd4a3
BH
550{
551 struct pci_controller *hose = pci_bus_to_host(dev->bus);
552 struct pnv_phb *phb = hose->private_data;
b72c1f65 553 struct pci_dn *pdn = pci_get_pdn(dev);
184cd4a3
BH
554
555 if (!pdn)
556 return NULL;
557 if (pdn->pe_number == IODA_INVALID_PE)
558 return NULL;
559 return &phb->ioda.pe_array[pdn->pe_number];
560}
184cd4a3
BH
561#endif /* CONFIG_PCI_MSI */

static int pnv_ioda_set_one_peltv(struct pnv_phb *phb,
				  struct pnv_ioda_pe *parent,
				  struct pnv_ioda_pe *child,
				  bool is_add)
{
	const char *desc = is_add ? "adding" : "removing";
	uint8_t op = is_add ? OPAL_ADD_PE_TO_DOMAIN :
			      OPAL_REMOVE_PE_FROM_DOMAIN;
	struct pnv_ioda_pe *slave;
	long rc;

	/* Parent PE affects child PE */
	rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
				child->pe_number, op);
	if (rc != OPAL_SUCCESS) {
		pe_warn(child, "OPAL error %ld %s to parent PELTV\n",
			rc, desc);
		return -ENXIO;
	}

	if (!(child->flags & PNV_IODA_PE_MASTER))
		return 0;

	/* Compound case: parent PE affects slave PEs */
	list_for_each_entry(slave, &child->slaves, list) {
		rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
					slave->pe_number, op);
		if (rc != OPAL_SUCCESS) {
			pe_warn(slave, "OPAL error %ld %s to parent PELTV\n",
				rc, desc);
			return -ENXIO;
		}
	}

	return 0;
}

static int pnv_ioda_set_peltv(struct pnv_phb *phb,
			      struct pnv_ioda_pe *pe,
			      bool is_add)
{
	struct pnv_ioda_pe *slave;
	struct pci_dev *pdev = NULL;
	int ret;

	/*
	 * Clear PE frozen state. If it's a master PE, we need
	 * to clear the slave PEs' frozen state as well.
	 */
	if (is_add) {
		opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
					  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		if (pe->flags & PNV_IODA_PE_MASTER) {
			list_for_each_entry(slave, &pe->slaves, list)
				opal_pci_eeh_freeze_clear(phb->opal_id,
							  slave->pe_number,
							  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		}
	}

	/*
	 * Associate PE in PELT. We need to add the PE to the
	 * corresponding PELT-V as well. Otherwise, errors
	 * originating from the PE might propagate to other
	 * PEs.
	 */
	ret = pnv_ioda_set_one_peltv(phb, pe, pe, is_add);
	if (ret)
		return ret;

	/* For compound PEs, any one affects all of them */
	if (pe->flags & PNV_IODA_PE_MASTER) {
		list_for_each_entry(slave, &pe->slaves, list) {
			ret = pnv_ioda_set_one_peltv(phb, slave, pe, is_add);
			if (ret)
				return ret;
		}
	}

	if (pe->flags & (PNV_IODA_PE_BUS_ALL | PNV_IODA_PE_BUS))
		pdev = pe->pbus->self;
	else if (pe->flags & PNV_IODA_PE_DEV)
		pdev = pe->pdev->bus->self;
#ifdef CONFIG_PCI_IOV
	else if (pe->flags & PNV_IODA_PE_VF)
		pdev = pe->parent_dev->bus->self;
#endif /* CONFIG_PCI_IOV */
	while (pdev) {
		struct pci_dn *pdn = pci_get_pdn(pdev);
		struct pnv_ioda_pe *parent;

		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
			parent = &phb->ioda.pe_array[pdn->pe_number];
			ret = pnv_ioda_set_one_peltv(phb, parent, pe, is_add);
			if (ret)
				return ret;
		}

		pdev = pdev->bus->self;
	}

	return 0;
}
666
781a868f
WY
667#ifdef CONFIG_PCI_IOV
668static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
669{
670 struct pci_dev *parent;
671 uint8_t bcomp, dcomp, fcomp;
672 int64_t rc;
673 long rid_end, rid;
674
675 /* Currently, we just deconfigure VF PE. Bus PE will always there.*/
676 if (pe->pbus) {
677 int count;
678
679 dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
680 fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
681 parent = pe->pbus->self;
682 if (pe->flags & PNV_IODA_PE_BUS_ALL)
683 count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
684 else
685 count = 1;
686
687 switch(count) {
688 case 1: bcomp = OpalPciBusAll; break;
689 case 2: bcomp = OpalPciBus7Bits; break;
690 case 4: bcomp = OpalPciBus6Bits; break;
691 case 8: bcomp = OpalPciBus5Bits; break;
692 case 16: bcomp = OpalPciBus4Bits; break;
693 case 32: bcomp = OpalPciBus3Bits; break;
694 default:
695 dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
696 count);
697 /* Do an exact match only */
698 bcomp = OpalPciBusAll;
699 }
700 rid_end = pe->rid + (count << 8);
701 } else {
702 if (pe->flags & PNV_IODA_PE_VF)
703 parent = pe->parent_dev;
704 else
705 parent = pe->pdev->bus->self;
706 bcomp = OpalPciBusAll;
707 dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
708 fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
709 rid_end = pe->rid + 1;
710 }
711
712 /* Clear the reverse map */
713 for (rid = pe->rid; rid < rid_end; rid++)
714 phb->ioda.pe_rmap[rid] = 0;
715
716 /* Release from all parents PELT-V */
717 while (parent) {
718 struct pci_dn *pdn = pci_get_pdn(parent);
719 if (pdn && pdn->pe_number != IODA_INVALID_PE) {
720 rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
721 pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
722 /* XXX What to do in case of error ? */
723 }
724 parent = parent->bus->self;
725 }
726
727 opal_pci_eeh_freeze_set(phb->opal_id, pe->pe_number,
728 OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
729
730 /* Disassociate PE in PELT */
731 rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
732 pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
733 if (rc)
734 pe_warn(pe, "OPAL error %ld remove self from PELTV\n", rc);
735 rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
736 bcomp, dcomp, fcomp, OPAL_UNMAP_PE);
737 if (rc)
738 pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
739
740 pe->pbus = NULL;
741 pe->pdev = NULL;
742 pe->parent_dev = NULL;
743
744 return 0;
745}
746#endif /* CONFIG_PCI_IOV */
747
static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
	struct pci_dev *parent;
	uint8_t bcomp, dcomp, fcomp;
	long rc, rid_end, rid;

	/* Bus validation ? */
	if (pe->pbus) {
		int count;

		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
		parent = pe->pbus->self;
		if (pe->flags & PNV_IODA_PE_BUS_ALL)
			count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
		else
			count = 1;

		switch (count) {
		case  1: bcomp = OpalPciBusAll;		break;
		case  2: bcomp = OpalPciBus7Bits;	break;
		case  4: bcomp = OpalPciBus6Bits;	break;
		case  8: bcomp = OpalPciBus5Bits;	break;
		case 16: bcomp = OpalPciBus4Bits;	break;
		case 32: bcomp = OpalPciBus3Bits;	break;
		default:
			dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
				count);
			/* Do an exact match only */
			bcomp = OpalPciBusAll;
		}
		rid_end = pe->rid + (count << 8);
	} else {
#ifdef CONFIG_PCI_IOV
		if (pe->flags & PNV_IODA_PE_VF)
			parent = pe->parent_dev;
		else
#endif /* CONFIG_PCI_IOV */
			parent = pe->pdev->bus->self;
		bcomp = OpalPciBusAll;
		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
		rid_end = pe->rid + 1;
	}

	/*
	 * Associate PE in PELT. We need to add the PE to the
	 * corresponding PELT-V as well. Otherwise, errors
	 * originating from the PE might propagate to other
	 * PEs.
	 */
	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
			     bcomp, dcomp, fcomp, OPAL_MAP_PE);
	if (rc) {
		pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
		return -ENXIO;
	}

	/* Configure PELTV */
	pnv_ioda_set_peltv(phb, pe, true);

	/* Setup reverse map */
	for (rid = pe->rid; rid < rid_end; rid++)
		phb->ioda.pe_rmap[rid] = pe->pe_number;

	/* Setup one MVT on IODA1 */
	if (phb->type != PNV_PHB_IODA1) {
		pe->mve_number = 0;
		goto out;
	}

	pe->mve_number = pe->pe_number;
	rc = opal_pci_set_mve(phb->opal_id, pe->mve_number, pe->pe_number);
	if (rc != OPAL_SUCCESS) {
		pe_err(pe, "OPAL error %ld setting up MVE %d\n",
		       rc, pe->mve_number);
		pe->mve_number = -1;
	} else {
		rc = opal_pci_set_mve_enable(phb->opal_id,
					     pe->mve_number, OPAL_ENABLE_MVE);
		if (rc) {
			pe_err(pe, "OPAL error %ld enabling MVE %d\n",
			       rc, pe->mve_number);
			pe->mve_number = -1;
		}
	}

out:
	return 0;
}

static void pnv_ioda_link_pe_by_weight(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe)
{
	struct pnv_ioda_pe *lpe;

	list_for_each_entry(lpe, &phb->ioda.pe_dma_list, dma_link) {
		if (lpe->dma_weight < pe->dma_weight) {
			list_add_tail(&pe->dma_link, &lpe->dma_link);
			return;
		}
	}
	list_add_tail(&pe->dma_link, &phb->ioda.pe_dma_list);
}

static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev)
{
	/* This is quite simplistic. The "base" weight of a device
	 * is 10. 0 means no DMA is to be accounted for it.
	 */

	/* If it's a bridge, no DMA */
	if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return 0;

	/* Reduce the weight of slow USB controllers */
	if (dev->class == PCI_CLASS_SERIAL_USB_UHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_OHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_EHCI)
		return 3;

	/* Increase the weight of RAID (includes Obsidian) */
	if ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)
		return 15;

	/* Default */
	return 10;
}

#ifdef CONFIG_PCI_IOV
static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset)
{
	struct pci_dn *pdn = pci_get_pdn(dev);
	int i;
	struct resource *res, res2;
	resource_size_t size;
	u16 num_vfs;

	if (!dev->is_physfn)
		return -EINVAL;

	/*
	 * "offset" is in VFs. The M64 windows are sized so that when they
	 * are segmented, each segment is the same size as the IOV BAR.
	 * Each segment is in a separate PE, and the high order bits of the
	 * address are the PE number. Therefore, each VF's BAR is in a
	 * separate PE, and changing the IOV BAR start address changes the
	 * range of PEs the VFs are in.
	 */
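	/*
	 * Worked example (hypothetical numbers): with an M64 segment/IOV
	 * BAR size of 16M and offset == 2, a VF BAR starting at the
	 * window base moves up by 2 * 16M, i.e. two segments, so VF0
	 * lands in PE# (base + 2), VF1 in PE# (base + 3), and so on.
	 */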
	num_vfs = pdn->num_vfs;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &dev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || !res->parent)
			continue;

		if (!pnv_pci_is_mem_pref_64(res->flags))
			continue;

		/*
		 * The actual IOV BAR range is determined by the start address
		 * and the actual size for num_vfs VFs BAR. This check is to
		 * make sure that after shifting, the range will not overlap
		 * with another device.
		 */
		size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
		res2.flags = res->flags;
		res2.start = res->start + (size * offset);
		res2.end = res2.start + (size * num_vfs) - 1;

		if (res2.end > res->end) {
			dev_err(&dev->dev, "VF BAR%d: %pR would extend past %pR (trying to enable %d VFs shifted by %d)\n",
				i, &res2, res, num_vfs, offset);
			return -EBUSY;
		}
	}

	/*
	 * After doing so, there will be a "hole" in /proc/iomem when
	 * offset is a positive value. It looks like the device returns
	 * some MMIO range back to the system, which in fact no one
	 * could use.
	 */
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &dev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || !res->parent)
			continue;

		if (!pnv_pci_is_mem_pref_64(res->flags))
			continue;

		size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
		res2 = *res;
		res->start += size * offset;

		dev_info(&dev->dev, "VF BAR%d: %pR shifted to %pR (enabling %d VFs shifted by %d)\n",
			 i, &res2, res, num_vfs, offset);
		pci_update_resource(dev, i + PCI_IOV_RESOURCES);
	}
	return 0;
}
#endif /* CONFIG_PCI_IOV */

#if 0
static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(dev);
	struct pnv_ioda_pe *pe;
	int pe_num;

	if (!pdn) {
		pr_err("%s: Device tree node not associated properly\n",
		       pci_name(dev));
		return NULL;
	}
	if (pdn->pe_number != IODA_INVALID_PE)
		return NULL;

	/* PE#0 has been pre-set */
	if (dev->bus->number == 0)
		pe_num = 0;
	else
		pe_num = pnv_ioda_alloc_pe(phb);
	if (pe_num == IODA_INVALID_PE) {
		pr_warning("%s: Not enough PE# available, disabling device\n",
			   pci_name(dev));
		return NULL;
	}

	/* NOTE: We get only one ref to the pci_dev for the pdn, not for the
	 * pointer in the PE data structure, both should be destroyed at the
	 * same time. However, this needs to be looked at more closely again
	 * once we actually start removing things (Hotplug, SR-IOV, ...)
	 *
	 * At some point we want to remove the PDN completely anyways
	 */
	pe = &phb->ioda.pe_array[pe_num];
	pci_dev_get(dev);
	pdn->pcidev = dev;
	pdn->pe_number = pe_num;
	pe->pdev = dev;
	pe->pbus = NULL;
	pe->tce32_seg = -1;
	pe->mve_number = -1;
	pe->rid = dev->bus->number << 8 | pdn->devfn;

	pe_info(pe, "Associated device to PE\n");

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		if (pe_num)
			pnv_ioda_free_pe(phb, pe_num);
		pdn->pe_number = IODA_INVALID_PE;
		pe->pdev = NULL;
		pci_dev_put(dev);
		return NULL;
	}

	/* Assign a DMA weight to the device */
	pe->dma_weight = pnv_ioda_dma_weight(dev);
	if (pe->dma_weight != 0) {
		phb->ioda.dma_weight += pe->dma_weight;
		phb->ioda.dma_pe_count++;
	}

	/* Link the PE */
	pnv_ioda_link_pe_by_weight(phb, pe);

	return pe;
}
#endif /* Useful for SRIOV case */

static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_dn *pdn = pci_get_pdn(dev);

		if (pdn == NULL) {
			pr_warn("%s: No device node associated with device !\n",
				pci_name(dev));
			continue;
		}
		pdn->pe_number = pe->pe_number;
		pe->dma_weight += pnv_ioda_dma_weight(dev);
		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
			pnv_ioda_setup_same_PE(dev->subordinate, pe);
	}
}

/*
 * There're 2 types of PCI bus sensitive PEs: One that is comprised of
 * a single PCI bus. Another one that contains the primary PCI bus and
 * its subordinate PCI devices and buses. The second type of PE is
 * normally originated by a PCIe-to-PCI bridge or PLX switch downstream
 * ports.
 */
static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pe;
	int pe_num = IODA_INVALID_PE;

	/* Check if PE is determined by M64 */
	if (phb->pick_m64_pe)
		pe_num = phb->pick_m64_pe(phb, bus, all);

	/* The PE number isn't pinned by M64 */
	if (pe_num == IODA_INVALID_PE)
		pe_num = pnv_ioda_alloc_pe(phb);

	if (pe_num == IODA_INVALID_PE) {
		pr_warning("%s: Not enough PE# available for PCI bus %04x:%02x\n",
			   __func__, pci_domain_nr(bus), bus->number);
		return;
	}

	pe = &phb->ioda.pe_array[pe_num];
	pe->flags |= (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS);
	pe->pbus = bus;
	pe->pdev = NULL;
	pe->tce32_seg = -1;
	pe->mve_number = -1;
	pe->rid = bus->busn_res.start << 8;
	pe->dma_weight = 0;

	if (all)
		pe_info(pe, "Secondary bus %d..%d associated with PE#%d\n",
			bus->busn_res.start, bus->busn_res.end, pe_num);
	else
		pe_info(pe, "Secondary bus %d associated with PE#%d\n",
			bus->busn_res.start, pe_num);

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		if (pe_num)
			pnv_ioda_free_pe(phb, pe_num);
		pe->pbus = NULL;
		return;
	}

	/* Associate it with all child devices */
	pnv_ioda_setup_same_PE(bus, pe);

	/* Put PE to the list */
	list_add_tail(&pe->list, &phb->ioda.pe_list);

	/* Account for one DMA PE if at least one DMA capable device exists
	 * below the bridge
	 */
	if (pe->dma_weight != 0) {
		phb->ioda.dma_weight += pe->dma_weight;
		phb->ioda.dma_pe_count++;
	}

	/* Link the PE */
	pnv_ioda_link_pe_by_weight(phb, pe);
}

static void pnv_ioda_setup_PEs(struct pci_bus *bus)
{
	struct pci_dev *dev;

	pnv_ioda_setup_bus_PE(bus, 0);

	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (dev->subordinate) {
			if (pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE)
				pnv_ioda_setup_bus_PE(dev->subordinate, 1);
			else
				pnv_ioda_setup_PEs(dev->subordinate);
		}
	}
}

/*
 * Configure PEs so that the downstream PCI buses and devices
 * could have their associated PE#. Unfortunately, we haven't
 * figured out a way to identify PLX bridges yet. So we simply
 * put the PCI bus and the subordinates behind the root port
 * into PE# here. This rule is expected to change as soon as
 * we can detect PLX bridges correctly.
 */
static void pnv_pci_ioda_setup_PEs(void)
{
	struct pci_controller *hose, *tmp;
	struct pnv_phb *phb;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		phb = hose->private_data;

		/* M64 layout might affect PE allocation */
		if (phb->reserve_m64_pe)
			phb->reserve_m64_pe(phb);

		pnv_ioda_setup_PEs(hose->bus);
	}
}

#ifdef CONFIG_PCI_IOV
static int pnv_pci_vf_release_m64(struct pci_dev *pdev)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pci_dn *pdn;
	int i, j;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
		for (j = 0; j < M64_PER_IOV; j++) {
			if (pdn->m64_wins[i][j] == IODA_INVALID_M64)
				continue;
			opal_pci_phb_mmio_enable(phb->opal_id,
				OPAL_M64_WINDOW_TYPE, pdn->m64_wins[i][j], 0);
			clear_bit(pdn->m64_wins[i][j], &phb->ioda.m64_bar_alloc);
			pdn->m64_wins[i][j] = IODA_INVALID_M64;
		}

	return 0;
}

static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pci_dn *pdn;
	unsigned int win;
	struct resource *res;
	int i, j;
	int64_t rc;
	int total_vfs;
	resource_size_t size, start;
	int pe_num;
	int vf_groups;
	int vf_per_group;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);
	total_vfs = pci_sriov_get_totalvfs(pdev);

	/* Initialize the m64_wins to IODA_INVALID_M64 */
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
		for (j = 0; j < M64_PER_IOV; j++)
			pdn->m64_wins[i][j] = IODA_INVALID_M64;

	if (pdn->m64_per_iov == M64_PER_IOV) {
		vf_groups = (num_vfs <= M64_PER_IOV) ? num_vfs : M64_PER_IOV;
		vf_per_group = (num_vfs <= M64_PER_IOV) ? 1 :
			roundup_pow_of_two(num_vfs) / pdn->m64_per_iov;
	} else {
		vf_groups = 1;
		vf_per_group = 1;
	}

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &pdev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || !res->parent)
			continue;

		if (!pnv_pci_is_mem_pref_64(res->flags))
			continue;

		for (j = 0; j < vf_groups; j++) {
			do {
				win = find_next_zero_bit(&phb->ioda.m64_bar_alloc,
						phb->ioda.m64_bar_idx + 1, 0);

				if (win >= phb->ioda.m64_bar_idx + 1)
					goto m64_failed;
			} while (test_and_set_bit(win, &phb->ioda.m64_bar_alloc));

			pdn->m64_wins[i][j] = win;

			if (pdn->m64_per_iov == M64_PER_IOV) {
				size = pci_iov_resource_size(pdev,
							PCI_IOV_RESOURCES + i);
				size = size * vf_per_group;
				start = res->start + size * j;
			} else {
				size = resource_size(res);
				start = res->start;
			}

			/* Map the M64 here */
			if (pdn->m64_per_iov == M64_PER_IOV) {
				pe_num = pdn->offset + j;
				rc = opal_pci_map_pe_mmio_window(phb->opal_id,
						pe_num, OPAL_M64_WINDOW_TYPE,
						pdn->m64_wins[i][j], 0);
			}

			rc = opal_pci_set_phb_mem_window(phb->opal_id,
						 OPAL_M64_WINDOW_TYPE,
						 pdn->m64_wins[i][j],
						 start,
						 0, /* unused */
						 size);

			if (rc != OPAL_SUCCESS) {
				dev_err(&pdev->dev, "Failed to map M64 window #%d: %lld\n",
					win, rc);
				goto m64_failed;
			}

			if (pdn->m64_per_iov == M64_PER_IOV)
				rc = opal_pci_phb_mmio_enable(phb->opal_id,
				     OPAL_M64_WINDOW_TYPE, pdn->m64_wins[i][j], 2);
			else
				rc = opal_pci_phb_mmio_enable(phb->opal_id,
				     OPAL_M64_WINDOW_TYPE, pdn->m64_wins[i][j], 1);

			if (rc != OPAL_SUCCESS) {
				dev_err(&pdev->dev, "Failed to enable M64 window #%d: %llx\n",
					win, rc);
				goto m64_failed;
			}
		}
	}
	return 0;

m64_failed:
	pnv_pci_vf_release_m64(pdev);
	return -EBUSY;
}

static void pnv_pci_ioda2_release_dma_pe(struct pci_dev *dev, struct pnv_ioda_pe *pe)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct iommu_table *tbl;
	unsigned long addr;
	int64_t rc;

	bus = dev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	tbl = pe->table_group.tables[0];
	addr = tbl->it_base;

	opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
				   pe->pe_number << 1, 1, __pa(addr),
				   0, 0x1000);

	rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
					     pe->pe_number,
					     (pe->pe_number << 1) + 1,
					     pe->tce_bypass_base,
					     0);
	if (rc)
		pe_warn(pe, "OPAL error %ld release DMA window\n", rc);

	pnv_pci_unlink_table_and_group(tbl, &pe->table_group);
	if (pe->table_group.group) {
		iommu_group_put(pe->table_group.group);
		BUG_ON(pe->table_group.group);
	}
	iommu_free_table(tbl, of_node_full_name(dev->dev.of_node));
	free_pages(addr, get_order(TCE32_TABLE_SIZE));
}

static void pnv_ioda_release_vf_PE(struct pci_dev *pdev, u16 num_vfs)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe, *pe_n;
	struct pci_dn *pdn;
	u16 vf_index;
	int64_t rc;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);

	if (!pdev->is_physfn)
		return;

	if (pdn->m64_per_iov == M64_PER_IOV && num_vfs > M64_PER_IOV) {
		int vf_group;
		int vf_per_group;
		int vf_index1;

		vf_per_group = roundup_pow_of_two(num_vfs) / pdn->m64_per_iov;

		for (vf_group = 0; vf_group < M64_PER_IOV; vf_group++)
			for (vf_index = vf_group * vf_per_group;
			     vf_index < (vf_group + 1) * vf_per_group &&
			     vf_index < num_vfs;
			     vf_index++)
				for (vf_index1 = vf_group * vf_per_group;
				     vf_index1 < (vf_group + 1) * vf_per_group &&
				     vf_index1 < num_vfs;
				     vf_index1++) {

					rc = opal_pci_set_peltv(phb->opal_id,
						pdn->offset + vf_index,
						pdn->offset + vf_index1,
						OPAL_REMOVE_PE_FROM_DOMAIN);

					if (rc)
						dev_warn(&pdev->dev, "%s: Failed to unlink same group PE#%d(%lld)\n",
							 __func__,
							 pdn->offset + vf_index1, rc);
				}
	}

	list_for_each_entry_safe(pe, pe_n, &phb->ioda.pe_list, list) {
		if (pe->parent_dev != pdev)
			continue;

		pnv_pci_ioda2_release_dma_pe(pdev, pe);

		/* Remove from list */
		mutex_lock(&phb->ioda.pe_list_mutex);
		list_del(&pe->list);
		mutex_unlock(&phb->ioda.pe_list_mutex);

		pnv_ioda_deconfigure_pe(phb, pe);

		pnv_ioda_free_pe(phb, pe->pe_number);
	}
}

void pnv_pci_sriov_disable(struct pci_dev *pdev)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pci_dn *pdn;
	struct pci_sriov *iov;
	u16 num_vfs;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);
	iov = pdev->sriov;
	num_vfs = pdn->num_vfs;

	/* Release VF PEs */
	pnv_ioda_release_vf_PE(pdev, num_vfs);

	if (phb->type == PNV_PHB_IODA2) {
		if (pdn->m64_per_iov == 1)
			pnv_pci_vf_resource_shift(pdev, -pdn->offset);

		/* Release M64 windows */
		pnv_pci_vf_release_m64(pdev);

		/* Release PE numbers */
		bitmap_clear(phb->ioda.pe_alloc, pdn->offset, num_vfs);
		pdn->offset = 0;
	}
}

static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe);
static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe;
	int pe_num;
	u16 vf_index;
	struct pci_dn *pdn;
	int64_t rc;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);

	if (!pdev->is_physfn)
		return;

	/* Reserve PE for each VF */
	for (vf_index = 0; vf_index < num_vfs; vf_index++) {
		pe_num = pdn->offset + vf_index;

		pe = &phb->ioda.pe_array[pe_num];
		pe->pe_number = pe_num;
		pe->phb = phb;
		pe->flags = PNV_IODA_PE_VF;
		pe->pbus = NULL;
		pe->parent_dev = pdev;
		pe->tce32_seg = -1;
		pe->mve_number = -1;
		pe->rid = (pci_iov_virtfn_bus(pdev, vf_index) << 8) |
			   pci_iov_virtfn_devfn(pdev, vf_index);

		pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%d\n",
			hose->global_number, pdev->bus->number,
			PCI_SLOT(pci_iov_virtfn_devfn(pdev, vf_index)),
			PCI_FUNC(pci_iov_virtfn_devfn(pdev, vf_index)), pe_num);

		if (pnv_ioda_configure_pe(phb, pe)) {
			/* XXX What do we do here ? */
			if (pe_num)
				pnv_ioda_free_pe(phb, pe_num);
			pe->pdev = NULL;
			continue;
		}

		/* Put PE to the list */
		mutex_lock(&phb->ioda.pe_list_mutex);
		list_add_tail(&pe->list, &phb->ioda.pe_list);
		mutex_unlock(&phb->ioda.pe_list_mutex);

		pnv_pci_ioda2_setup_dma_pe(phb, pe);
	}

	if (pdn->m64_per_iov == M64_PER_IOV && num_vfs > M64_PER_IOV) {
		int vf_group;
		int vf_per_group;
		int vf_index1;

		vf_per_group = roundup_pow_of_two(num_vfs) / pdn->m64_per_iov;

		for (vf_group = 0; vf_group < M64_PER_IOV; vf_group++) {
			for (vf_index = vf_group * vf_per_group;
			     vf_index < (vf_group + 1) * vf_per_group &&
			     vf_index < num_vfs;
			     vf_index++) {
				for (vf_index1 = vf_group * vf_per_group;
				     vf_index1 < (vf_group + 1) * vf_per_group &&
				     vf_index1 < num_vfs;
				     vf_index1++) {

					rc = opal_pci_set_peltv(phb->opal_id,
						pdn->offset + vf_index,
						pdn->offset + vf_index1,
						OPAL_ADD_PE_TO_DOMAIN);

					if (rc)
						dev_warn(&pdev->dev, "%s: Failed to link same group PE#%d(%lld)\n",
							 __func__,
							 pdn->offset + vf_index1, rc);
				}
			}
		}
	}
}

int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pci_dn *pdn;
	int ret;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);

	if (phb->type == PNV_PHB_IODA2) {
		/* Calculate available PE for required VFs */
		mutex_lock(&phb->ioda.pe_alloc_mutex);
		pdn->offset = bitmap_find_next_zero_area(
			phb->ioda.pe_alloc, phb->ioda.total_pe,
			0, num_vfs, 0);
		if (pdn->offset >= phb->ioda.total_pe) {
			mutex_unlock(&phb->ioda.pe_alloc_mutex);
			dev_info(&pdev->dev, "Failed to enable VF%d\n", num_vfs);
			pdn->offset = 0;
			return -EBUSY;
		}
		bitmap_set(phb->ioda.pe_alloc, pdn->offset, num_vfs);
		pdn->num_vfs = num_vfs;
		mutex_unlock(&phb->ioda.pe_alloc_mutex);

		/* Assign M64 window accordingly */
		ret = pnv_pci_vf_assign_m64(pdev, num_vfs);
		if (ret) {
			dev_info(&pdev->dev, "Not enough M64 window resources\n");
			goto m64_failed;
		}

		/*
		 * When using one M64 BAR to map one IOV BAR, we need to shift
		 * the IOV BAR according to the PE# allocated to the VFs.
		 * Otherwise, the PE# for the VF will conflict with others.
		 */
		if (pdn->m64_per_iov == 1) {
			ret = pnv_pci_vf_resource_shift(pdev, pdn->offset);
			if (ret)
				goto m64_failed;
		}
	}

	/* Setup VF PEs */
	pnv_ioda_setup_vf_PE(pdev, num_vfs);

	return 0;

m64_failed:
	bitmap_clear(phb->ioda.pe_alloc, pdn->offset, num_vfs);
	pdn->offset = 0;

	return ret;
}

int pcibios_sriov_disable(struct pci_dev *pdev)
{
	pnv_pci_sriov_disable(pdev);

	/* Release PCI data */
	remove_dev_pci_data(pdev);
	return 0;
}

int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
	/* Allocate PCI data */
	add_dev_pci_data(pdev);

	pnv_pci_sriov_enable(pdev, num_vfs);
	return 0;
}
#endif /* CONFIG_PCI_IOV */

static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev)
{
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;

	/*
	 * The function can be called while the PE#
	 * hasn't been assigned. Do nothing in that
	 * case.
	 */
	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return;

	pe = &phb->ioda.pe_array[pdn->pe_number];
	WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
	set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);
	/*
	 * Note: iommu_add_device() will fail here as
	 * for physical PE: the device is already added by now;
	 * for virtual PE: sysfs entries are not ready yet and
	 * tce_iommu_bus_notifier will add the device to a group later.
	 */
}

static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;
	uint64_t top;
	bool bypass = false;

	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
		return -ENODEV;

	pe = &phb->ioda.pe_array[pdn->pe_number];
	if (pe->tce_bypass_enabled) {
		top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1;
		bypass = (dma_mask >= top);
	}

	if (bypass) {
		dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n");
		set_dma_ops(&pdev->dev, &dma_direct_ops);
		set_dma_offset(&pdev->dev, pe->tce_bypass_base);
	} else {
		dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
		set_dma_ops(&pdev->dev, &dma_iommu_ops);
		set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);
	}
	*pdev->dev.dma_mask = dma_mask;
	return 0;
}
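
/*
 * Example of the bypass decision above (hypothetical sizes): with 8G of
 * RAM, a driver requesting dma_mask = DMA_BIT_MASK(64) reaches past
 * tce_bypass_base + memblock_end_of_DRAM() - 1 and is given the direct
 * dma_direct_ops, while a 32-bit mask falls back to dma_iommu_ops and
 * the 32-bit TCE table.
 */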

static u64 pnv_pci_ioda_dma_get_required_mask(struct pnv_phb *phb,
					      struct pci_dev *pdev)
{
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;
	u64 end, mask;

	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
		return 0;

	pe = &phb->ioda.pe_array[pdn->pe_number];
	if (!pe->tce_bypass_enabled)
		return __dma_get_required_mask(&pdev->dev);

	end = pe->tce_bypass_base + memblock_end_of_DRAM();
	mask = 1ULL << (fls64(end) - 1);
	mask += mask - 1;

	return mask;
}
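
/*
 * Worked example for the mask computation above: if end is
 * 0x2_8000_0000 (tce_bypass_base of 0 plus 10G of RAM), fls64(end) is
 * 34, so mask becomes (1ULL << 33) + ((1ULL << 33) - 1), i.e.
 * 0x3_FFFF_FFFF: the smallest all-ones mask covering the end address.
 */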

static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
				   struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
		iommu_add_device(&dev->dev);

		if (dev->subordinate)
			pnv_ioda_setup_bus_dma(pe, dev->subordinate);
	}
}

static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
		unsigned long index, unsigned long npages, bool rm)
{
	struct iommu_table_group_link *tgl = list_first_entry_or_null(
			&tbl->it_group_list, struct iommu_table_group_link,
			next);
	struct pnv_ioda_pe *pe = container_of(tgl->table_group,
			struct pnv_ioda_pe, table_group);
	__be64 __iomem *invalidate = rm ?
		(__be64 __iomem *)pe->tce_inval_reg_phys :
		(__be64 __iomem *)tbl->it_index;
	unsigned long start, end, inc;
	const unsigned shift = tbl->it_page_shift;

	start = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset);
	end = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset +
			npages - 1);

	/* BML uses this case for p6/p7/galaxy2: Shift addr and put in node */
	if (tbl->it_busno) {
		start <<= shift;
		end <<= shift;
		inc = 128ull << shift;
		start |= tbl->it_busno;
		end |= tbl->it_busno;
	} else if (tbl->it_type & TCE_PCI_SWINV_PAIR) {
		/* p7ioc-style invalidation, 2 TCEs per write */
		start |= (1ull << 63);
		end |= (1ull << 63);
		inc = 16;
	} else {
		/* Default (older HW) */
		inc = 128;
	}

	end |= inc - 1;	/* round up end to be different than start */

	mb(); /* Ensure above stores are visible */
	while (start <= end) {
		if (rm)
			__raw_rm_writeq(cpu_to_be64(start), invalidate);
		else
			__raw_writeq(cpu_to_be64(start), invalidate);
		start += inc;
	}

	/*
	 * The iommu layer will do another mb() for us on build()
	 * and we don't care on free()
	 */
}

static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index,
		long npages, unsigned long uaddr,
		enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
			attrs);

	if (!ret && (tbl->it_type & TCE_PCI_SWINV_CREATE))
		pnv_pci_ioda1_tce_invalidate(tbl, index, npages, false);

	return ret;
}

static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index,
		long npages)
{
	pnv_tce_free(tbl, index, npages);

	if (tbl->it_type & TCE_PCI_SWINV_FREE)
		pnv_pci_ioda1_tce_invalidate(tbl, index, npages, false);
}

static struct iommu_table_ops pnv_ioda1_iommu_ops = {
	.set = pnv_ioda1_tce_build,
	.clear = pnv_ioda1_tce_free,
	.get = pnv_tce_get,
};

static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
		unsigned long index, unsigned long npages, bool rm)
{
	struct iommu_table_group_link *tgl = list_first_entry_or_null(
			&tbl->it_group_list, struct iommu_table_group_link,
			next);
	struct pnv_ioda_pe *pe = container_of(tgl->table_group,
			struct pnv_ioda_pe, table_group);
	unsigned long start, end, inc;
	__be64 __iomem *invalidate = rm ?
		(__be64 __iomem *)pe->tce_inval_reg_phys :
		(__be64 __iomem *)tbl->it_index;
	const unsigned shift = tbl->it_page_shift;

	/* We'll invalidate DMA address in PE scope */
	start = 0x2ull << 60;
	start |= (pe->pe_number & 0xFF);
	end = start;

	/* Figure out the start, end and step */
	start |= (index << shift);
	end |= ((index + npages - 1) << shift);
	inc = (0x1ull << shift);
	mb();

	while (start <= end) {
		if (rm)
			__raw_rm_writeq(cpu_to_be64(start), invalidate);
		else
			__raw_writeq(cpu_to_be64(start), invalidate);
		start += inc;
	}
}
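
/*
 * Example of the kill-register value formed above (hypothetical PE and
 * index): with a 4K page shift, PE# 5 and index 0x10 for one page, the
 * loop writes a single 0x2000_0000_0001_0005 to the invalidate
 * register.
 */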

static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index,
		long npages, unsigned long uaddr,
		enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
			attrs);

	if (!ret && (tbl->it_type & TCE_PCI_SWINV_CREATE))
		pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);

	return ret;
}

static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
		long npages)
{
	pnv_tce_free(tbl, index, npages);

	if (tbl->it_type & TCE_PCI_SWINV_FREE)
		pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
}

static struct iommu_table_ops pnv_ioda2_iommu_ops = {
	.set = pnv_ioda2_tce_build,
	.clear = pnv_ioda2_tce_free,
	.get = pnv_tce_get,
};
1817
cad5cef6
GKH
1818static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
1819 struct pnv_ioda_pe *pe, unsigned int base,
1820 unsigned int segs)
184cd4a3
BH
1821{
1822
1823 struct page *tce_mem = NULL;
1824 const __be64 *swinvp;
1825 struct iommu_table *tbl;
1826 unsigned int i;
1827 int64_t rc;
1828 void *addr;
1829
184cd4a3
BH
1830 /* XXX FIXME: Handle 64-bit only DMA devices */
1831 /* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */
1832 /* XXX FIXME: Allocate multi-level tables on PHB3 */
1833
1834 /* We shouldn't already have a 32-bit DMA associated */
1835 if (WARN_ON(pe->tce32_seg >= 0))
1836 return;
1837
0eaf4def 1838 tbl = pnv_pci_table_alloc(phb->hose->node);
b348aa65
AK
1839 iommu_register_group(&pe->table_group, phb->hose->global_number,
1840 pe->pe_number);
0eaf4def 1841 pnv_pci_link_table_and_group(phb->hose->node, 0, tbl, &pe->table_group);
c5773822 1842
184cd4a3
BH
1843 /* Grab a 32-bit TCE table */
1844 pe->tce32_seg = base;
1845 pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n",
1846 (base << 28), ((base + segs) << 28) - 1);
1847
1848 /* XXX Currently, we allocate one big contiguous table for the
1849 * TCEs. We only really need one chunk per 256M of TCE space
1850 * (ie per segment) but that's an optimization for later, it
1851 * requires some added smarts with our get/put_tce implementation
1852 */
1853 tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
1854 get_order(TCE32_TABLE_SIZE * segs));
1855 if (!tce_mem) {
1856 pe_err(pe, " Failed to allocate a 32-bit TCE memory\n");
1857 goto fail;
1858 }
1859 addr = page_address(tce_mem);
1860 memset(addr, 0, TCE32_TABLE_SIZE * segs);
1861
1862 /* Configure HW */
1863 for (i = 0; i < segs; i++) {
1864 rc = opal_pci_map_pe_dma_window(phb->opal_id,
1865 pe->pe_number,
1866 base + i, 1,
1867 __pa(addr) + TCE32_TABLE_SIZE * i,
1868 TCE32_TABLE_SIZE, 0x1000);
1869 if (rc) {
1870 pe_err(pe, " Failed to configure 32-bit TCE table,"
1871 " err %ld\n", rc);
1872 goto fail;
1873 }
1874 }
1875
1876 /* Setup linux iommu table */
184cd4a3 1877 pnv_pci_setup_iommu_table(tbl, addr, TCE32_TABLE_SIZE * segs,
8fa5d454 1878 base << 28, IOMMU_PAGE_SHIFT_4K);
184cd4a3
BH
1879
1880 /* OPAL variant of P7IOC SW invalidated TCEs */
1881 swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
1882 if (swinvp) {
1883 /* We need a couple more fields -- an address and a data
 1884 * value to OR in. Since the bus is only printed out on table free
1885 * errors, and on the first pass the data will be a relative
1886 * bus number, print that out instead.
1887 */
8e0a1611
AK
1888 pe->tce_inval_reg_phys = be64_to_cpup(swinvp);
1889 tbl->it_index = (unsigned long)ioremap(pe->tce_inval_reg_phys,
1890 8);
65fd766b
GS
1891 tbl->it_type |= (TCE_PCI_SWINV_CREATE |
1892 TCE_PCI_SWINV_FREE |
1893 TCE_PCI_SWINV_PAIR);
184cd4a3 1894 }
da004c36 1895 tbl->it_ops = &pnv_ioda1_iommu_ops;
184cd4a3
BH
1896 iommu_init_table(tbl, phb->hose->node);
1897
781a868f 1898 if (pe->flags & PNV_IODA_PE_DEV) {
4617082e
AK
1899 /*
 1900 * Set the table base here only to carry the iommu_group
 1901 * further down and let iommu_add_device() do the job.
1902 * pnv_pci_ioda_dma_dev_setup will override it later anyway.
1903 */
1904 set_iommu_table_base(&pe->pdev->dev, tbl);
1905 iommu_add_device(&pe->pdev->dev);
c5773822 1906 } else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
ea30e99e 1907 pnv_ioda_setup_bus_dma(pe, pe->pbus);
74251fe2 1908
184cd4a3
BH
1909 return;
1910 fail:
1911 /* XXX Failure: Try to fallback to 64-bit only ? */
1912 if (pe->tce32_seg >= 0)
1913 pe->tce32_seg = -1;
1914 if (tce_mem)
1915 __free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));
0eaf4def
AK
1916 if (tbl) {
1917 pnv_pci_unlink_table_and_group(tbl, &pe->table_group);
1918 iommu_free_table(tbl, "pnv");
1919 }
184cd4a3
BH
1920}
1921
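/*
 * Illustration only, assuming the constants behind TCE32_TABLE_SIZE
 * (256M of DMA space per segment, 4K TCE pages, 8-byte TCEs): segment
 * 'base + s' of a PE covers the 32-bit DMA range computed here.
 */
static inline void tce32_segment_range(unsigned int base, unsigned int s,
				       unsigned long *dma_start,
				       unsigned long *dma_end)
{
	*dma_start = (unsigned long)(base + s) << 28;	/* 256M per segment */
	*dma_end = ((unsigned long)(base + s + 1) << 28) - 1;
}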
f87a8864 1922static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable)
cd15b048 1923{
cd15b048
BH
 1924 uint16_t window_id = (pe->pe_number << 1) + 1;
1925 int64_t rc;
1926
1927 pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis");
1928 if (enable) {
1929 phys_addr_t top = memblock_end_of_DRAM();
1930
1931 top = roundup_pow_of_two(top);
1932 rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
1933 pe->pe_number,
1934 window_id,
1935 pe->tce_bypass_base,
1936 top);
1937 } else {
1938 rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
1939 pe->pe_number,
1940 window_id,
1941 pe->tce_bypass_base,
1942 0);
cd15b048
BH
1943 }
1944 if (rc)
1945 pe_err(pe, "OPAL error %lld configuring bypass window\n", rc);
1946 else
1947 pe->tce_bypass_enabled = enable;
1948}
1949
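/*
 * A hedged sketch of the sizing rule above: the bypass window has to
 * cover all of DRAM, and the TVE wants a power-of-two size, so e.g. a
 * 48G machine gets a 64G window at pe->tce_bypass_base (PCI bit 59).
 */
static inline phys_addr_t bypass_window_size(phys_addr_t dram_top)
{
	return roundup_pow_of_two(dram_top);	/* 48G -> 64G, 16G -> 16G */
}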
f87a8864
AK
1950#ifdef CONFIG_IOMMU_API
1951static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
cd15b048 1952{
f87a8864
AK
1953 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
1954 table_group);
cd15b048 1955
f87a8864
AK
1956 iommu_take_ownership(table_group->tables[0]);
1957 pnv_pci_ioda2_set_bypass(pe, false);
1958}
cd15b048 1959
f87a8864
AK
1960static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group)
1961{
1962 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
1963 table_group);
1964
1965 iommu_release_ownership(table_group->tables[0]);
1966 pnv_pci_ioda2_set_bypass(pe, true);
cd15b048
BH
1967}
1968
f87a8864
AK
1969static struct iommu_table_group_ops pnv_pci_ioda2_ops = {
1970 .take_ownership = pnv_ioda2_take_ownership,
1971 .release_ownership = pnv_ioda2_release_ownership,
1972};
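/*
 * Sketch of the intended ownership hand-off (the real caller is the
 * VFIO/IOMMU layer, not this file): taking ownership quiesces the
 * kernel-owned table and shuts the bypass window; releasing restores it.
 */
static void example_ownership_cycle(struct iommu_table_group *table_group)
{
	table_group->ops->take_ownership(table_group);
	/* ... an external user (e.g. VFIO) programs its own TCEs here ... */
	table_group->ops->release_ownership(table_group);
}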
1973#endif
1974
373f5657
GS
1975static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
1976 struct pnv_ioda_pe *pe)
1977{
1978 struct page *tce_mem = NULL;
1979 void *addr;
1980 const __be64 *swinvp;
1981 struct iommu_table *tbl;
1982 unsigned int tce_table_size, end;
1983 int64_t rc;
1984
1985 /* We shouldn't already have a 32-bit DMA associated */
1986 if (WARN_ON(pe->tce32_seg >= 0))
1987 return;
1988
f87a8864
AK
1989 /* TVE #1 is selected by PCI address bit 59 */
1990 pe->tce_bypass_base = 1ull << 59;
1991
0eaf4def 1992 tbl = pnv_pci_table_alloc(phb->hose->node);
b348aa65
AK
1993 iommu_register_group(&pe->table_group, phb->hose->global_number,
1994 pe->pe_number);
0eaf4def 1995 pnv_pci_link_table_and_group(phb->hose->node, 0, tbl, &pe->table_group);
c5773822 1996
373f5657
GS
1997 /* The PE will reserve all possible 32-bits space */
1998 pe->tce32_seg = 0;
1999 end = (1 << ilog2(phb->ioda.m32_pci_base));
2000 tce_table_size = (end / 0x1000) * 8;
2001 pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n",
2002 end);
2003
2004 /* Allocate TCE table */
2005 tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
2006 get_order(tce_table_size));
2007 if (!tce_mem) {
 2008 pe_err(pe, "Failed to allocate 32-bit TCE memory\n");
2009 goto fail;
2010 }
2011 addr = page_address(tce_mem);
2012 memset(addr, 0, tce_table_size);
2013
2014 /*
2015 * Map TCE table through TVT. The TVE index is the PE number
2016 * shifted by 1 bit for 32-bits DMA space.
2017 */
2018 rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
2019 pe->pe_number << 1, 1, __pa(addr),
2020 tce_table_size, 0x1000);
2021 if (rc) {
2022 pe_err(pe, "Failed to configure 32-bit TCE table,"
2023 " err %ld\n", rc);
2024 goto fail;
2025 }
2026
2027 /* Setup linux iommu table */
8fa5d454
AK
2028 pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, 0,
2029 IOMMU_PAGE_SHIFT_4K);
373f5657
GS
2030
2031 /* OPAL variant of PHB3 invalidated TCEs */
2032 swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
2033 if (swinvp) {
2034 /* We need a couple more fields -- an address and a data
 2035 * value to OR in. Since the bus is only printed out on table free
2036 * errors, and on the first pass the data will be a relative
2037 * bus number, print that out instead.
2038 */
8e0a1611
AK
2039 pe->tce_inval_reg_phys = be64_to_cpup(swinvp);
2040 tbl->it_index = (unsigned long)ioremap(pe->tce_inval_reg_phys,
2041 8);
65fd766b 2042 tbl->it_type |= (TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE);
373f5657 2043 }
da004c36 2044 tbl->it_ops = &pnv_ioda2_iommu_ops;
373f5657 2045 iommu_init_table(tbl, phb->hose->node);
f87a8864
AK
2046#ifdef CONFIG_IOMMU_API
2047 pe->table_group.ops = &pnv_pci_ioda2_ops;
2048#endif
373f5657 2049
781a868f 2050 if (pe->flags & PNV_IODA_PE_DEV) {
4617082e
AK
2051 /*
 2052 * Set the table base here only to carry the iommu_group
 2053 * further down and let iommu_add_device() do the job.
2054 * pnv_pci_ioda_dma_dev_setup will override it later anyway.
2055 */
2056 set_iommu_table_base(&pe->pdev->dev, tbl);
2057 iommu_add_device(&pe->pdev->dev);
c5773822 2058 } else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
ea30e99e 2059 pnv_ioda_setup_bus_dma(pe, pe->pbus);
74251fe2 2060
cd15b048 2061 /* Also create a bypass window */
4e287840 2062 if (!pnv_iommu_bypass_disabled)
f87a8864 2063 pnv_pci_ioda2_set_bypass(pe, true);
4e287840 2064
373f5657
GS
2065 return;
2066fail:
2067 if (pe->tce32_seg >= 0)
2068 pe->tce32_seg = -1;
2069 if (tce_mem)
2070 __free_pages(tce_mem, get_order(tce_table_size));
0eaf4def
AK
2071 if (tbl) {
2072 pnv_pci_unlink_table_and_group(tbl, &pe->table_group);
2073 iommu_free_table(tbl, "pnv");
2074 }
373f5657
GS
2075}
2076
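/*
 * Worked illustration of the sizing above: with one 8-byte TCE per 4K
 * page, an assumed 2G (0x80000000) 32-bit window needs a 4M table.
 */
static inline unsigned long ioda2_tce_table_bytes(unsigned long window_size)
{
	return (window_size / 0x1000) * 8;	/* 0x80000000 -> 0x400000 */
}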
cad5cef6 2077static void pnv_ioda_setup_dma(struct pnv_phb *phb)
184cd4a3
BH
2078{
2079 struct pci_controller *hose = phb->hose;
2080 unsigned int residual, remaining, segs, tw, base;
2081 struct pnv_ioda_pe *pe;
2082
2083 /* If we have more PE# than segments available, hand out one
2084 * per PE until we run out and let the rest fail. If not,
2085 * then we assign at least one segment per PE, plus more based
 2086 * on the number of devices under that PE.
2087 */
2088 if (phb->ioda.dma_pe_count > phb->ioda.tce32_count)
2089 residual = 0;
2090 else
2091 residual = phb->ioda.tce32_count -
2092 phb->ioda.dma_pe_count;
2093
2094 pr_info("PCI: Domain %04x has %ld available 32-bit DMA segments\n",
2095 hose->global_number, phb->ioda.tce32_count);
2096 pr_info("PCI: %d PE# for a total weight of %d\n",
2097 phb->ioda.dma_pe_count, phb->ioda.dma_weight);
2098
2099 /* Walk our PE list and configure their DMA segments, hand them
2100 * out one base segment plus any residual segments based on
2101 * weight
2102 */
2103 remaining = phb->ioda.tce32_count;
2104 tw = phb->ioda.dma_weight;
2105 base = 0;
7ebdf956 2106 list_for_each_entry(pe, &phb->ioda.pe_dma_list, dma_link) {
184cd4a3
BH
2107 if (!pe->dma_weight)
2108 continue;
2109 if (!remaining) {
2110 pe_warn(pe, "No DMA32 resources available\n");
2111 continue;
2112 }
2113 segs = 1;
2114 if (residual) {
2115 segs += ((pe->dma_weight * residual) + (tw / 2)) / tw;
2116 if (segs > remaining)
2117 segs = remaining;
2118 }
373f5657
GS
2119
2120 /*
 2121 * For an IODA2-compliant PHB3, we needn't care about the weight:
 2122 * all of the available 32-bit DMA space will be assigned to
 2123 * the specific PE.
2124 */
2125 if (phb->type == PNV_PHB_IODA1) {
2126 pe_info(pe, "DMA weight %d, assigned %d DMA32 segments\n",
2127 pe->dma_weight, segs);
2128 pnv_pci_ioda_setup_dma_pe(phb, pe, base, segs);
2129 } else {
2130 pe_info(pe, "Assign DMA32 space\n");
2131 segs = 0;
2132 pnv_pci_ioda2_setup_dma_pe(phb, pe);
2133 }
2134
184cd4a3
BH
2135 remaining -= segs;
2136 base += segs;
2137 }
2138}
2139
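/*
 * Sketch of the IODA1 hand-out above with made-up numbers: each PE gets
 * one base segment plus a nearest-rounded share of the residual by
 * weight, e.g. weight 3 of total 10 with residual 14 -> 1 + 47/10 = 5.
 */
static inline unsigned int ioda1_segs_for_pe(unsigned int weight,
					     unsigned int residual,
					     unsigned int total_weight)
{
	return 1 + ((weight * residual) + (total_weight / 2)) / total_weight;
}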
2140#ifdef CONFIG_PCI_MSI
137436c9
GS
2141static void pnv_ioda2_msi_eoi(struct irq_data *d)
2142{
2143 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
2144 struct irq_chip *chip = irq_data_get_irq_chip(d);
2145 struct pnv_phb *phb = container_of(chip, struct pnv_phb,
2146 ioda.irq_chip);
2147 int64_t rc;
2148
2149 rc = opal_pci_msi_eoi(phb->opal_id, hw_irq);
2150 WARN_ON_ONCE(rc);
2151
2152 icp_native_eoi(d);
2153}
2154
fd9a1c26
IM
2155
2156static void set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq)
2157{
2158 struct irq_data *idata;
2159 struct irq_chip *ichip;
2160
2161 if (phb->type != PNV_PHB_IODA2)
2162 return;
2163
2164 if (!phb->ioda.irq_chip_init) {
2165 /*
2166 * First time we setup an MSI IRQ, we need to setup the
2167 * corresponding IRQ chip to route correctly.
2168 */
2169 idata = irq_get_irq_data(virq);
2170 ichip = irq_data_get_irq_chip(idata);
2171 phb->ioda.irq_chip_init = 1;
2172 phb->ioda.irq_chip = *ichip;
2173 phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi;
2174 }
2175 irq_set_chip(virq, &phb->ioda.irq_chip);
2176}
2177
80c49c7e
IM
2178#ifdef CONFIG_CXL_BASE
2179
6f963ec2 2180struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
80c49c7e
IM
2181{
2182 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2183
6f963ec2 2184 return of_node_get(hose->dn);
80c49c7e 2185}
6f963ec2 2186EXPORT_SYMBOL(pnv_pci_get_phb_node);
80c49c7e 2187
1212aa1c 2188int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode)
80c49c7e
IM
2189{
2190 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2191 struct pnv_phb *phb = hose->private_data;
2192 struct pnv_ioda_pe *pe;
2193 int rc;
2194
2195 pe = pnv_ioda_get_pe(dev);
2196 if (!pe)
2197 return -ENODEV;
2198
2199 pe_info(pe, "Switching PHB to CXL\n");
2200
1212aa1c 2201 rc = opal_pci_set_phb_cxl_mode(phb->opal_id, mode, pe->pe_number);
80c49c7e
IM
2202 if (rc)
2203 dev_err(&dev->dev, "opal_pci_set_phb_cxl_mode failed: %i\n", rc);
2204
2205 return rc;
2206}
1212aa1c 2207EXPORT_SYMBOL(pnv_phb_to_cxl_mode);
80c49c7e
IM
2208
2209/* Find the PHB for the cxl dev and allocate MSI hwirqs.
2210 * Returns the absolute hardware IRQ number.
2211 */
2212int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num)
2213{
2214 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2215 struct pnv_phb *phb = hose->private_data;
2216 int hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, num);
2217
2218 if (hwirq < 0) {
2219 dev_warn(&dev->dev, "Failed to find a free MSI\n");
2220 return -ENOSPC;
2221 }
2222
2223 return phb->msi_base + hwirq;
2224}
2225EXPORT_SYMBOL(pnv_cxl_alloc_hwirqs);
2226
2227void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num)
2228{
2229 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2230 struct pnv_phb *phb = hose->private_data;
2231
2232 msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, num);
2233}
2234EXPORT_SYMBOL(pnv_cxl_release_hwirqs);
2235
2236void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs,
2237 struct pci_dev *dev)
2238{
2239 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2240 struct pnv_phb *phb = hose->private_data;
2241 int i, hwirq;
2242
2243 for (i = 1; i < CXL_IRQ_RANGES; i++) {
2244 if (!irqs->range[i])
2245 continue;
2246 pr_devel("cxl release irq range 0x%x: offset: 0x%lx limit: %ld\n",
2247 i, irqs->offset[i],
2248 irqs->range[i]);
2249 hwirq = irqs->offset[i] - phb->msi_base;
2250 msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq,
2251 irqs->range[i]);
2252 }
2253}
2254EXPORT_SYMBOL(pnv_cxl_release_hwirq_ranges);
2255
2256int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
2257 struct pci_dev *dev, int num)
2258{
2259 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2260 struct pnv_phb *phb = hose->private_data;
2261 int i, hwirq, try;
2262
2263 memset(irqs, 0, sizeof(struct cxl_irq_ranges));
2264
2265 /* 0 is reserved for the multiplexed PSL DSI interrupt */
2266 for (i = 1; i < CXL_IRQ_RANGES && num; i++) {
2267 try = num;
2268 while (try) {
2269 hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, try);
2270 if (hwirq >= 0)
2271 break;
2272 try /= 2;
2273 }
2274 if (!try)
2275 goto fail;
2276
2277 irqs->offset[i] = phb->msi_base + hwirq;
2278 irqs->range[i] = try;
2279 pr_devel("cxl alloc irq range 0x%x: offset: 0x%lx limit: %li\n",
2280 i, irqs->offset[i], irqs->range[i]);
2281 num -= try;
2282 }
2283 if (num)
2284 goto fail;
2285
2286 return 0;
2287fail:
2288 pnv_cxl_release_hwirq_ranges(irqs, dev);
2289 return -ENOSPC;
2290}
2291EXPORT_SYMBOL(pnv_cxl_alloc_hwirq_ranges);
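/*
 * Standalone model (illustrative names) of the retry strategy above:
 * halve the requested contiguous run until the bitmap can satisfy it,
 * so a request for 7 IRQs may be met as runs of 7, 3, or 1.
 */
static int example_alloc_run(int (*try_alloc)(int n), int num)
{
	int try = num;

	while (try) {
		if (try_alloc(try) >= 0)
			return try;	/* got a contiguous run of 'try' */
		try /= 2;		/* shrink and retry */
	}
	return -1;			/* nothing left to try */
}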
2292
2293int pnv_cxl_get_irq_count(struct pci_dev *dev)
2294{
2295 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2296 struct pnv_phb *phb = hose->private_data;
2297
2298 return phb->msi_bmp.irq_count;
2299}
2300EXPORT_SYMBOL(pnv_cxl_get_irq_count);
2301
2302int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
2303 unsigned int virq)
2304{
2305 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2306 struct pnv_phb *phb = hose->private_data;
2307 unsigned int xive_num = hwirq - phb->msi_base;
2308 struct pnv_ioda_pe *pe;
2309 int rc;
2310
2311 if (!(pe = pnv_ioda_get_pe(dev)))
2312 return -ENODEV;
2313
2314 /* Assign XIVE to PE */
2315 rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
2316 if (rc) {
2317 pe_warn(pe, "%s: OPAL error %d setting msi_base 0x%x "
2318 "hwirq 0x%x XIVE 0x%x PE\n",
2319 pci_name(dev), rc, phb->msi_base, hwirq, xive_num);
2320 return -EIO;
2321 }
2322 set_msi_irq_chip(phb, virq);
2323
2324 return 0;
2325}
2326EXPORT_SYMBOL(pnv_cxl_ioda_msi_setup);
2327#endif
2328
184cd4a3 2329static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
137436c9
GS
2330 unsigned int hwirq, unsigned int virq,
2331 unsigned int is_64, struct msi_msg *msg)
184cd4a3
BH
2332{
2333 struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
2334 unsigned int xive_num = hwirq - phb->msi_base;
3a1a4661 2335 __be32 data;
184cd4a3
BH
2336 int rc;
2337
2338 /* No PE assigned ? bail out ... no MSI for you ! */
2339 if (pe == NULL)
2340 return -ENXIO;
2341
2342 /* Check if we have an MVE */
2343 if (pe->mve_number < 0)
2344 return -ENXIO;
2345
b72c1f65 2346 /* Force 32-bit MSI on some broken devices */
36074381 2347 if (dev->no_64bit_msi)
b72c1f65
BH
2348 is_64 = 0;
2349
184cd4a3
BH
2350 /* Assign XIVE to PE */
2351 rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
2352 if (rc) {
2353 pr_warn("%s: OPAL error %d setting XIVE %d PE\n",
2354 pci_name(dev), rc, xive_num);
2355 return -EIO;
2356 }
2357
2358 if (is_64) {
3a1a4661
BH
2359 __be64 addr64;
2360
184cd4a3
BH
2361 rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1,
2362 &addr64, &data);
2363 if (rc) {
2364 pr_warn("%s: OPAL error %d getting 64-bit MSI data\n",
2365 pci_name(dev), rc);
2366 return -EIO;
2367 }
3a1a4661
BH
2368 msg->address_hi = be64_to_cpu(addr64) >> 32;
2369 msg->address_lo = be64_to_cpu(addr64) & 0xfffffffful;
184cd4a3 2370 } else {
3a1a4661
BH
2371 __be32 addr32;
2372
184cd4a3
BH
2373 rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1,
2374 &addr32, &data);
2375 if (rc) {
2376 pr_warn("%s: OPAL error %d getting 32-bit MSI data\n",
2377 pci_name(dev), rc);
2378 return -EIO;
2379 }
2380 msg->address_hi = 0;
3a1a4661 2381 msg->address_lo = be32_to_cpu(addr32);
184cd4a3 2382 }
3a1a4661 2383 msg->data = be32_to_cpu(data);
184cd4a3 2384
fd9a1c26 2385 set_msi_irq_chip(phb, virq);
137436c9 2386
184cd4a3
BH
2387 pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
2388 " address=%x_%08x data=%x PE# %d\n",
2389 pci_name(dev), is_64 ? "64" : "32", hwirq, xive_num,
2390 msg->address_hi, msg->address_lo, data, pe->pe_number);
2391
2392 return 0;
2393}
2394
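/*
 * Illustration of the message packing above, assuming an example
 * 64-bit doorbell address of 0x3fe080010000: the MSI message is just
 * the two address halves plus the XIVE-selecting data word.
 */
static inline void example_pack_msi_addr(u64 addr, u32 *hi, u32 *lo)
{
	*hi = addr >> 32;		/* 0x00003fe0 */
	*lo = addr & 0xfffffffful;	/* 0x80010000 */
}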
2395static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
2396{
fb1b55d6 2397 unsigned int count;
184cd4a3
BH
2398 const __be32 *prop = of_get_property(phb->hose->dn,
2399 "ibm,opal-msi-ranges", NULL);
2400 if (!prop) {
2401 /* BML Fallback */
2402 prop = of_get_property(phb->hose->dn, "msi-ranges", NULL);
2403 }
2404 if (!prop)
2405 return;
2406
2407 phb->msi_base = be32_to_cpup(prop);
fb1b55d6
GS
2408 count = be32_to_cpup(prop + 1);
2409 if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) {
184cd4a3
BH
2410 pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
2411 phb->hose->global_number);
2412 return;
2413 }
fb1b55d6 2414
184cd4a3
BH
2415 phb->msi_setup = pnv_pci_ioda_msi_setup;
2416 phb->msi32_support = 1;
2417 pr_info(" Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
fb1b55d6 2418 count, phb->msi_base);
184cd4a3
BH
2419}
2420#else
2421static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
2422#endif /* CONFIG_PCI_MSI */
2423
6e628c7d
WY
2424#ifdef CONFIG_PCI_IOV
2425static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
2426{
2427 struct pci_controller *hose;
2428 struct pnv_phb *phb;
2429 struct resource *res;
2430 int i;
2431 resource_size_t size;
2432 struct pci_dn *pdn;
5b88ec22 2433 int mul, total_vfs;
6e628c7d
WY
2434
2435 if (!pdev->is_physfn || pdev->is_added)
2436 return;
2437
2438 hose = pci_bus_to_host(pdev->bus);
2439 phb = hose->private_data;
2440
2441 pdn = pci_get_pdn(pdev);
2442 pdn->vfs_expanded = 0;
2443
5b88ec22
WY
2444 total_vfs = pci_sriov_get_totalvfs(pdev);
2445 pdn->m64_per_iov = 1;
2446 mul = phb->ioda.total_pe;
2447
2448 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
2449 res = &pdev->resource[i + PCI_IOV_RESOURCES];
2450 if (!res->flags || res->parent)
2451 continue;
2452 if (!pnv_pci_is_mem_pref_64(res->flags)) {
2453 dev_warn(&pdev->dev, " non M64 VF BAR%d: %pR\n",
2454 i, res);
2455 continue;
2456 }
2457
2458 size = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES);
2459
2460 /* bigger than 64M */
2461 if (size > (1 << 26)) {
 2462 dev_info(&pdev->dev, "PowerNV: VF BAR%d: %pR IOV size is bigger than 64M, rounding up to a power of two\n",
2463 i, res);
2464 pdn->m64_per_iov = M64_PER_IOV;
2465 mul = roundup_pow_of_two(total_vfs);
2466 break;
2467 }
2468 }
2469
6e628c7d
WY
2470 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
2471 res = &pdev->resource[i + PCI_IOV_RESOURCES];
2472 if (!res->flags || res->parent)
2473 continue;
2474 if (!pnv_pci_is_mem_pref_64(res->flags)) {
2475 dev_warn(&pdev->dev, "Skipping expanding VF BAR%d: %pR\n",
2476 i, res);
2477 continue;
2478 }
2479
2480 dev_dbg(&pdev->dev, " Fixing VF BAR%d: %pR to\n", i, res);
2481 size = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES);
5b88ec22 2482 res->end = res->start + size * mul - 1;
6e628c7d
WY
2483 dev_dbg(&pdev->dev, " %pR\n", res);
2484 dev_info(&pdev->dev, "VF BAR%d: %pR (expanded to %d VFs for PE alignment)",
5b88ec22 2485 i, res, mul);
6e628c7d 2486 }
5b88ec22 2487 pdn->vfs_expanded = mul;
6e628c7d
WY
2488}
2489#endif /* CONFIG_PCI_IOV */
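/*
 * Worked illustration (assumed numbers) of the expansion above: a 1M
 * per-VF BAR on a PHB with 256 PEs grows to 256M so that each VF's
 * slice can land in its own M64 segment and hence its own PE.
 */
static inline resource_size_t example_expanded_vf_bar(resource_size_t vf_size,
						      int mul)
{
	return vf_size * mul;	/* 0x100000 * 256 = 0x10000000 */
}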
2490
11685bec
GS
2491/*
2492 * This function is supposed to be called on a per-PE basis, from
2493 * top to bottom, so the I/O or MMIO segment assigned to a
2494 * parent PE can be overridden by its child PEs if necessary.
2495 */
cad5cef6
GKH
2496static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
2497 struct pnv_ioda_pe *pe)
11685bec
GS
2498{
2499 struct pnv_phb *phb = hose->private_data;
2500 struct pci_bus_region region;
2501 struct resource *res;
2502 int i, index;
2503 int rc;
2504
2505 /*
 2506 * NOTE: We only care about PCI-bus-based PEs for now. PCI-
 2507 * device-based PEs, for example SR-IOV-sensitive VFs, will
 2508 * be figured out later.
2509 */
2510 BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)));
2511
2512 pci_bus_for_each_resource(pe->pbus, res, i) {
2513 if (!res || !res->flags ||
2514 res->start > res->end)
2515 continue;
2516
2517 if (res->flags & IORESOURCE_IO) {
2518 region.start = res->start - phb->ioda.io_pci_base;
2519 region.end = res->end - phb->ioda.io_pci_base;
2520 index = region.start / phb->ioda.io_segsize;
2521
2522 while (index < phb->ioda.total_pe &&
2523 region.start <= region.end) {
2524 phb->ioda.io_segmap[index] = pe->pe_number;
2525 rc = opal_pci_map_pe_mmio_window(phb->opal_id,
2526 pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
2527 if (rc != OPAL_SUCCESS) {
2528 pr_err("%s: OPAL error %d when mapping IO "
2529 "segment #%d to PE#%d\n",
2530 __func__, rc, index, pe->pe_number);
2531 break;
2532 }
2533
2534 region.start += phb->ioda.io_segsize;
2535 index++;
2536 }
027fa02f
GS
2537 } else if ((res->flags & IORESOURCE_MEM) &&
2538 !pnv_pci_is_mem_pref_64(res->flags)) {
11685bec 2539 region.start = res->start -
3fd47f06 2540 hose->mem_offset[0] -
11685bec
GS
2541 phb->ioda.m32_pci_base;
2542 region.end = res->end -
3fd47f06 2543 hose->mem_offset[0] -
11685bec
GS
2544 phb->ioda.m32_pci_base;
2545 index = region.start / phb->ioda.m32_segsize;
2546
2547 while (index < phb->ioda.total_pe &&
2548 region.start <= region.end) {
2549 phb->ioda.m32_segmap[index] = pe->pe_number;
2550 rc = opal_pci_map_pe_mmio_window(phb->opal_id,
2551 pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
2552 if (rc != OPAL_SUCCESS) {
2553 pr_err("%s: OPAL error %d when mapping M32 "
2554 "segment#%d to PE#%d",
2555 __func__, rc, index, pe->pe_number);
2556 break;
2557 }
2558
2559 region.start += phb->ioda.m32_segsize;
2560 index++;
2561 }
2562 }
2563 }
2564}
2565
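/*
 * Sketch of the window-to-segment mapping above (illustrative numbers):
 * a resource start, already reduced to an offset within the PHB window,
 * is turned into a starting segment index, and every segment the range
 * touches is then pointed at the owning PE.
 */
static inline int example_segment_index(resource_size_t offset,
					resource_size_t segsize)
{
	return offset / segsize;	/* e.g. 0x1800000 / 0x1000000 -> 1 */
}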
cad5cef6 2566static void pnv_pci_ioda_setup_seg(void)
11685bec
GS
2567{
2568 struct pci_controller *tmp, *hose;
2569 struct pnv_phb *phb;
2570 struct pnv_ioda_pe *pe;
2571
2572 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
2573 phb = hose->private_data;
2574 list_for_each_entry(pe, &phb->ioda.pe_list, list) {
2575 pnv_ioda_setup_pe_seg(hose, pe);
2576 }
2577 }
2578}
2579
cad5cef6 2580static void pnv_pci_ioda_setup_DMA(void)
13395c48
GS
2581{
2582 struct pci_controller *hose, *tmp;
db1266c8 2583 struct pnv_phb *phb;
13395c48
GS
2584
2585 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
2586 pnv_ioda_setup_dma(hose->private_data);
db1266c8
GS
2587
2588 /* Mark the PHB initialization done */
2589 phb = hose->private_data;
2590 phb->initialized = 1;
13395c48
GS
2591 }
2592}
2593
37c367f2
GS
2594static void pnv_pci_ioda_create_dbgfs(void)
2595{
2596#ifdef CONFIG_DEBUG_FS
2597 struct pci_controller *hose, *tmp;
2598 struct pnv_phb *phb;
2599 char name[16];
2600
2601 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
2602 phb = hose->private_data;
2603
2604 sprintf(name, "PCI%04x", hose->global_number);
2605 phb->dbgfs = debugfs_create_dir(name, powerpc_debugfs_root);
2606 if (!phb->dbgfs)
 2607 pr_warning("%s: Failed to create debugfs on PHB#%x\n",
2608 __func__, hose->global_number);
2609 }
2610#endif /* CONFIG_DEBUG_FS */
2611}
2612
cad5cef6 2613static void pnv_pci_ioda_fixup(void)
fb446ad0
GS
2614{
2615 pnv_pci_ioda_setup_PEs();
11685bec 2616 pnv_pci_ioda_setup_seg();
13395c48 2617 pnv_pci_ioda_setup_DMA();
e9cc17d4 2618
37c367f2
GS
2619 pnv_pci_ioda_create_dbgfs();
2620
e9cc17d4 2621#ifdef CONFIG_EEH
e9cc17d4 2622 eeh_init();
dadcd6d6 2623 eeh_addr_cache_build();
e9cc17d4 2624#endif
fb446ad0
GS
2625}
2626
271fd03a
GS
2627/*
2628 * Returns the alignment for I/O or memory windows for P2P
2629 * bridges. That actually depends on how PEs are segmented.
2630 * For now, we return the I/O or M32 segment size for PE-sensitive
2631 * P2P bridges. Otherwise, the default values (4KiB for I/O,
2632 * 1MiB for memory) will be returned.
2633 *
2634 * The current PCI bus might be put into one PE, which was
2635 * created against the parent PCI bridge. In that case, we
2636 * needn't enlarge the alignment, which saves some
2637 * resources.
2638 */
2639static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
2640 unsigned long type)
2641{
2642 struct pci_dev *bridge;
2643 struct pci_controller *hose = pci_bus_to_host(bus);
2644 struct pnv_phb *phb = hose->private_data;
2645 int num_pci_bridges = 0;
2646
2647 bridge = bus->self;
2648 while (bridge) {
2649 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) {
2650 num_pci_bridges++;
2651 if (num_pci_bridges >= 2)
2652 return 1;
2653 }
2654
2655 bridge = bridge->bus->self;
2656 }
2657
262af557
GC
 2658 /* We fall back to M32 if M64 isn't supported */
2659 if (phb->ioda.m64_segsize &&
2660 pnv_pci_is_mem_pref_64(type))
2661 return phb->ioda.m64_segsize;
271fd03a
GS
2662 if (type & IORESOURCE_MEM)
2663 return phb->ioda.m32_segsize;
2664
2665 return phb->ioda.io_segsize;
2666}
2667
5350ab3f
WY
2668#ifdef CONFIG_PCI_IOV
2669static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,
2670 int resno)
2671{
2672 struct pci_dn *pdn = pci_get_pdn(pdev);
2673 resource_size_t align, iov_align;
2674
2675 iov_align = resource_size(&pdev->resource[resno]);
2676 if (iov_align)
2677 return iov_align;
2678
2679 align = pci_iov_resource_size(pdev, resno);
2680 if (pdn->vfs_expanded)
2681 return pdn->vfs_expanded * align;
2682
2683 return align;
2684}
2685#endif /* CONFIG_PCI_IOV */
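/*
 * Illustration of the alignment rule above, under assumed numbers: once
 * a VF BAR has been expanded for vfs_expanded VFs, aligning it to the
 * full expanded size keeps every per-VF slice segment-aligned.
 */
static inline resource_size_t example_vf_bar_align(resource_size_t per_vf,
						   u16 vfs_expanded)
{
	/* e.g. 1M per VF, 256 expanded VFs -> 256M alignment */
	return vfs_expanded ? vfs_expanded * per_vf : per_vf;
}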
2686
184cd4a3
BH
2687/* Prevent enabling devices for which we couldn't properly
2688 * assign a PE
2689 */
c88c2a18 2690static bool pnv_pci_enable_device_hook(struct pci_dev *dev)
184cd4a3 2691{
db1266c8
GS
2692 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2693 struct pnv_phb *phb = hose->private_data;
2694 struct pci_dn *pdn;
184cd4a3 2695
db1266c8
GS
 2696 /* The function is probably called while the PEs have
 2697 * not been created yet, for example during resource
 2698 * reassignment in the PCI probe period. We just skip the
 2699 * check if the PEs aren't ready.
2700 */
2701 if (!phb->initialized)
c88c2a18 2702 return true;
db1266c8 2703
b72c1f65 2704 pdn = pci_get_pdn(dev);
184cd4a3 2705 if (!pdn || pdn->pe_number == IODA_INVALID_PE)
c88c2a18 2706 return false;
db1266c8 2707
c88c2a18 2708 return true;
184cd4a3
BH
2709}
2710
2711static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus,
2712 u32 devfn)
2713{
2714 return phb->ioda.pe_rmap[(bus->number << 8) | devfn];
2715}
2716
7a8e6bbf 2717static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
73ed148a 2718{
7a8e6bbf
MN
2719 struct pnv_phb *phb = hose->private_data;
2720
d1a85eee 2721 opal_pci_reset(phb->opal_id, OPAL_RESET_PCI_IODA_TABLE,
73ed148a
BH
2722 OPAL_ASSERT_RESET);
2723}
2724
92ae0353
DA
2725static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
2726 .dma_dev_setup = pnv_pci_dma_dev_setup,
2727#ifdef CONFIG_PCI_MSI
2728 .setup_msi_irqs = pnv_setup_msi_irqs,
2729 .teardown_msi_irqs = pnv_teardown_msi_irqs,
2730#endif
2731 .enable_device_hook = pnv_pci_enable_device_hook,
2732 .window_alignment = pnv_pci_window_alignment,
2733 .reset_secondary_bus = pnv_pci_reset_secondary_bus,
763d2d8d 2734 .dma_set_mask = pnv_pci_ioda_dma_set_mask,
7a8e6bbf 2735 .shutdown = pnv_pci_ioda_shutdown,
92ae0353
DA
2736};
2737
e51df2c1
AB
2738static void __init pnv_pci_init_ioda_phb(struct device_node *np,
2739 u64 hub_id, int ioda_type)
184cd4a3
BH
2740{
2741 struct pci_controller *hose;
184cd4a3 2742 struct pnv_phb *phb;
8184616f 2743 unsigned long size, m32map_off, pemap_off, iomap_off = 0;
c681b93c 2744 const __be64 *prop64;
3a1a4661 2745 const __be32 *prop32;
f1b7cc3e 2746 int len;
184cd4a3
BH
2747 u64 phb_id;
2748 void *aux;
2749 long rc;
2750
58d714ec 2751 pr_info("Initializing IODA%d OPAL PHB %s\n", ioda_type, np->full_name);
184cd4a3
BH
2752
2753 prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
2754 if (!prop64) {
2755 pr_err(" Missing \"ibm,opal-phbid\" property !\n");
2756 return;
2757 }
2758 phb_id = be64_to_cpup(prop64);
2759 pr_debug(" PHB-ID : 0x%016llx\n", phb_id);
2760
e39f223f 2761 phb = memblock_virt_alloc(sizeof(struct pnv_phb), 0);
58d714ec
GS
2762
2763 /* Allocate PCI controller */
58d714ec
GS
2764 phb->hose = hose = pcibios_alloc_controller(np);
2765 if (!phb->hose) {
2766 pr_err(" Can't allocate PCI controller for %s\n",
184cd4a3 2767 np->full_name);
e39f223f 2768 memblock_free(__pa(phb), sizeof(struct pnv_phb));
184cd4a3
BH
2769 return;
2770 }
2771
2772 spin_lock_init(&phb->lock);
f1b7cc3e
GS
2773 prop32 = of_get_property(np, "bus-range", &len);
2774 if (prop32 && len == 8) {
3a1a4661
BH
2775 hose->first_busno = be32_to_cpu(prop32[0]);
2776 hose->last_busno = be32_to_cpu(prop32[1]);
f1b7cc3e
GS
2777 } else {
2778 pr_warn(" Broken <bus-range> on %s\n", np->full_name);
2779 hose->first_busno = 0;
2780 hose->last_busno = 0xff;
2781 }
184cd4a3 2782 hose->private_data = phb;
e9cc17d4 2783 phb->hub_id = hub_id;
184cd4a3 2784 phb->opal_id = phb_id;
aa0c033f 2785 phb->type = ioda_type;
781a868f 2786 mutex_init(&phb->ioda.pe_alloc_mutex);
184cd4a3 2787
cee72d5b
BH
2788 /* Detect specific models for error handling */
2789 if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
2790 phb->model = PNV_PHB_MODEL_P7IOC;
f3d40c25 2791 else if (of_device_is_compatible(np, "ibm,power8-pciex"))
aa0c033f 2792 phb->model = PNV_PHB_MODEL_PHB3;
cee72d5b
BH
2793 else
2794 phb->model = PNV_PHB_MODEL_UNKNOWN;
2795
aa0c033f 2796 /* Parse 32-bit and IO ranges (if any) */
2f1ec02e 2797 pci_process_bridge_OF_ranges(hose, np, !hose->global_number);
184cd4a3 2798
aa0c033f 2799 /* Get registers */
184cd4a3
BH
2800 phb->regs = of_iomap(np, 0);
2801 if (phb->regs == NULL)
2802 pr_err(" Failed to map registers !\n");
2803
184cd4a3 2804 /* Initialize more IODA stuff */
36954dc7 2805 phb->ioda.total_pe = 1;
aa0c033f 2806 prop32 = of_get_property(np, "ibm,opal-num-pes", NULL);
36954dc7 2807 if (prop32)
3a1a4661 2808 phb->ioda.total_pe = be32_to_cpup(prop32);
36954dc7
GS
2809 prop32 = of_get_property(np, "ibm,opal-reserved-pe", NULL);
2810 if (prop32)
2811 phb->ioda.reserved_pe = be32_to_cpup(prop32);
262af557
GC
2812
2813 /* Parse 64-bit MMIO range */
2814 pnv_ioda_parse_m64_window(phb);
2815
184cd4a3 2816 phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
aa0c033f 2817 /* FW Has already off top 64k of M32 space (MSI space) */
184cd4a3
BH
2818 phb->ioda.m32_size += 0x10000;
2819
2820 phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe;
3fd47f06 2821 phb->ioda.m32_pci_base = hose->mem_resources[0].start - hose->mem_offset[0];
184cd4a3
BH
2822 phb->ioda.io_size = hose->pci_io_size;
2823 phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe;
2824 phb->ioda.io_pci_base = 0; /* XXX calculate this ? */
2825
c35d2a8c 2826 /* Allocate aux data & arrays. We don't have IO ports on PHB3 */
184cd4a3
BH
2827 size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
2828 m32map_off = size;
e47747f4 2829 size += phb->ioda.total_pe * sizeof(phb->ioda.m32_segmap[0]);
c35d2a8c
GS
2830 if (phb->type == PNV_PHB_IODA1) {
2831 iomap_off = size;
2832 size += phb->ioda.total_pe * sizeof(phb->ioda.io_segmap[0]);
2833 }
184cd4a3
BH
2834 pemap_off = size;
2835 size += phb->ioda.total_pe * sizeof(struct pnv_ioda_pe);
e39f223f 2836 aux = memblock_virt_alloc(size, 0);
184cd4a3
BH
2837 phb->ioda.pe_alloc = aux;
2838 phb->ioda.m32_segmap = aux + m32map_off;
c35d2a8c
GS
2839 if (phb->type == PNV_PHB_IODA1)
2840 phb->ioda.io_segmap = aux + iomap_off;
184cd4a3 2841 phb->ioda.pe_array = aux + pemap_off;
36954dc7 2842 set_bit(phb->ioda.reserved_pe, phb->ioda.pe_alloc);
184cd4a3 2843
7ebdf956 2844 INIT_LIST_HEAD(&phb->ioda.pe_dma_list);
184cd4a3 2845 INIT_LIST_HEAD(&phb->ioda.pe_list);
781a868f 2846 mutex_init(&phb->ioda.pe_list_mutex);
184cd4a3
BH
2847
2848 /* Calculate how many 32-bit TCE segments we have */
2849 phb->ioda.tce32_count = phb->ioda.m32_pci_base >> 28;
2850
aa0c033f 2851#if 0 /* We should really do that ... */
184cd4a3
BH
2852 rc = opal_pci_set_phb_mem_window(opal->phb_id,
2853 window_type,
2854 window_num,
2855 starting_real_address,
2856 starting_pci_address,
2857 segment_size);
2858#endif
2859
262af557
GC
2860 pr_info(" %03d (%03d) PE's M32: 0x%x [segment=0x%x]\n",
2861 phb->ioda.total_pe, phb->ioda.reserved_pe,
2862 phb->ioda.m32_size, phb->ioda.m32_segsize);
2863 if (phb->ioda.m64_size)
2864 pr_info(" M64: 0x%lx [segment=0x%lx]\n",
2865 phb->ioda.m64_size, phb->ioda.m64_segsize);
2866 if (phb->ioda.io_size)
2867 pr_info(" IO: 0x%x [segment=0x%x]\n",
2868 phb->ioda.io_size, phb->ioda.io_segsize);
2869
184cd4a3 2870
184cd4a3 2871 phb->hose->ops = &pnv_pci_ops;
49dec922
GS
2872 phb->get_pe_state = pnv_ioda_get_pe_state;
2873 phb->freeze_pe = pnv_ioda_freeze_pe;
2874 phb->unfreeze_pe = pnv_ioda_unfreeze_pe;
184cd4a3
BH
2875
2876 /* Setup RID -> PE mapping function */
2877 phb->bdfn_to_pe = pnv_ioda_bdfn_to_pe;
2878
2879 /* Setup TCEs */
2880 phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
fe7e85c6 2881 phb->dma_get_required_mask = pnv_pci_ioda_dma_get_required_mask;
184cd4a3
BH
2882
2883 /* Setup MSI support */
2884 pnv_pci_init_ioda_msis(phb);
2885
c40a4210
GS
2886 /*
2887 * We pass the PCI probe flag PCI_REASSIGN_ALL_RSRC here
 2888 * to let the PCI core do resource assignment. It's assumed
 2889 * that the PCI core will do correct I/O and MMIO alignment
 2890 * for the P2P bridge BARs so that each PCI bus (excluding
 2891 * the child P2P bridges) can form an individual PE.
184cd4a3 2892 */
fb446ad0 2893 ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
92ae0353 2894 hose->controller_ops = pnv_pci_ioda_controller_ops;
ad30cb99 2895
6e628c7d
WY
2896#ifdef CONFIG_PCI_IOV
2897 ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources;
5350ab3f 2898 ppc_md.pcibios_iov_resource_alignment = pnv_pci_iov_resource_alignment;
ad30cb99
ME
2899#endif
2900
c40a4210 2901 pci_add_flags(PCI_REASSIGN_ALL_RSRC);
184cd4a3
BH
2902
2903 /* Reset IODA tables to a clean state */
d1a85eee 2904 rc = opal_pci_reset(phb_id, OPAL_RESET_PCI_IODA_TABLE, OPAL_ASSERT_RESET);
184cd4a3 2905 if (rc)
f11fe552 2906 pr_warning(" OPAL Error %ld performing IODA table reset !\n", rc);
361f2a2a
GS
2907
 2908 /* If we're running in a kdump kernel, the previous kernel never
 2909 * shut down PCI devices correctly. We already have the IODA table
 2910 * cleaned out, so we have to issue a PHB reset to stop all PCI
 2911 * transactions from the previous kernel.
2912 */
2913 if (is_kdump_kernel()) {
2914 pr_info(" Issue PHB reset ...\n");
cadf364d
GS
2915 pnv_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL);
2916 pnv_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE);
361f2a2a 2917 }
262af557 2918
9e9e8935
GS
2919 /* Remove M64 resource if we can't configure it successfully */
2920 if (!phb->init_m64 || phb->init_m64(phb))
262af557 2921 hose->mem_resources[1].flags = 0;
aa0c033f
GS
2922}
2923
67975005 2924void __init pnv_pci_init_ioda2_phb(struct device_node *np)
aa0c033f 2925{
e9cc17d4 2926 pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2);
184cd4a3
BH
2927}
2928
2929void __init pnv_pci_init_ioda_hub(struct device_node *np)
2930{
2931 struct device_node *phbn;
c681b93c 2932 const __be64 *prop64;
184cd4a3
BH
2933 u64 hub_id;
2934
2935 pr_info("Probing IODA IO-Hub %s\n", np->full_name);
2936
2937 prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
2938 if (!prop64) {
2939 pr_err(" Missing \"ibm,opal-hubid\" property !\n");
2940 return;
2941 }
2942 hub_id = be64_to_cpup(prop64);
2943 pr_devel(" HUB-ID : 0x%016llx\n", hub_id);
2944
2945 /* Count child PHBs */
2946 for_each_child_of_node(np, phbn) {
2947 /* Look for IODA1 PHBs */
2948 if (of_device_is_compatible(phbn, "ibm,ioda-phb"))
e9cc17d4 2949 pnv_pci_init_ioda_phb(phbn, hub_id, PNV_PHB_IODA1);
184cd4a3
BH
2950 }
2951}