/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/crash_dump.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/memblock.h>
#include <linux/iommu.h>
#include <linux/rculist.h>
#include <linux/sizes.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/xics.h>
#include <asm/debug.h>
#include <asm/firmware.h>
#include <asm/pnv-pci.h>
#include <asm/mmzone.h>

#include <misc/cxl-base.h>

#include "powernv.h"
#include "pci.h"

#define PNV_IODA1_M64_NUM	16	/* Number of M64 BARs	*/
#define PNV_IODA1_M64_SEGS	8	/* Segments per M64 BAR	*/
#define PNV_IODA1_DMA32_SEGSIZE	0x10000000

#define POWERNV_IOMMU_DEFAULT_LEVELS	1
#define POWERNV_IOMMU_MAX_LEVELS	5

static const char * const pnv_phb_names[] = { "IODA1", "IODA2", "NPU" };
static void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl);

void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
		     const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	char pfix[32];

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (pe->flags & PNV_IODA_PE_DEV)
		strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix));
	else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
		sprintf(pfix, "%04x:%02x     ",
			pci_domain_nr(pe->pbus), pe->pbus->number);
#ifdef CONFIG_PCI_IOV
	else if (pe->flags & PNV_IODA_PE_VF)
		sprintf(pfix, "%04x:%02x:%2x.%d",
			pci_domain_nr(pe->parent_dev->bus),
			(pe->rid & 0xff00) >> 8,
			PCI_SLOT(pe->rid), PCI_FUNC(pe->rid));
#endif /* CONFIG_PCI_IOV */

	printk("%spci %s: [PE# %.3d] %pV",
	       level, pfix, pe->pe_number, &vaf);

	va_end(args);
}

static bool pnv_iommu_bypass_disabled __read_mostly;

static int __init iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;

	while (*str) {
		if (!strncmp(str, "nobypass", 8)) {
			pnv_iommu_bypass_disabled = true;
			pr_info("PowerNV: IOMMU bypass window disabled.\n");
			break;
		}
		str += strcspn(str, ",");
		if (*str == ',')
			str++;
	}

	return 0;
}
early_param("iommu", iommu_setup);

static inline bool pnv_pci_is_m64(struct pnv_phb *phb, struct resource *r)
{
	/*
	 * WARNING: We cannot rely on the resource flags. The Linux PCI
	 * allocation code sometimes decides to put a 64-bit prefetchable
	 * BAR in the 32-bit window, so we have to compare the addresses.
	 *
	 * For simplicity we only test resource start.
	 */
	return (r->start >= phb->ioda.m64_base &&
		r->start < (phb->ioda.m64_base + phb->ioda.m64_size));
}

static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)
{
	phb->ioda.pe_array[pe_no].phb = phb;
	phb->ioda.pe_array[pe_no].pe_number = pe_no;

	return &phb->ioda.pe_array[pe_no];
}

static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no)
{
	if (!(pe_no >= 0 && pe_no < phb->ioda.total_pe_num)) {
		pr_warn("%s: Invalid PE %d on PHB#%x\n",
			__func__, pe_no, phb->hose->global_number);
		return;
	}

	if (test_and_set_bit(pe_no, phb->ioda.pe_alloc))
		pr_debug("%s: PE %d was reserved on PHB#%x\n",
			 __func__, pe_no, phb->hose->global_number);

	pnv_ioda_init_pe(phb, pe_no);
}

static struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb)
{
	long pe;

	for (pe = phb->ioda.total_pe_num - 1; pe >= 0; pe--) {
		if (!test_and_set_bit(pe, phb->ioda.pe_alloc))
			return pnv_ioda_init_pe(phb, pe);
	}

	return NULL;
}

static void pnv_ioda_free_pe(struct pnv_ioda_pe *pe)
{
	struct pnv_phb *phb = pe->phb;
	unsigned int pe_num = pe->pe_number;

	WARN_ON(pe->pdev);

	memset(pe, 0, sizeof(struct pnv_ioda_pe));
	clear_bit(pe_num, phb->ioda.pe_alloc);
}

/* The default M64 BAR is shared by all PEs */
static int pnv_ioda2_init_m64(struct pnv_phb *phb)
{
	const char *desc;
	struct resource *r;
	s64 rc;

	/* Configure the default M64 BAR */
	rc = opal_pci_set_phb_mem_window(phb->opal_id,
					 OPAL_M64_WINDOW_TYPE,
					 phb->ioda.m64_bar_idx,
					 phb->ioda.m64_base,
					 0, /* unused */
					 phb->ioda.m64_size);
	if (rc != OPAL_SUCCESS) {
		desc = "configuring";
		goto fail;
	}

	/* Enable the default M64 BAR */
	rc = opal_pci_phb_mmio_enable(phb->opal_id,
				      OPAL_M64_WINDOW_TYPE,
				      phb->ioda.m64_bar_idx,
				      OPAL_ENABLE_M64_SPLIT);
	if (rc != OPAL_SUCCESS) {
		desc = "enabling";
		goto fail;
	}

	/*
	 * Exclude the segments for the reserved PE and the root bus PE,
	 * which are the first two or the last two PEs.
	 */
	r = &phb->hose->mem_resources[1];
	if (phb->ioda.reserved_pe_idx == 0)
		r->start += (2 * phb->ioda.m64_segsize);
	else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1))
		r->end -= (2 * phb->ioda.m64_segsize);
	else
		pr_warn("  Cannot strip M64 segment for reserved PE#%d\n",
			phb->ioda.reserved_pe_idx);

	return 0;

fail:
	pr_warn("  Failure %lld %s M64 BAR#%d\n",
		rc, desc, phb->ioda.m64_bar_idx);
	opal_pci_phb_mmio_enable(phb->opal_id,
				 OPAL_M64_WINDOW_TYPE,
				 phb->ioda.m64_bar_idx,
				 OPAL_DISABLE_M64);
	return -EIO;
}

static void pnv_ioda_reserve_dev_m64_pe(struct pci_dev *pdev,
					unsigned long *pe_bitmap)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct resource *r;
	resource_size_t base, sgsz, start, end;
	int segno, i;

	base = phb->ioda.m64_base;
	sgsz = phb->ioda.m64_segsize;
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		r = &pdev->resource[i];
		if (!r->parent || !pnv_pci_is_m64(phb, r))
			continue;

		start = _ALIGN_DOWN(r->start - base, sgsz);
		end = _ALIGN_UP(r->end - base, sgsz);
		for (segno = start / sgsz; segno < end / sgsz; segno++) {
			if (pe_bitmap)
				set_bit(segno, pe_bitmap);
			else
				pnv_ioda_reserve_pe(phb, segno);
		}
	}
}

static int pnv_ioda1_init_m64(struct pnv_phb *phb)
{
	struct resource *r;
	int index;

	/*
	 * There are 16 M64 BARs, each of which has 8 segments. So
	 * there are as many M64 segments as the maximum number of
	 * PEs, which is 128.
	 */
	for (index = 0; index < PNV_IODA1_M64_NUM; index++) {
		unsigned long base, segsz = phb->ioda.m64_segsize;
		int64_t rc;

		base = phb->ioda.m64_base +
		       index * PNV_IODA1_M64_SEGS * segsz;
		rc = opal_pci_set_phb_mem_window(phb->opal_id,
				OPAL_M64_WINDOW_TYPE, index, base, 0,
				PNV_IODA1_M64_SEGS * segsz);
		if (rc != OPAL_SUCCESS) {
			pr_warn("  Error %lld setting M64 PHB#%d-BAR#%d\n",
				rc, phb->hose->global_number, index);
			goto fail;
		}

		rc = opal_pci_phb_mmio_enable(phb->opal_id,
				OPAL_M64_WINDOW_TYPE, index,
				OPAL_ENABLE_M64_SPLIT);
		if (rc != OPAL_SUCCESS) {
			pr_warn("  Error %lld enabling M64 PHB#%d-BAR#%d\n",
				rc, phb->hose->global_number, index);
			goto fail;
		}
	}

	/*
	 * Exclude the segments for the reserved PE and the root bus PE,
	 * which are the first two or the last two PEs.
	 */
	r = &phb->hose->mem_resources[1];
	if (phb->ioda.reserved_pe_idx == 0)
		r->start += (2 * phb->ioda.m64_segsize);
	else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1))
		r->end -= (2 * phb->ioda.m64_segsize);
	else
		WARN(1, "Wrong reserved PE#%d on PHB#%d\n",
		     phb->ioda.reserved_pe_idx, phb->hose->global_number);

	return 0;

fail:
	for ( ; index >= 0; index--)
		opal_pci_phb_mmio_enable(phb->opal_id,
			OPAL_M64_WINDOW_TYPE, index, OPAL_DISABLE_M64);

	return -EIO;
}

static void pnv_ioda_reserve_m64_pe(struct pci_bus *bus,
				    unsigned long *pe_bitmap,
				    bool all)
{
	struct pci_dev *pdev;

	list_for_each_entry(pdev, &bus->devices, bus_list) {
		pnv_ioda_reserve_dev_m64_pe(pdev, pe_bitmap);

		if (all && pdev->subordinate)
			pnv_ioda_reserve_m64_pe(pdev->subordinate,
						pe_bitmap, all);
	}
}

static struct pnv_ioda_pe *pnv_ioda_pick_m64_pe(struct pci_bus *bus, bool all)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *master_pe, *pe;
	unsigned long size, *pe_alloc;
	int i;

	/* Root bus shouldn't use M64 */
	if (pci_is_root_bus(bus))
		return NULL;

	/* Allocate bitmap */
	size = _ALIGN_UP(phb->ioda.total_pe_num / 8, sizeof(unsigned long));
	pe_alloc = kzalloc(size, GFP_KERNEL);
	if (!pe_alloc) {
		pr_warn("%s: Out of memory !\n",
			__func__);
		return NULL;
	}

	/* Figure out reserved PE numbers by the PE */
	pnv_ioda_reserve_m64_pe(bus, pe_alloc, all);

	/*
	 * The current bus might not own an M64 window; in that case all
	 * M64 segments are contributed by its child buses and we needn't
	 * pick an M64-dependent PE# here.
	 */
	if (bitmap_empty(pe_alloc, phb->ioda.total_pe_num)) {
		kfree(pe_alloc);
		return NULL;
	}

	/*
	 * Figure out the master PE and put all slave PEs on the master
	 * PE's list to form a compound PE.
	 */
	master_pe = NULL;
	i = -1;
	while ((i = find_next_bit(pe_alloc, phb->ioda.total_pe_num, i + 1)) <
		phb->ioda.total_pe_num) {
		pe = &phb->ioda.pe_array[i];

		phb->ioda.m64_segmap[pe->pe_number] = pe->pe_number;
		if (!master_pe) {
			pe->flags |= PNV_IODA_PE_MASTER;
			INIT_LIST_HEAD(&pe->slaves);
			master_pe = pe;
		} else {
			pe->flags |= PNV_IODA_PE_SLAVE;
			pe->master = master_pe;
			list_add_tail(&pe->list, &master_pe->slaves);
		}

		/*
		 * P7IOC supports M64DT, which helps mapping an M64 segment
		 * to one particular PE#. However, PHB3 has a fixed mapping
		 * between M64 segment and PE#. In order to have the same
		 * logic for P7IOC and PHB3, we enforce the fixed mapping
		 * between M64 segment and PE# on P7IOC.
		 */
		if (phb->type == PNV_PHB_IODA1) {
			int64_t rc;

			rc = opal_pci_map_pe_mmio_window(phb->opal_id,
					pe->pe_number, OPAL_M64_WINDOW_TYPE,
					pe->pe_number / PNV_IODA1_M64_SEGS,
					pe->pe_number % PNV_IODA1_M64_SEGS);
			if (rc != OPAL_SUCCESS)
				pr_warn("%s: Error %lld mapping M64 for PHB#%d-PE#%d\n",
					__func__, rc, phb->hose->global_number,
					pe->pe_number);
		}
	}

	kfree(pe_alloc);
	return master_pe;
}

static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
{
	struct pci_controller *hose = phb->hose;
	struct device_node *dn = hose->dn;
	struct resource *res;
	u32 m64_range[2], i;
	const u32 *r;
	u64 pci_addr;

	if (phb->type != PNV_PHB_IODA1 && phb->type != PNV_PHB_IODA2) {
		pr_info("  M64 window not supported on this PHB type\n");
		return;
	}

	if (!firmware_has_feature(FW_FEATURE_OPAL)) {
		pr_info("  Firmware too old to support M64 window\n");
		return;
	}

	r = of_get_property(dn, "ibm,opal-m64-window", NULL);
	if (!r) {
		pr_info("  No <ibm,opal-m64-window> on %s\n",
			dn->full_name);
		return;
	}

	/*
	 * Find the available M64 BAR range and pick the last BAR for
	 * covering the whole 64-bit space. We support only one range.
	 */
	if (of_property_read_u32_array(dn, "ibm,opal-available-m64-ranges",
				       m64_range, 2)) {
		/* In absence of the property, assume 0..15 */
		m64_range[0] = 0;
		m64_range[1] = 16;
	}
	/* We only support 64 bits in our allocator */
	if (m64_range[1] > 63) {
		pr_warn("%s: Limiting M64 range to 63 (from %d) on PHB#%x\n",
			__func__, m64_range[1], phb->hose->global_number);
		m64_range[1] = 63;
	}
	/* Empty range, no m64 */
	if (m64_range[1] <= m64_range[0]) {
		pr_warn("%s: M64 empty, disabling M64 usage on PHB#%x\n",
			__func__, phb->hose->global_number);
		return;
	}

	/* Configure M64 information */
	res = &hose->mem_resources[1];
	res->name = dn->full_name;
	res->start = of_translate_address(dn, r + 2);
	res->end = res->start + of_read_number(r + 4, 2) - 1;
	res->flags = (IORESOURCE_MEM | IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);
	pci_addr = of_read_number(r, 2);
	hose->mem_offset[1] = res->start - pci_addr;

	phb->ioda.m64_size = resource_size(res);
	phb->ioda.m64_segsize = phb->ioda.m64_size / phb->ioda.total_pe_num;
	phb->ioda.m64_base = pci_addr;

	/* This lines up nicely with the display from processing OF ranges */
	pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx (M64 #%d..%d)\n",
		res->start, res->end, pci_addr, m64_range[0],
		m64_range[0] + m64_range[1] - 1);

	/* Mark all M64 used up by default */
	phb->ioda.m64_bar_alloc = (unsigned long)-1;

	/* Use last M64 BAR to cover M64 window */
	m64_range[1]--;
	phb->ioda.m64_bar_idx = m64_range[0] + m64_range[1];

	pr_info(" Using M64 #%d as default window\n", phb->ioda.m64_bar_idx);

	/* Mark remaining ones free */
	for (i = m64_range[0]; i < m64_range[1]; i++)
		clear_bit(i, &phb->ioda.m64_bar_alloc);

	/*
	 * Setup init functions for M64 based on IODA version; IODA3 uses
	 * the IODA2 code.
	 */
	if (phb->type == PNV_PHB_IODA1)
		phb->init_m64 = pnv_ioda1_init_m64;
	else
		phb->init_m64 = pnv_ioda2_init_m64;
	phb->reserve_m64_pe = pnv_ioda_reserve_m64_pe;
	phb->pick_m64_pe = pnv_ioda_pick_m64_pe;
}

static void pnv_ioda_freeze_pe(struct pnv_phb *phb, int pe_no)
{
	struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_no];
	struct pnv_ioda_pe *slave;
	s64 rc;

	/* Fetch master PE */
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		pe = pe->master;
		if (WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER)))
			return;

		pe_no = pe->pe_number;
	}

	/* Freeze master PE */
	rc = opal_pci_eeh_freeze_set(phb->opal_id,
				     pe_no,
				     OPAL_EEH_ACTION_SET_FREEZE_ALL);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
			__func__, rc, phb->hose->global_number, pe_no);
		return;
	}

	/* Freeze slave PEs */
	if (!(pe->flags & PNV_IODA_PE_MASTER))
		return;

	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_set(phb->opal_id,
					     slave->pe_number,
					     OPAL_EEH_ACTION_SET_FREEZE_ALL);
		if (rc != OPAL_SUCCESS)
			pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				slave->pe_number);
	}
}

static int pnv_ioda_unfreeze_pe(struct pnv_phb *phb, int pe_no, int opt)
{
	struct pnv_ioda_pe *pe, *slave;
	s64 rc;

	/* Find master PE */
	pe = &phb->ioda.pe_array[pe_no];
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		pe = pe->master;
		WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
		pe_no = pe->pe_number;
	}

	/* Clear frozen state for master PE */
	rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no, opt);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n",
			__func__, rc, opt, phb->hose->global_number, pe_no);
		return -EIO;
	}

	if (!(pe->flags & PNV_IODA_PE_MASTER))
		return 0;

	/* Clear frozen state for slave PEs */
	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_clear(phb->opal_id,
					       slave->pe_number,
					       opt);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n",
				__func__, rc, opt, phb->hose->global_number,
				slave->pe_number);
			return -EIO;
		}
	}

	return 0;
}

static int pnv_ioda_get_pe_state(struct pnv_phb *phb, int pe_no)
{
	struct pnv_ioda_pe *slave, *pe;
	u8 fstate, state;
	__be16 pcierr;
	s64 rc;

	/* Sanity check on PE number */
	if (pe_no < 0 || pe_no >= phb->ioda.total_pe_num)
		return OPAL_EEH_STOPPED_PERM_UNAVAIL;

	/*
	 * Fetch the master PE; the PE instance itself might not be
	 * initialized yet.
	 */
	pe = &phb->ioda.pe_array[pe_no];
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		pe = pe->master;
		WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
		pe_no = pe->pe_number;
	}

	/* Check the master PE */
	rc = opal_pci_eeh_freeze_status(phb->opal_id, pe_no,
					&state, &pcierr, NULL);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
			__func__, rc,
			phb->hose->global_number, pe_no);
		return OPAL_EEH_STOPPED_TEMP_UNAVAIL;
	}

	/* Check the slave PEs */
	if (!(pe->flags & PNV_IODA_PE_MASTER))
		return state;

	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						slave->pe_number,
						&fstate,
						&pcierr,
						NULL);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
				__func__, rc,
				phb->hose->global_number, slave->pe_number);
			return OPAL_EEH_STOPPED_TEMP_UNAVAIL;
		}

		/*
		 * Override the result based on the ascending
		 * priority.
		 */
		if (fstate > state)
			state = fstate;
	}

	return state;
}

/* Currently this is only used when MSIs are enabled; this will change
 * but in the meantime, we need to protect it to avoid warnings
 */
#ifdef CONFIG_PCI_MSI
struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(dev);

	if (!pdn)
		return NULL;
	if (pdn->pe_number == IODA_INVALID_PE)
		return NULL;
	return &phb->ioda.pe_array[pdn->pe_number];
}
#endif /* CONFIG_PCI_MSI */

static int pnv_ioda_set_one_peltv(struct pnv_phb *phb,
				  struct pnv_ioda_pe *parent,
				  struct pnv_ioda_pe *child,
				  bool is_add)
{
	const char *desc = is_add ? "adding" : "removing";
	uint8_t op = is_add ? OPAL_ADD_PE_TO_DOMAIN :
			      OPAL_REMOVE_PE_FROM_DOMAIN;
	struct pnv_ioda_pe *slave;
	long rc;

	/* Parent PE affects child PE */
	rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
				child->pe_number, op);
	if (rc != OPAL_SUCCESS) {
		pe_warn(child, "OPAL error %ld %s to parent PELTV\n",
			rc, desc);
		return -ENXIO;
	}

	if (!(child->flags & PNV_IODA_PE_MASTER))
		return 0;

	/* Compound case: parent PE affects slave PEs */
	list_for_each_entry(slave, &child->slaves, list) {
		rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
					slave->pe_number, op);
		if (rc != OPAL_SUCCESS) {
			pe_warn(slave, "OPAL error %ld %s to parent PELTV\n",
				rc, desc);
			return -ENXIO;
		}
	}

	return 0;
}

static int pnv_ioda_set_peltv(struct pnv_phb *phb,
			      struct pnv_ioda_pe *pe,
			      bool is_add)
{
	struct pnv_ioda_pe *slave;
	struct pci_dev *pdev = NULL;
	int ret;

	/*
	 * Clear PE frozen state. If it's a master PE, we need to
	 * clear the slave PEs' frozen state as well.
	 */
	if (is_add) {
		opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
					  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		if (pe->flags & PNV_IODA_PE_MASTER) {
			list_for_each_entry(slave, &pe->slaves, list)
				opal_pci_eeh_freeze_clear(phb->opal_id,
					slave->pe_number,
					OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		}
	}

	/*
	 * Associate the PE in PELT. We need to add the PE to the
	 * corresponding PELT-V as well; otherwise, errors originating
	 * from the PE might be propagated to other PEs.
	 */
	ret = pnv_ioda_set_one_peltv(phb, pe, pe, is_add);
	if (ret)
		return ret;

	/* For compound PEs, any one affects all of them */
	if (pe->flags & PNV_IODA_PE_MASTER) {
		list_for_each_entry(slave, &pe->slaves, list) {
			ret = pnv_ioda_set_one_peltv(phb, slave, pe, is_add);
			if (ret)
				return ret;
		}
	}

	if (pe->flags & (PNV_IODA_PE_BUS_ALL | PNV_IODA_PE_BUS))
		pdev = pe->pbus->self;
	else if (pe->flags & PNV_IODA_PE_DEV)
		pdev = pe->pdev->bus->self;
#ifdef CONFIG_PCI_IOV
	else if (pe->flags & PNV_IODA_PE_VF)
		pdev = pe->parent_dev;
#endif /* CONFIG_PCI_IOV */
	while (pdev) {
		struct pci_dn *pdn = pci_get_pdn(pdev);
		struct pnv_ioda_pe *parent;

		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
			parent = &phb->ioda.pe_array[pdn->pe_number];
			ret = pnv_ioda_set_one_peltv(phb, parent, pe, is_add);
			if (ret)
				return ret;
		}

		pdev = pdev->bus->self;
	}

	return 0;
}

static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
	struct pci_dev *parent;
	uint8_t bcomp, dcomp, fcomp;
	int64_t rc;
	long rid_end, rid;

	/* Currently, we just deconfigure VF PEs. Bus PEs will always be there. */
	if (pe->pbus) {
		int count;

		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
		parent = pe->pbus->self;
		if (pe->flags & PNV_IODA_PE_BUS_ALL)
			count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
		else
			count = 1;

		switch(count) {
		case  1: bcomp = OpalPciBusAll;		break;
		case  2: bcomp = OpalPciBus7Bits;	break;
		case  4: bcomp = OpalPciBus6Bits;	break;
		case  8: bcomp = OpalPciBus5Bits;	break;
		case 16: bcomp = OpalPciBus4Bits;	break;
		case 32: bcomp = OpalPciBus3Bits;	break;
		default:
			dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
				count);
			/* Do an exact match only */
			bcomp = OpalPciBusAll;
		}
		rid_end = pe->rid + (count << 8);
	} else {
#ifdef CONFIG_PCI_IOV
		if (pe->flags & PNV_IODA_PE_VF)
			parent = pe->parent_dev;
		else
#endif
			parent = pe->pdev->bus->self;
		bcomp = OpalPciBusAll;
		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
		rid_end = pe->rid + 1;
	}

	/* Clear the reverse map */
	for (rid = pe->rid; rid < rid_end; rid++)
		phb->ioda.pe_rmap[rid] = IODA_INVALID_PE;

	/* Release from all parents' PELT-V */
	while (parent) {
		struct pci_dn *pdn = pci_get_pdn(parent);
		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
			rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
						pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
			/* XXX What to do in case of error ? */
		}
		parent = parent->bus->self;
	}

	opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
				  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);

	/* Disassociate PE in PELT */
	rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
				pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
	if (rc)
		pe_warn(pe, "OPAL error %ld remove self from PELTV\n", rc);
	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
			     bcomp, dcomp, fcomp, OPAL_UNMAP_PE);
	if (rc)
		pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);

	pe->pbus = NULL;
	pe->pdev = NULL;
#ifdef CONFIG_PCI_IOV
	pe->parent_dev = NULL;
#endif

	return 0;
}

static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
	struct pci_dev *parent;
	uint8_t bcomp, dcomp, fcomp;
	long rc, rid_end, rid;

	/* Bus validation ? */
	if (pe->pbus) {
		int count;

		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
		parent = pe->pbus->self;
		if (pe->flags & PNV_IODA_PE_BUS_ALL)
			count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
		else
			count = 1;

		switch(count) {
		case  1: bcomp = OpalPciBusAll;		break;
		case  2: bcomp = OpalPciBus7Bits;	break;
		case  4: bcomp = OpalPciBus6Bits;	break;
		case  8: bcomp = OpalPciBus5Bits;	break;
		case 16: bcomp = OpalPciBus4Bits;	break;
		case 32: bcomp = OpalPciBus3Bits;	break;
		default:
			dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
				count);
			/* Do an exact match only */
			bcomp = OpalPciBusAll;
		}
		rid_end = pe->rid + (count << 8);
	} else {
#ifdef CONFIG_PCI_IOV
		if (pe->flags & PNV_IODA_PE_VF)
			parent = pe->parent_dev;
		else
#endif /* CONFIG_PCI_IOV */
			parent = pe->pdev->bus->self;
		bcomp = OpalPciBusAll;
		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
		rid_end = pe->rid + 1;
	}

	/*
	 * Associate the PE in PELT. We need to add the PE to the
	 * corresponding PELT-V as well; otherwise, errors originating
	 * from the PE might be propagated to other PEs.
	 */
	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
			     bcomp, dcomp, fcomp, OPAL_MAP_PE);
	if (rc) {
		pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
		return -ENXIO;
	}

	/*
	 * Configure PELTV. NPUs don't have a PELTV table so skip
	 * configuration on them.
	 */
	if (phb->type != PNV_PHB_NPU)
		pnv_ioda_set_peltv(phb, pe, true);

	/* Setup reverse map */
	for (rid = pe->rid; rid < rid_end; rid++)
		phb->ioda.pe_rmap[rid] = pe->pe_number;

	/* Setup an MVE on IODA1 */
	if (phb->type != PNV_PHB_IODA1) {
		pe->mve_number = 0;
		goto out;
	}

	pe->mve_number = pe->pe_number;
	rc = opal_pci_set_mve(phb->opal_id, pe->mve_number, pe->pe_number);
	if (rc != OPAL_SUCCESS) {
		pe_err(pe, "OPAL error %ld setting up MVE %d\n",
		       rc, pe->mve_number);
		pe->mve_number = -1;
	} else {
		rc = opal_pci_set_mve_enable(phb->opal_id,
					     pe->mve_number, OPAL_ENABLE_MVE);
		if (rc) {
			pe_err(pe, "OPAL error %ld enabling MVE %d\n",
			       rc, pe->mve_number);
			pe->mve_number = -1;
		}
	}

out:
	return 0;
}

#ifdef CONFIG_PCI_IOV
static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset)
{
	struct pci_dn *pdn = pci_get_pdn(dev);
	int i;
	struct resource *res, res2;
	resource_size_t size;
	u16 num_vfs;

	if (!dev->is_physfn)
		return -EINVAL;

	/*
	 * "offset" is in VFs. The M64 windows are sized so that when they
	 * are segmented, each segment is the same size as the IOV BAR.
	 * Each segment is in a separate PE, and the high order bits of the
	 * address are the PE number. Therefore, each VF's BAR is in a
	 * separate PE, and changing the IOV BAR start address changes the
	 * range of PEs the VFs are in.
	 */
	num_vfs = pdn->num_vfs;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &dev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || !res->parent)
			continue;

		/*
		 * The actual IOV BAR range is determined by the start address
		 * and the actual size for num_vfs VFs BAR. This check is to
		 * make sure that after shifting, the range will not overlap
		 * with another device.
		 */
		size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
		res2.flags = res->flags;
		res2.start = res->start + (size * offset);
		res2.end = res2.start + (size * num_vfs) - 1;

		if (res2.end > res->end) {
			dev_err(&dev->dev, "VF BAR%d: %pR would extend past %pR (trying to enable %d VFs shifted by %d)\n",
				i, &res2, res, num_vfs, offset);
			return -EBUSY;
		}
	}

	/*
	 * After doing this, there will be a "hole" in /proc/iomem when
	 * offset is positive. It looks as if the device returned some MMIO
	 * range back to the system that nobody can actually use.
	 */
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &dev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || !res->parent)
			continue;

		size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
		res2 = *res;
		res->start += size * offset;

		dev_info(&dev->dev, "VF BAR%d: %pR shifted to %pR (%sabling %d VFs shifted by %d)\n",
			 i, &res2, res, (offset > 0) ? "En" : "Dis",
			 num_vfs, offset);
		pci_update_resource(dev, i + PCI_IOV_RESOURCES);
	}
	return 0;
}
#endif /* CONFIG_PCI_IOV */

static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(dev);
	struct pnv_ioda_pe *pe;

	if (!pdn) {
		pr_err("%s: Device tree node not associated properly\n",
		       pci_name(dev));
		return NULL;
	}
	if (pdn->pe_number != IODA_INVALID_PE)
		return NULL;

	pe = pnv_ioda_alloc_pe(phb);
	if (!pe) {
		pr_warning("%s: Not enough PE# available, disabling device\n",
			   pci_name(dev));
		return NULL;
	}

	/* NOTE: We get only one ref to the pci_dev for the pdn, not for the
	 * pointer in the PE data structure, both should be destroyed at the
	 * same time. However, this needs to be looked at more closely again
	 * once we actually start removing things (Hotplug, SR-IOV, ...)
	 *
	 * At some point we want to remove the PDN completely anyways
	 */
	pci_dev_get(dev);
	pdn->pcidev = dev;
	pdn->pe_number = pe->pe_number;
	pe->flags = PNV_IODA_PE_DEV;
	pe->pdev = dev;
	pe->pbus = NULL;
	pe->mve_number = -1;
	pe->rid = dev->bus->number << 8 | pdn->devfn;

	pe_info(pe, "Associated device to PE\n");

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		pnv_ioda_free_pe(pe);
		pdn->pe_number = IODA_INVALID_PE;
		pe->pdev = NULL;
		pci_dev_put(dev);
		return NULL;
	}

	/* Put PE to the list */
	list_add_tail(&pe->list, &phb->ioda.pe_list);

	return pe;
}

static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_dn *pdn = pci_get_pdn(dev);

		if (pdn == NULL) {
			pr_warn("%s: No device node associated with device !\n",
				pci_name(dev));
			continue;
		}

		/*
		 * In the partial hotplug case, the PCI device might still
		 * be associated with the PE and needn't be attached to it
		 * again.
		 */
		if (pdn->pe_number != IODA_INVALID_PE)
			continue;

		pe->device_count++;
		pdn->pcidev = dev;
		pdn->pe_number = pe->pe_number;
		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
			pnv_ioda_setup_same_PE(dev->subordinate, pe);
	}
}

/*
 * There are 2 types of PCI-bus-sensitive PEs: one that is comprised of a
 * single PCI bus, and another that contains the primary PCI bus and its
 * subordinate PCI devices and buses. The second type of PE normally
 * originates from a PCIe-to-PCI bridge or a PLX switch downstream port.
 */
static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pe = NULL;
	unsigned int pe_num;

	/*
	 * In the partial hotplug case, the PE instance might still be
	 * alive. We should reuse it instead of allocating a new one.
	 */
	pe_num = phb->ioda.pe_rmap[bus->number << 8];
	if (pe_num != IODA_INVALID_PE) {
		pe = &phb->ioda.pe_array[pe_num];
		pnv_ioda_setup_same_PE(bus, pe);
		return NULL;
	}

	/* PE number for root bus should have been reserved */
	if (pci_is_root_bus(bus) &&
	    phb->ioda.root_pe_idx != IODA_INVALID_PE)
		pe = &phb->ioda.pe_array[phb->ioda.root_pe_idx];

	/* Check if PE is determined by M64 */
	if (!pe && phb->pick_m64_pe)
		pe = phb->pick_m64_pe(bus, all);

	/* The PE number isn't pinned by M64 */
	if (!pe)
		pe = pnv_ioda_alloc_pe(phb);

	if (!pe) {
		pr_warning("%s: Not enough PE# available for PCI bus %04x:%02x\n",
			   __func__, pci_domain_nr(bus), bus->number);
		return NULL;
	}

	pe->flags |= (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS);
	pe->pbus = bus;
	pe->pdev = NULL;
	pe->mve_number = -1;
	pe->rid = bus->busn_res.start << 8;

	if (all)
		pe_info(pe, "Secondary bus %d..%d associated with PE#%d\n",
			bus->busn_res.start, bus->busn_res.end, pe->pe_number);
	else
		pe_info(pe, "Secondary bus %d associated with PE#%d\n",
			bus->busn_res.start, pe->pe_number);

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		pnv_ioda_free_pe(pe);
		pe->pbus = NULL;
		return NULL;
	}

	/* Associate it with all child devices */
	pnv_ioda_setup_same_PE(bus, pe);

	/* Put PE to the list */
	list_add_tail(&pe->list, &phb->ioda.pe_list);

	return pe;
}

static struct pnv_ioda_pe *pnv_ioda_setup_npu_PE(struct pci_dev *npu_pdev)
{
	int pe_num, found_pe = false, rc;
	long rid;
	struct pnv_ioda_pe *pe;
	struct pci_dev *gpu_pdev;
	struct pci_dn *npu_pdn;
	struct pci_controller *hose = pci_bus_to_host(npu_pdev->bus);
	struct pnv_phb *phb = hose->private_data;

	/*
	 * Due to a hardware erratum PE#0 on the NPU is reserved for
	 * error handling. This means we only have three PEs remaining
	 * which need to be assigned to four links, implying some
	 * links must share PEs.
	 *
	 * To achieve this we assign PEs such that NPUs linking the
	 * same GPU get assigned the same PE.
	 */
	gpu_pdev = pnv_pci_get_gpu_dev(npu_pdev);
	for (pe_num = 0; pe_num < phb->ioda.total_pe_num; pe_num++) {
		pe = &phb->ioda.pe_array[pe_num];
		if (!pe->pdev)
			continue;

		if (pnv_pci_get_gpu_dev(pe->pdev) == gpu_pdev) {
			/*
			 * This device has the same peer GPU so should
			 * be assigned the same PE as the existing
			 * peer NPU.
			 */
			dev_info(&npu_pdev->dev,
				 "Associating to existing PE %d\n", pe_num);
			pci_dev_get(npu_pdev);
			npu_pdn = pci_get_pdn(npu_pdev);
			rid = npu_pdev->bus->number << 8 | npu_pdn->devfn;
			npu_pdn->pcidev = npu_pdev;
			npu_pdn->pe_number = pe_num;
			phb->ioda.pe_rmap[rid] = pe->pe_number;

			/* Map the PE to this link */
			rc = opal_pci_set_pe(phb->opal_id, pe_num, rid,
					     OpalPciBusAll,
					     OPAL_COMPARE_RID_DEVICE_NUMBER,
					     OPAL_COMPARE_RID_FUNCTION_NUMBER,
					     OPAL_MAP_PE);
			WARN_ON(rc != OPAL_SUCCESS);
			found_pe = true;
			break;
		}
	}

	if (!found_pe)
		/*
		 * Could not find an existing PE so allocate a new
		 * one.
		 */
		return pnv_ioda_setup_dev_PE(npu_pdev);
	else
		return pe;
}

static void pnv_ioda_setup_npu_PEs(struct pci_bus *bus)
{
	struct pci_dev *pdev;

	list_for_each_entry(pdev, &bus->devices, bus_list)
		pnv_ioda_setup_npu_PE(pdev);
}

static void pnv_pci_ioda_setup_PEs(void)
{
	struct pci_controller *hose, *tmp;
	struct pnv_phb *phb;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		phb = hose->private_data;
		if (phb->type == PNV_PHB_NPU) {
			/* PE#0 is needed for error reporting */
			pnv_ioda_reserve_pe(phb, 0);
			pnv_ioda_setup_npu_PEs(hose->bus);
		}
	}
}

#ifdef CONFIG_PCI_IOV
static int pnv_pci_vf_release_m64(struct pci_dev *pdev, u16 num_vfs)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pci_dn *pdn;
	int i, j;
	int m64_bars;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);

	if (pdn->m64_single_mode)
		m64_bars = num_vfs;
	else
		m64_bars = 1;

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
		for (j = 0; j < m64_bars; j++) {
			if (pdn->m64_map[j][i] == IODA_INVALID_M64)
				continue;
			opal_pci_phb_mmio_enable(phb->opal_id,
				OPAL_M64_WINDOW_TYPE, pdn->m64_map[j][i], 0);
			clear_bit(pdn->m64_map[j][i], &phb->ioda.m64_bar_alloc);
			pdn->m64_map[j][i] = IODA_INVALID_M64;
		}

	kfree(pdn->m64_map);
	return 0;
}

static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pci_dn *pdn;
	unsigned int win;
	struct resource *res;
	int i, j;
	int64_t rc;
	int total_vfs;
	resource_size_t size, start;
	int pe_num;
	int m64_bars;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);
	total_vfs = pci_sriov_get_totalvfs(pdev);

	if (pdn->m64_single_mode)
		m64_bars = num_vfs;
	else
		m64_bars = 1;

	pdn->m64_map = kmalloc(sizeof(*pdn->m64_map) * m64_bars, GFP_KERNEL);
	if (!pdn->m64_map)
		return -ENOMEM;
	/* Initialize the m64_map to IODA_INVALID_M64 */
	for (i = 0; i < m64_bars; i++)
		for (j = 0; j < PCI_SRIOV_NUM_BARS; j++)
			pdn->m64_map[i][j] = IODA_INVALID_M64;

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &pdev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || !res->parent)
			continue;

		for (j = 0; j < m64_bars; j++) {
			do {
				win = find_next_zero_bit(&phb->ioda.m64_bar_alloc,
						phb->ioda.m64_bar_idx + 1, 0);

				if (win >= phb->ioda.m64_bar_idx + 1)
					goto m64_failed;
			} while (test_and_set_bit(win, &phb->ioda.m64_bar_alloc));

			pdn->m64_map[j][i] = win;

			if (pdn->m64_single_mode) {
				size = pci_iov_resource_size(pdev,
							PCI_IOV_RESOURCES + i);
				start = res->start + size * j;
			} else {
				size = resource_size(res);
				start = res->start;
			}

			/* Map the M64 here */
			if (pdn->m64_single_mode) {
				pe_num = pdn->pe_num_map[j];
				rc = opal_pci_map_pe_mmio_window(phb->opal_id,
						pe_num, OPAL_M64_WINDOW_TYPE,
						pdn->m64_map[j][i], 0);
			}

			rc = opal_pci_set_phb_mem_window(phb->opal_id,
						 OPAL_M64_WINDOW_TYPE,
						 pdn->m64_map[j][i],
						 start,
						 0, /* unused */
						 size);

			if (rc != OPAL_SUCCESS) {
				dev_err(&pdev->dev, "Failed to map M64 window #%d: %lld\n",
					win, rc);
				goto m64_failed;
			}

			if (pdn->m64_single_mode)
				rc = opal_pci_phb_mmio_enable(phb->opal_id,
				     OPAL_M64_WINDOW_TYPE, pdn->m64_map[j][i], 2);
			else
				rc = opal_pci_phb_mmio_enable(phb->opal_id,
				     OPAL_M64_WINDOW_TYPE, pdn->m64_map[j][i], 1);

			if (rc != OPAL_SUCCESS) {
				dev_err(&pdev->dev, "Failed to enable M64 window #%d: %llx\n",
					win, rc);
				goto m64_failed;
			}
		}
	}
	return 0;

m64_failed:
	pnv_pci_vf_release_m64(pdev, num_vfs);
	return -EBUSY;
}

static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
		int num);
static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable);

static void pnv_pci_ioda2_release_dma_pe(struct pci_dev *dev, struct pnv_ioda_pe *pe)
{
	struct iommu_table *tbl;
	int64_t rc;

	tbl = pe->table_group.tables[0];
	rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0);
	if (rc)
		pe_warn(pe, "OPAL error %ld release DMA window\n", rc);

	pnv_pci_ioda2_set_bypass(pe, false);
	if (pe->table_group.group) {
		iommu_group_put(pe->table_group.group);
		BUG_ON(pe->table_group.group);
	}
	pnv_pci_ioda2_table_free_pages(tbl);
	iommu_free_table(tbl, of_node_full_name(dev->dev.of_node));
}

static void pnv_ioda_release_vf_PE(struct pci_dev *pdev)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe, *pe_n;
	struct pci_dn *pdn;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);

	if (!pdev->is_physfn)
		return;

	list_for_each_entry_safe(pe, pe_n, &phb->ioda.pe_list, list) {
		if (pe->parent_dev != pdev)
			continue;

		pnv_pci_ioda2_release_dma_pe(pdev, pe);

		/* Remove from list */
		mutex_lock(&phb->ioda.pe_list_mutex);
		list_del(&pe->list);
		mutex_unlock(&phb->ioda.pe_list_mutex);

		pnv_ioda_deconfigure_pe(phb, pe);

		pnv_ioda_free_pe(pe);
	}
}

void pnv_pci_sriov_disable(struct pci_dev *pdev)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe;
	struct pci_dn *pdn;
	struct pci_sriov *iov;
	u16 num_vfs, i;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);
	iov = pdev->sriov;
	num_vfs = pdn->num_vfs;

	/* Release VF PEs */
	pnv_ioda_release_vf_PE(pdev);

	if (phb->type == PNV_PHB_IODA2) {
		if (!pdn->m64_single_mode)
			pnv_pci_vf_resource_shift(pdev, -*pdn->pe_num_map);

		/* Release M64 windows */
		pnv_pci_vf_release_m64(pdev, num_vfs);

		/* Release PE numbers */
		if (pdn->m64_single_mode) {
			for (i = 0; i < num_vfs; i++) {
				if (pdn->pe_num_map[i] == IODA_INVALID_PE)
					continue;

				pe = &phb->ioda.pe_array[pdn->pe_num_map[i]];
				pnv_ioda_free_pe(pe);
			}
		} else
			bitmap_clear(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);
		/* Releasing pe_num_map */
		kfree(pdn->pe_num_map);
	}
}

static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe);
static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe;
	int pe_num;
	u16 vf_index;
	struct pci_dn *pdn;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);

	if (!pdev->is_physfn)
		return;

	/* Reserve PE for each VF */
	for (vf_index = 0; vf_index < num_vfs; vf_index++) {
		if (pdn->m64_single_mode)
			pe_num = pdn->pe_num_map[vf_index];
		else
			pe_num = *pdn->pe_num_map + vf_index;

		pe = &phb->ioda.pe_array[pe_num];
		pe->pe_number = pe_num;
		pe->phb = phb;
		pe->flags = PNV_IODA_PE_VF;
		pe->pbus = NULL;
		pe->parent_dev = pdev;
		pe->mve_number = -1;
		pe->rid = (pci_iov_virtfn_bus(pdev, vf_index) << 8) |
			   pci_iov_virtfn_devfn(pdev, vf_index);

		pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%d\n",
			hose->global_number, pdev->bus->number,
			PCI_SLOT(pci_iov_virtfn_devfn(pdev, vf_index)),
			PCI_FUNC(pci_iov_virtfn_devfn(pdev, vf_index)), pe_num);

		if (pnv_ioda_configure_pe(phb, pe)) {
			/* XXX What do we do here ? */
			pnv_ioda_free_pe(pe);
			pe->pdev = NULL;
			continue;
		}

		/* Put PE to the list */
		mutex_lock(&phb->ioda.pe_list_mutex);
		list_add_tail(&pe->list, &phb->ioda.pe_list);
		mutex_unlock(&phb->ioda.pe_list_mutex);

		pnv_pci_ioda2_setup_dma_pe(phb, pe);
	}
}

1543 | int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs) | |
1544 | { | |
1545 | struct pci_bus *bus; | |
1546 | struct pci_controller *hose; | |
1547 | struct pnv_phb *phb; | |
1e916772 | 1548 | struct pnv_ioda_pe *pe; |
781a868f WY |
1549 | struct pci_dn *pdn; |
1550 | int ret; | |
be283eeb | 1551 | u16 i; |
781a868f WY |
1552 | |
1553 | bus = pdev->bus; | |
1554 | hose = pci_bus_to_host(bus); | |
1555 | phb = hose->private_data; | |
1556 | pdn = pci_get_pdn(pdev); | |
1557 | ||
1558 | if (phb->type == PNV_PHB_IODA2) { | |
b0331854 WY |
1559 | if (!pdn->vfs_expanded) { |
1560 | dev_info(&pdev->dev, "don't support this SRIOV device" | |
1561 | " with non 64bit-prefetchable IOV BAR\n"); | |
1562 | return -ENOSPC; | |
1563 | } | |
1564 | ||
ee8222fe WY |
1565 | /* |
1566 | * When the M64 BARs function in Single PE mode, the number of | |
1567 | * VFs that can be enabled must not exceed the number of M64 BARs. | |
1568 | */ | |
1569 | if (pdn->m64_single_mode && num_vfs > phb->ioda.m64_bar_idx) { | |
1570 | dev_info(&pdev->dev, "Not enough M64 BAR for VFs\n"); | |
1571 | return -EBUSY; | |
1572 | } | |
1573 | ||
be283eeb WY |
1574 | /* Allocating pe_num_map */ |
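/*
 * In single mode each VF gets its own M64 BAR and thus its own PE,
 * so pe_num_map holds one PE number per VF.  In shared mode it
 * holds a single base PE number and VF i uses PE (base + i).
 */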
1575 | if (pdn->m64_single_mode) | |
1576 | pdn->pe_num_map = kmalloc(sizeof(*pdn->pe_num_map) * num_vfs, | |
1577 | GFP_KERNEL); | |
1578 | else | |
1579 | pdn->pe_num_map = kmalloc(sizeof(*pdn->pe_num_map), GFP_KERNEL); | |
1580 | ||
1581 | if (!pdn->pe_num_map) | |
1582 | return -ENOMEM; | |
1583 | ||
1584 | if (pdn->m64_single_mode) | |
1585 | for (i = 0; i < num_vfs; i++) | |
1586 | pdn->pe_num_map[i] = IODA_INVALID_PE; | |
1587 | ||
781a868f | 1588 | /* Calculate available PEs for the required VFs */ |
be283eeb WY |
1589 | if (pdn->m64_single_mode) { |
1590 | for (i = 0; i < num_vfs; i++) { | |
1e916772 GS |
1591 | pe = pnv_ioda_alloc_pe(phb); |
1592 | if (!pe) { | |
be283eeb WY |
1593 | ret = -EBUSY; |
1594 | goto m64_failed; | |
1595 | } | |
1e916772 GS |
1596 | |
1597 | pdn->pe_num_map[i] = pe->pe_number; | |
be283eeb WY |
1598 | } |
1599 | } else { | |
1600 | mutex_lock(&phb->ioda.pe_alloc_mutex); | |
1601 | *pdn->pe_num_map = bitmap_find_next_zero_area( | |
92b8f137 | 1602 | phb->ioda.pe_alloc, phb->ioda.total_pe_num, |
be283eeb | 1603 | 0, num_vfs, 0); |
92b8f137 | 1604 | if (*pdn->pe_num_map >= phb->ioda.total_pe_num) { |
be283eeb WY |
1605 | mutex_unlock(&phb->ioda.pe_alloc_mutex); |
1606 | dev_info(&pdev->dev, "Failed to enable VF%d\n", num_vfs); | |
1607 | kfree(pdn->pe_num_map); | |
1608 | return -EBUSY; | |
1609 | } | |
1610 | bitmap_set(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs); | |
781a868f | 1611 | mutex_unlock(&phb->ioda.pe_alloc_mutex); |
781a868f | 1612 | } |
781a868f | 1613 | pdn->num_vfs = num_vfs; |
781a868f WY |
1614 | |
1615 | /* Assign M64 window accordingly */ | |
02639b0e | 1616 | ret = pnv_pci_vf_assign_m64(pdev, num_vfs); |
781a868f WY |
1617 | if (ret) { |
1618 | dev_info(&pdev->dev, "Not enough M64 window resources\n"); | |
1619 | goto m64_failed; | |
1620 | } | |
1621 | ||
1622 | /* | |
1623 | * When using one M64 BAR to map one IOV BAR, we need to shift | |
1624 | * the IOV BAR according to the PE# allocated to the VFs. | |
1625 | * Otherwise, the PE# for the VF will conflict with others. | |
1626 | */ | |
ee8222fe | 1627 | if (!pdn->m64_single_mode) { |
be283eeb | 1628 | ret = pnv_pci_vf_resource_shift(pdev, *pdn->pe_num_map); |
02639b0e WY |
1629 | if (ret) |
1630 | goto m64_failed; | |
1631 | } | |
781a868f WY |
1632 | } |
1633 | ||
1634 | /* Setup VF PEs */ | |
1635 | pnv_ioda_setup_vf_PE(pdev, num_vfs); | |
1636 | ||
1637 | return 0; | |
1638 | ||
1639 | m64_failed: | |
be283eeb WY |
1640 | if (pdn->m64_single_mode) { |
1641 | for (i = 0; i < num_vfs; i++) { | |
1e916772 GS |
1642 | if (pdn->pe_num_map[i] == IODA_INVALID_PE) |
1643 | continue; | |
1644 | ||
1645 | pe = &phb->ioda.pe_array[pdn->pe_num_map[i]]; | |
1646 | pnv_ioda_free_pe(pe); | |
be283eeb WY |
1647 | } |
1648 | } else | |
1649 | bitmap_clear(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs); | |
1650 | ||
1651 | /* Releasing pe_num_map */ | |
1652 | kfree(pdn->pe_num_map); | |
781a868f WY |
1653 | |
1654 | return ret; | |
1655 | } | |
1656 | ||
a8b2f828 GS |
1657 | int pcibios_sriov_disable(struct pci_dev *pdev) |
1658 | { | |
781a868f WY |
1659 | pnv_pci_sriov_disable(pdev); |
1660 | ||
a8b2f828 GS |
1661 | /* Release PCI data */ |
1662 | remove_dev_pci_data(pdev); | |
1663 | return 0; | |
1664 | } | |
1665 | ||
1666 | int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs) | |
1667 | { | |
1668 | /* Allocate PCI data */ | |
1669 | add_dev_pci_data(pdev); | |
781a868f | 1670 | |
ee8222fe | 1671 | return pnv_pci_sriov_enable(pdev, num_vfs); |
a8b2f828 GS |
1672 | } |
1673 | #endif /* CONFIG_PCI_IOV */ | |
1674 | ||
959c9bdd | 1675 | static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev) |
184cd4a3 | 1676 | { |
b72c1f65 | 1677 | struct pci_dn *pdn = pci_get_pdn(pdev); |
959c9bdd | 1678 | struct pnv_ioda_pe *pe; |
184cd4a3 | 1679 | |
959c9bdd GS |
1680 | /* |
1681 | * This function can be called before the PE# | |
1682 | * has been assigned. Do nothing in that case. | |
1684 | */ | |
1685 | if (!pdn || pdn->pe_number == IODA_INVALID_PE) | |
1686 | return; | |
184cd4a3 | 1687 | |
959c9bdd | 1688 | pe = &phb->ioda.pe_array[pdn->pe_number]; |
cd15b048 | 1689 | WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops); |
0e1ffef0 | 1690 | set_dma_offset(&pdev->dev, pe->tce_bypass_base); |
b348aa65 | 1691 | set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]); |
4617082e AK |
1692 | /* |
1693 | * Note: iommu_add_device() will fail here as | |
1694 | * for physical PE: the device is already added by now; | |
1695 | * for virtual PE: sysfs entries are not ready yet and | |
1696 | * tce_iommu_bus_notifier will add the device to a group later. | |
1697 | */ | |
184cd4a3 BH |
1698 | } |
1699 | ||
763d2d8d | 1700 | static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask) |
cd15b048 | 1701 | { |
763d2d8d DA |
1702 | struct pci_controller *hose = pci_bus_to_host(pdev->bus); |
1703 | struct pnv_phb *phb = hose->private_data; | |
cd15b048 BH |
1704 | struct pci_dn *pdn = pci_get_pdn(pdev); |
1705 | struct pnv_ioda_pe *pe; | |
1706 | uint64_t top; | |
1707 | bool bypass = false; | |
1708 | ||
1709 | if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE)) | |
1710 | return -ENODEV; | |
1711 | ||
1712 | pe = &phb->ioda.pe_array[pdn->pe_number]; | |
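/*
 * The 64-bit bypass window can be used only if the device's DMA
 * mask covers the whole window, i.e. tce_bypass_base plus all of
 * RAM; otherwise the device stays on 32-bit translated DMA via
 * the TCE table.
 */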
1713 | if (pe->tce_bypass_enabled) { | |
1714 | top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1; | |
1715 | bypass = (dma_mask >= top); | |
1716 | } | |
1717 | ||
1718 | if (bypass) { | |
1719 | dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n"); | |
1720 | set_dma_ops(&pdev->dev, &dma_direct_ops); | |
cd15b048 BH |
1721 | } else { |
1722 | dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n"); | |
1723 | set_dma_ops(&pdev->dev, &dma_iommu_ops); | |
cd15b048 | 1724 | } |
a32305bf | 1725 | *pdev->dev.dma_mask = dma_mask; |
5d2aa710 AP |
1726 | |
1727 | /* Update peer npu devices */ | |
f9f83456 | 1728 | pnv_npu_try_dma_set_bypass(pdev, bypass); |
5d2aa710 | 1729 | |
cd15b048 BH |
1730 | return 0; |
1731 | } | |
1732 | ||
53522982 | 1733 | static u64 pnv_pci_ioda_dma_get_required_mask(struct pci_dev *pdev) |
fe7e85c6 | 1734 | { |
53522982 AD |
1735 | struct pci_controller *hose = pci_bus_to_host(pdev->bus); |
1736 | struct pnv_phb *phb = hose->private_data; | |
fe7e85c6 GS |
1737 | struct pci_dn *pdn = pci_get_pdn(pdev); |
1738 | struct pnv_ioda_pe *pe; | |
1739 | u64 end, mask; | |
1740 | ||
1741 | if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE)) | |
1742 | return 0; | |
1743 | ||
1744 | pe = &phb->ioda.pe_array[pdn->pe_number]; | |
1745 | if (!pe->tce_bypass_enabled) | |
1746 | return __dma_get_required_mask(&pdev->dev); | |
1747 | ||
1748 | ||
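/*
 * Build the smallest all-ones mask covering the end of the bypass
 * window. Illustrative example (the RAM size is hypothetical): with
 * tce_bypass_base = 1ull << 59 and 8GB of RAM,
 * end = 0x0800000200000000 and fls64(end) = 60, so the result is
 * (1ull << 60) - 1, i.e. a 60-bit DMA mask is required for bypass.
 */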
1749 | end = pe->tce_bypass_base + memblock_end_of_DRAM(); | |
1750 | mask = 1ULL << (fls64(end) - 1); | |
1751 | mask += mask - 1; | |
1752 | ||
1753 | return mask; | |
1754 | } | |
1755 | ||
dff4a39e | 1756 | static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, |
ea30e99e | 1757 | struct pci_bus *bus) |
74251fe2 BH |
1758 | { |
1759 | struct pci_dev *dev; | |
1760 | ||
1761 | list_for_each_entry(dev, &bus->devices, bus_list) { | |
b348aa65 | 1762 | set_iommu_table_base(&dev->dev, pe->table_group.tables[0]); |
e91c2511 | 1763 | set_dma_offset(&dev->dev, pe->tce_bypass_base); |
4617082e | 1764 | iommu_add_device(&dev->dev); |
dff4a39e | 1765 | |
5c89a87d | 1766 | if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate) |
ea30e99e | 1767 | pnv_ioda_setup_bus_dma(pe, dev->subordinate); |
74251fe2 BH |
1768 | } |
1769 | } | |
1770 | ||
fd141d1a BH |
1771 | static inline __be64 __iomem *pnv_ioda_get_inval_reg(struct pnv_phb *phb, |
1772 | bool real_mode) | |
1773 | { | |
1774 | return real_mode ? (__be64 __iomem *)(phb->regs_phys + 0x210) : | |
1775 | (phb->regs + 0x210); | |
1776 | } | |
1777 | ||
a34ab7c3 | 1778 | static void pnv_pci_p7ioc_tce_invalidate(struct iommu_table *tbl, |
decbda25 | 1779 | unsigned long index, unsigned long npages, bool rm) |
4cce9550 | 1780 | { |
0eaf4def AK |
1781 | struct iommu_table_group_link *tgl = list_first_entry_or_null( |
1782 | &tbl->it_group_list, struct iommu_table_group_link, | |
1783 | next); | |
1784 | struct pnv_ioda_pe *pe = container_of(tgl->table_group, | |
b348aa65 | 1785 | struct pnv_ioda_pe, table_group); |
fd141d1a | 1786 | __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm); |
4cce9550 GS |
1787 | unsigned long start, end, inc; |
1788 | ||
decbda25 AK |
1789 | start = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset); |
1790 | end = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset + | |
1791 | npages - 1); | |
4cce9550 | 1792 | |
08acce1c BH |
1793 | /* p7ioc-style invalidation, 2 TCEs per write */ |
1794 | start |= (1ull << 63); | |
1795 | end |= (1ull << 63); | |
1796 | inc = 16; | |
4cce9550 GS |
1797 | end |= inc - 1; /* round up end to be different than start */ |
1798 | ||
1799 | mb(); /* Ensure above stores are visible */ | |
1800 | while (start <= end) { | |
8e0a1611 | 1801 | if (rm) |
3ad26e5c | 1802 | __raw_rm_writeq(cpu_to_be64(start), invalidate); |
8e0a1611 | 1803 | else |
3ad26e5c | 1804 | __raw_writeq(cpu_to_be64(start), invalidate); |
4cce9550 GS |
1805 | start += inc; |
1806 | } | |
1807 | ||
1808 | /* | |
1809 | * The iommu layer will do another mb() for us on build() | |
1810 | * and we don't need one on free() | |
1811 | */ | |
1812 | } | |
1813 | ||
decbda25 AK |
1814 | static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index, |
1815 | long npages, unsigned long uaddr, | |
1816 | enum dma_data_direction direction, | |
00085f1e | 1817 | unsigned long attrs) |
decbda25 AK |
1818 | { |
1819 | int ret = pnv_tce_build(tbl, index, npages, uaddr, direction, | |
1820 | attrs); | |
1821 | ||
08acce1c | 1822 | if (!ret) |
a34ab7c3 | 1823 | pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false); |
decbda25 AK |
1824 | |
1825 | return ret; | |
1826 | } | |
1827 | ||
05c6cfb9 AK |
1828 | #ifdef CONFIG_IOMMU_API |
1829 | static int pnv_ioda1_tce_xchg(struct iommu_table *tbl, long index, | |
1830 | unsigned long *hpa, enum dma_data_direction *direction) | |
1831 | { | |
1832 | long ret = pnv_tce_xchg(tbl, index, hpa, direction); | |
1833 | ||
08acce1c | 1834 | if (!ret) |
a34ab7c3 | 1835 | pnv_pci_p7ioc_tce_invalidate(tbl, index, 1, false); |
05c6cfb9 AK |
1836 | |
1837 | return ret; | |
1838 | } | |
1839 | #endif | |
1840 | ||
decbda25 AK |
1841 | static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index, |
1842 | long npages) | |
1843 | { | |
1844 | pnv_tce_free(tbl, index, npages); | |
1845 | ||
08acce1c | 1846 | pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false); |
decbda25 AK |
1847 | } |
1848 | ||
da004c36 | 1849 | static struct iommu_table_ops pnv_ioda1_iommu_ops = { |
decbda25 | 1850 | .set = pnv_ioda1_tce_build, |
05c6cfb9 AK |
1851 | #ifdef CONFIG_IOMMU_API |
1852 | .exchange = pnv_ioda1_tce_xchg, | |
1853 | #endif | |
decbda25 | 1854 | .clear = pnv_ioda1_tce_free, |
da004c36 AK |
1855 | .get = pnv_tce_get, |
1856 | }; | |
1857 | ||
a34ab7c3 BH |
1858 | #define PHB3_TCE_KILL_INVAL_ALL PPC_BIT(0) |
1859 | #define PHB3_TCE_KILL_INVAL_PE PPC_BIT(1) | |
1860 | #define PHB3_TCE_KILL_INVAL_ONE PPC_BIT(2) | |
bef9253f | 1861 | |
a34ab7c3 | 1862 | void pnv_pci_phb3_tce_invalidate_entire(struct pnv_phb *phb, bool rm) |
0bbcdb43 | 1863 | { |
fd141d1a | 1864 | __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(phb, rm); |
a34ab7c3 | 1865 | const unsigned long val = PHB3_TCE_KILL_INVAL_ALL; |
0bbcdb43 AK |
1866 | |
1867 | mb(); /* Ensure previous TCE table stores are visible */ | |
1868 | if (rm) | |
fd141d1a | 1869 | __raw_rm_writeq(cpu_to_be64(val), invalidate); |
0bbcdb43 | 1870 | else |
fd141d1a | 1871 | __raw_writeq(cpu_to_be64(val), invalidate); |
0bbcdb43 AK |
1872 | } |
1873 | ||
a34ab7c3 | 1874 | static inline void pnv_pci_phb3_tce_invalidate_pe(struct pnv_ioda_pe *pe) |
5780fb04 AK |
1875 | { |
1876 | /* 01xb - invalidate TCEs that match the specified PE# */ | |
fd141d1a | 1877 | __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, false); |
a34ab7c3 | 1878 | unsigned long val = PHB3_TCE_KILL_INVAL_PE | (pe->pe_number & 0xFF); |
5780fb04 AK |
1879 | |
1880 | mb(); /* Ensure above stores are visible */ | |
fd141d1a | 1881 | __raw_writeq(cpu_to_be64(val), invalidate); |
5780fb04 AK |
1882 | } |
1883 | ||
fd141d1a BH |
1884 | static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, bool rm, |
1885 | unsigned shift, unsigned long index, | |
1886 | unsigned long npages) | |
4cce9550 | 1887 | { |
4d902195 | 1888 | __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm); |
4cce9550 | 1889 | unsigned long start, end, inc; |
4cce9550 GS |
1890 | |
1891 | /* We'll invalidate DMA addresses in PE scope */ | |
a34ab7c3 | 1892 | start = PHB3_TCE_KILL_INVAL_ONE; |
fd141d1a | 1893 | start |= (pe->pe_number & 0xFF); |
4cce9550 GS |
1894 | end = start; |
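/*
 * The kill register value encodes the invalidation type in the
 * top bits, the PE number in the low byte, and the DMA address
 * (index << page shift) in between; the loop below issues one
 * write per IOMMU page.
 */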
1895 | ||
1896 | /* Figure out the start, end and step */ | |
decbda25 AK |
1897 | start |= (index << shift); |
1898 | end |= ((index + npages - 1) << shift); | |
b0376c9b | 1899 | inc = (0x1ull << shift); |
4cce9550 GS |
1900 | mb(); |
1901 | ||
1902 | while (start <= end) { | |
8e0a1611 | 1903 | if (rm) |
3ad26e5c | 1904 | __raw_rm_writeq(cpu_to_be64(start), invalidate); |
8e0a1611 | 1905 | else |
3ad26e5c | 1906 | __raw_writeq(cpu_to_be64(start), invalidate); |
4cce9550 GS |
1907 | start += inc; |
1908 | } | |
1909 | } | |
1910 | ||
f0228c41 BH |
1911 | static inline void pnv_pci_ioda2_tce_invalidate_pe(struct pnv_ioda_pe *pe) |
1912 | { | |
1913 | struct pnv_phb *phb = pe->phb; | |
1914 | ||
1915 | if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs) | |
1916 | pnv_pci_phb3_tce_invalidate_pe(pe); | |
1917 | else | |
1918 | opal_pci_tce_kill(phb->opal_id, OPAL_PCI_TCE_KILL_PE, | |
1919 | pe->pe_number, 0, 0, 0); | |
1920 | } | |
1921 | ||
e57080f1 AK |
1922 | static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl, |
1923 | unsigned long index, unsigned long npages, bool rm) | |
1924 | { | |
1925 | struct iommu_table_group_link *tgl; | |
1926 | ||
1927 | list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) { | |
1928 | struct pnv_ioda_pe *pe = container_of(tgl->table_group, | |
1929 | struct pnv_ioda_pe, table_group); | |
f0228c41 BH |
1930 | struct pnv_phb *phb = pe->phb; |
1931 | unsigned int shift = tbl->it_page_shift; | |
1932 | ||
1933 | if (phb->type == PNV_PHB_NPU) { | |
0bbcdb43 AK |
1934 | /* |
1935 | * The NVLink hardware does not support TCE kill | |
1936 | * per TCE entry so we have to invalidate | |
1937 | * the entire cache for it. | |
1938 | */ | |
f0228c41 | 1939 | pnv_pci_phb3_tce_invalidate_entire(phb, rm); |
85674868 AK |
1940 | continue; |
1941 | } | |
f0228c41 BH |
1942 | if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs) |
1943 | pnv_pci_phb3_tce_invalidate(pe, rm, shift, | |
1944 | index, npages); | |
1945 | else if (rm) | |
1946 | opal_rm_pci_tce_kill(phb->opal_id, | |
1947 | OPAL_PCI_TCE_KILL_PAGES, | |
1948 | pe->pe_number, 1u << shift, | |
1949 | index << shift, npages); | |
1950 | else | |
1951 | opal_pci_tce_kill(phb->opal_id, | |
1952 | OPAL_PCI_TCE_KILL_PAGES, | |
1953 | pe->pe_number, 1u << shift, | |
1954 | index << shift, npages); | |
e57080f1 AK |
1955 | } |
1956 | } | |
1957 | ||
decbda25 AK |
1958 | static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index, |
1959 | long npages, unsigned long uaddr, | |
1960 | enum dma_data_direction direction, | |
00085f1e | 1961 | unsigned long attrs) |
4cce9550 | 1962 | { |
decbda25 AK |
1963 | int ret = pnv_tce_build(tbl, index, npages, uaddr, direction, |
1964 | attrs); | |
4cce9550 | 1965 | |
08acce1c | 1966 | if (!ret) |
decbda25 AK |
1967 | pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false); |
1968 | ||
1969 | return ret; | |
1970 | } | |
1971 | ||
05c6cfb9 AK |
1972 | #ifdef CONFIG_IOMMU_API |
1973 | static int pnv_ioda2_tce_xchg(struct iommu_table *tbl, long index, | |
1974 | unsigned long *hpa, enum dma_data_direction *direction) | |
1975 | { | |
1976 | long ret = pnv_tce_xchg(tbl, index, hpa, direction); | |
1977 | ||
08acce1c | 1978 | if (!ret) |
05c6cfb9 AK |
1979 | pnv_pci_ioda2_tce_invalidate(tbl, index, 1, false); |
1980 | ||
1981 | return ret; | |
1982 | } | |
1983 | #endif | |
1984 | ||
decbda25 AK |
1985 | static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index, |
1986 | long npages) | |
1987 | { | |
1988 | pnv_tce_free(tbl, index, npages); | |
1989 | ||
08acce1c | 1990 | pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false); |
4cce9550 GS |
1991 | } |
1992 | ||
4793d65d AK |
1993 | static void pnv_ioda2_table_free(struct iommu_table *tbl) |
1994 | { | |
1995 | pnv_pci_ioda2_table_free_pages(tbl); | |
1996 | iommu_free_table(tbl, "pnv"); | |
1997 | } | |
1998 | ||
da004c36 | 1999 | static struct iommu_table_ops pnv_ioda2_iommu_ops = { |
decbda25 | 2000 | .set = pnv_ioda2_tce_build, |
05c6cfb9 AK |
2001 | #ifdef CONFIG_IOMMU_API |
2002 | .exchange = pnv_ioda2_tce_xchg, | |
2003 | #endif | |
decbda25 | 2004 | .clear = pnv_ioda2_tce_free, |
da004c36 | 2005 | .get = pnv_tce_get, |
4793d65d | 2006 | .free = pnv_ioda2_table_free, |
da004c36 AK |
2007 | }; |
2008 | ||
801846d1 GS |
2009 | static int pnv_pci_ioda_dev_dma_weight(struct pci_dev *dev, void *data) |
2010 | { | |
2011 | unsigned int *weight = (unsigned int *)data; | |
2012 | ||
2013 | /* This is quite simplistic. The "base" weight of a device | |
2014 | * is 10. A weight of 0 means no DMA is accounted for the device. | |
2015 | */ | |
2016 | if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL) | |
2017 | return 0; | |
2018 | ||
2019 | if (dev->class == PCI_CLASS_SERIAL_USB_UHCI || | |
2020 | dev->class == PCI_CLASS_SERIAL_USB_OHCI || | |
2021 | dev->class == PCI_CLASS_SERIAL_USB_EHCI) | |
2022 | *weight += 3; | |
2023 | else if ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID) | |
2024 | *weight += 15; | |
2025 | else | |
2026 | *weight += 10; | |
2027 | ||
2028 | return 0; | |
2029 | } | |
2030 | ||
2031 | static unsigned int pnv_pci_ioda_pe_dma_weight(struct pnv_ioda_pe *pe) | |
2032 | { | |
2033 | unsigned int weight = 0; | |
2034 | ||
2035 | /* SRIOV VF has same DMA32 weight as its PF */ | |
2036 | #ifdef CONFIG_PCI_IOV | |
2037 | if ((pe->flags & PNV_IODA_PE_VF) && pe->parent_dev) { | |
2038 | pnv_pci_ioda_dev_dma_weight(pe->parent_dev, &weight); | |
2039 | return weight; | |
2040 | } | |
2041 | #endif | |
2042 | ||
2043 | if ((pe->flags & PNV_IODA_PE_DEV) && pe->pdev) { | |
2044 | pnv_pci_ioda_dev_dma_weight(pe->pdev, &weight); | |
2045 | } else if ((pe->flags & PNV_IODA_PE_BUS) && pe->pbus) { | |
2046 | struct pci_dev *pdev; | |
2047 | ||
2048 | list_for_each_entry(pdev, &pe->pbus->devices, bus_list) | |
2049 | pnv_pci_ioda_dev_dma_weight(pdev, &weight); | |
2050 | } else if ((pe->flags & PNV_IODA_PE_BUS_ALL) && pe->pbus) { | |
2051 | pci_walk_bus(pe->pbus, pnv_pci_ioda_dev_dma_weight, &weight); | |
2052 | } | |
2053 | ||
2054 | return weight; | |
2055 | } | |
2056 | ||
b30d936f | 2057 | static void pnv_pci_ioda1_setup_dma_pe(struct pnv_phb *phb, |
2b923ed1 | 2058 | struct pnv_ioda_pe *pe) |
184cd4a3 BH |
2059 | { |
2060 | ||
2061 | struct page *tce_mem = NULL; | |
184cd4a3 | 2062 | struct iommu_table *tbl; |
2b923ed1 GS |
2063 | unsigned int weight, total_weight = 0; |
2064 | unsigned int tce32_segsz, base, segs, avail, i; | |
184cd4a3 BH |
2065 | int64_t rc; |
2066 | void *addr; | |
2067 | ||
184cd4a3 BH |
2068 | /* XXX FIXME: Handle 64-bit only DMA devices */ |
2069 | /* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */ | |
2070 | /* XXX FIXME: Allocate multi-level tables on PHB3 */ | |
2b923ed1 GS |
2071 | weight = pnv_pci_ioda_pe_dma_weight(pe); |
2072 | if (!weight) | |
2073 | return; | |
2074 | ||
2075 | pci_walk_bus(phb->hose->bus, pnv_pci_ioda_dev_dma_weight, | |
2076 | &total_weight); | |
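/*
 * Scale the PHB's DMA32 segments by this PE's share of the total
 * DMA weight. Illustrative example (the numbers are hypothetical):
 * with 16 DMA32 segments, a PE of weight 10 and a total weight of
 * 40 gets (10 * 16) / 40 = 4 segments.
 */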
2077 | segs = (weight * phb->ioda.dma32_count) / total_weight; | |
2078 | if (!segs) | |
2079 | segs = 1; | |
184cd4a3 | 2080 | |
2b923ed1 GS |
2081 | /* |
2082 | * Allocate contiguous DMA32 segments. We begin with the expected | |
2083 | * number of segments. On each subsequent attempt, the number of | |
2084 | * DMA32 segments to be allocated is decreased by one, until the | |
2085 | * allocation succeeds or no segments remain. | |
2086 | */ | |
2087 | do { | |
2088 | for (base = 0; base <= phb->ioda.dma32_count - segs; base++) { | |
2089 | for (avail = 0, i = base; i < base + segs; i++) { | |
2090 | if (phb->ioda.dma32_segmap[i] == | |
2091 | IODA_INVALID_PE) | |
2092 | avail++; | |
2093 | } | |
2094 | ||
2095 | if (avail == segs) | |
2096 | goto found; | |
2097 | } | |
2098 | } while (--segs); | |
2099 | ||
2100 | if (!segs) { | |
2101 | pe_warn(pe, "No available DMA32 segments\n"); | |
2102 | return; | |
2103 | } | |
2104 | ||
2105 | found: | |
0eaf4def | 2106 | tbl = pnv_pci_table_alloc(phb->hose->node); |
b348aa65 AK |
2107 | iommu_register_group(&pe->table_group, phb->hose->global_number, |
2108 | pe->pe_number); | |
0eaf4def | 2109 | pnv_pci_link_table_and_group(phb->hose->node, 0, tbl, &pe->table_group); |
c5773822 | 2110 | |
184cd4a3 | 2111 | /* Grab a 32-bit TCE table */ |
2b923ed1 GS |
2112 | pe_info(pe, "DMA weight %d (%d), assigned (%d) %d DMA32 segments\n", |
2113 | weight, total_weight, base, segs); | |
184cd4a3 | 2114 | pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n", |
acce971c GS |
2115 | base * PNV_IODA1_DMA32_SEGSIZE, |
2116 | (base + segs) * PNV_IODA1_DMA32_SEGSIZE - 1); | |
184cd4a3 BH |
2117 | |
2118 | /* XXX Currently, we allocate one big contiguous table for the | |
2119 | * TCEs. We only really need one chunk per 256M of TCE space | |
2120 | * (ie per segment) but that's an optimization for later, it | |
2121 | * requires some added smarts with our get/put_tce implementation | |
acce971c GS |
2122 | * |
2123 | * Each TCE page is 4KB in size and each TCE entry occupies 8 | |
2124 | * bytes | |
184cd4a3 | 2125 | */ |
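/*
 * Illustrative check of the arithmetic: a 256MB (0x10000000)
 * DMA32 segment covered by 4KB pages needs 0x10000000 >> 12
 * TCEs of 8 bytes each, i.e. 0x10000000 >> (12 - 3) = 512KB
 * of TCE memory per segment.
 */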
acce971c | 2126 | tce32_segsz = PNV_IODA1_DMA32_SEGSIZE >> (IOMMU_PAGE_SHIFT_4K - 3); |
184cd4a3 | 2127 | tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL, |
acce971c | 2128 | get_order(tce32_segsz * segs)); |
184cd4a3 BH |
2129 | if (!tce_mem) { |
2130 | pe_err(pe, " Failed to allocate a 32-bit TCE memory\n"); | |
2131 | goto fail; | |
2132 | } | |
2133 | addr = page_address(tce_mem); | |
acce971c | 2134 | memset(addr, 0, tce32_segsz * segs); |
184cd4a3 BH |
2135 | |
2136 | /* Configure HW */ | |
2137 | for (i = 0; i < segs; i++) { | |
2138 | rc = opal_pci_map_pe_dma_window(phb->opal_id, | |
2139 | pe->pe_number, | |
2140 | base + i, 1, | |
acce971c GS |
2141 | __pa(addr) + tce32_segsz * i, |
2142 | tce32_segsz, IOMMU_PAGE_SIZE_4K); | |
184cd4a3 BH |
2143 | if (rc) { |
2144 | pe_err(pe, " Failed to configure 32-bit TCE table," | |
2145 | " err %ld\n", rc); | |
2146 | goto fail; | |
2147 | } | |
2148 | } | |
2149 | ||
2b923ed1 GS |
2150 | /* Setup DMA32 segment mapping */ |
2151 | for (i = base; i < base + segs; i++) | |
2152 | phb->ioda.dma32_segmap[i] = pe->pe_number; | |
2153 | ||
184cd4a3 | 2154 | /* Setup linux iommu table */ |
acce971c GS |
2155 | pnv_pci_setup_iommu_table(tbl, addr, tce32_segsz * segs, |
2156 | base * PNV_IODA1_DMA32_SEGSIZE, | |
2157 | IOMMU_PAGE_SHIFT_4K); | |
184cd4a3 | 2158 | |
da004c36 | 2159 | tbl->it_ops = &pnv_ioda1_iommu_ops; |
4793d65d AK |
2160 | pe->table_group.tce32_start = tbl->it_offset << tbl->it_page_shift; |
2161 | pe->table_group.tce32_size = tbl->it_size << tbl->it_page_shift; | |
184cd4a3 BH |
2162 | iommu_init_table(tbl, phb->hose->node); |
2163 | ||
781a868f | 2164 | if (pe->flags & PNV_IODA_PE_DEV) { |
4617082e AK |
2165 | /* |
2166 | * Setting table base here only for carrying iommu_group | |
2167 | * further down to let iommu_add_device() do the job. | |
2168 | * pnv_pci_ioda_dma_dev_setup will override it later anyway. | |
2169 | */ | |
2170 | set_iommu_table_base(&pe->pdev->dev, tbl); | |
2171 | iommu_add_device(&pe->pdev->dev); | |
c5773822 | 2172 | } else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) |
ea30e99e | 2173 | pnv_ioda_setup_bus_dma(pe, pe->pbus); |
74251fe2 | 2174 | |
184cd4a3 BH |
2175 | return; |
2176 | fail: | |
2177 | /* XXX Failure: Try to fallback to 64-bit only ? */ | |
184cd4a3 | 2178 | if (tce_mem) |
acce971c | 2179 | __free_pages(tce_mem, get_order(tce32_segsz * segs)); |
0eaf4def AK |
2180 | if (tbl) { |
2181 | pnv_pci_unlink_table_and_group(tbl, &pe->table_group); | |
2182 | iommu_free_table(tbl, "pnv"); | |
2183 | } | |
184cd4a3 BH |
2184 | } |
2185 | ||
43cb60ab AK |
2186 | static long pnv_pci_ioda2_set_window(struct iommu_table_group *table_group, |
2187 | int num, struct iommu_table *tbl) | |
2188 | { | |
2189 | struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, | |
2190 | table_group); | |
2191 | struct pnv_phb *phb = pe->phb; | |
2192 | int64_t rc; | |
bbb845c4 AK |
2193 | const unsigned long size = tbl->it_indirect_levels ? |
2194 | tbl->it_level_size : tbl->it_size; | |
43cb60ab AK |
2195 | const __u64 start_addr = tbl->it_offset << tbl->it_page_shift; |
2196 | const __u64 win_size = tbl->it_size << tbl->it_page_shift; | |
2197 | ||
4793d65d | 2198 | pe_info(pe, "Setting up window#%d %llx..%llx pg=%x\n", num, |
43cb60ab AK |
2199 | start_addr, start_addr + win_size - 1, |
2200 | IOMMU_PAGE_SIZE(tbl)); | |
2201 | ||
2202 | /* | |
2203 | * Map TCE table through TVT. The TVE index is the PE number | |
2204 | * shifted by 1 bit for the 32-bit DMA space. | |
2205 | */ | |
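/*
 * Illustrative example (the PE number is hypothetical): PE# 5 uses
 * TVE 10 for 32-bit window #0 and TVE 11 for the 64-bit bypass
 * window set up by pnv_pci_ioda2_set_bypass() below.
 */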
2206 | rc = opal_pci_map_pe_dma_window(phb->opal_id, | |
2207 | pe->pe_number, | |
4793d65d | 2208 | (pe->pe_number << 1) + num, |
bbb845c4 | 2209 | tbl->it_indirect_levels + 1, |
43cb60ab | 2210 | __pa(tbl->it_base), |
bbb845c4 | 2211 | size << 3, |
43cb60ab AK |
2212 | IOMMU_PAGE_SIZE(tbl)); |
2213 | if (rc) { | |
2214 | pe_err(pe, "Failed to configure TCE table, err %ld\n", rc); | |
2215 | return rc; | |
2216 | } | |
2217 | ||
2218 | pnv_pci_link_table_and_group(phb->hose->node, num, | |
2219 | tbl, &pe->table_group); | |
a34ab7c3 | 2220 | pnv_pci_phb3_tce_invalidate_pe(pe); |
43cb60ab AK |
2221 | |
2222 | return 0; | |
2223 | } | |
2224 | ||
f87a8864 | 2225 | static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable) |
cd15b048 | 2226 | { |
cd15b048 BH |
2227 | uint16_t window_id = (pe->pe_number << 1 ) + 1; |
2228 | int64_t rc; | |
2229 | ||
2230 | pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis"); | |
2231 | if (enable) { | |
2232 | phys_addr_t top = memblock_end_of_DRAM(); | |
2233 | ||
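/*
 * Round the window up to a power of two, as the bypass window
 * size programmed into the TVE must be a power of two.
 */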
2234 | top = roundup_pow_of_two(top); | |
2235 | rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, | |
2236 | pe->pe_number, | |
2237 | window_id, | |
2238 | pe->tce_bypass_base, | |
2239 | top); | |
2240 | } else { | |
2241 | rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, | |
2242 | pe->pe_number, | |
2243 | window_id, | |
2244 | pe->tce_bypass_base, | |
2245 | 0); | |
cd15b048 BH |
2246 | } |
2247 | if (rc) | |
2248 | pe_err(pe, "OPAL error %lld configuring bypass window\n", rc); | |
2249 | else | |
2250 | pe->tce_bypass_enabled = enable; | |
2251 | } | |
2252 | ||
4793d65d AK |
2253 | static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset, |
2254 | __u32 page_shift, __u64 window_size, __u32 levels, | |
2255 | struct iommu_table *tbl); | |
2256 | ||
2257 | static long pnv_pci_ioda2_create_table(struct iommu_table_group *table_group, | |
2258 | int num, __u32 page_shift, __u64 window_size, __u32 levels, | |
2259 | struct iommu_table **ptbl) | |
2260 | { | |
2261 | struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, | |
2262 | table_group); | |
2263 | int nid = pe->phb->hose->node; | |
2264 | __u64 bus_offset = num ? pe->tce_bypass_base : table_group->tce32_start; | |
2265 | long ret; | |
2266 | struct iommu_table *tbl; | |
2267 | ||
2268 | tbl = pnv_pci_table_alloc(nid); | |
2269 | if (!tbl) | |
2270 | return -ENOMEM; | |
2271 | ||
2272 | ret = pnv_pci_ioda2_table_alloc_pages(nid, | |
2273 | bus_offset, page_shift, window_size, | |
2274 | levels, tbl); | |
2275 | if (ret) { | |
2276 | iommu_free_table(tbl, "pnv"); | |
2277 | return ret; | |
2278 | } | |
2279 | ||
2280 | tbl->it_ops = &pnv_ioda2_iommu_ops; | |
4793d65d AK |
2281 | |
2282 | *ptbl = tbl; | |
2283 | ||
2284 | return 0; | |
2285 | } | |
2286 | ||
46d3e1e1 AK |
2287 | static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe) |
2288 | { | |
2289 | struct iommu_table *tbl = NULL; | |
2290 | long rc; | |
2291 | ||
fa144869 NA |
2292 | /* |
2293 | * crashkernel= specifies the kdump kernel's maximum memory at | |
2294 | * some offset and there is no guarantee the result is a power | |
2295 | * of 2, which will cause errors later. | |
2296 | */ | |
2297 | const u64 max_memory = __rounddown_pow_of_two(memory_hotplug_max()); | |
2298 | ||
bb005455 NA |
2299 | /* |
2300 | * In memory constrained environments, e.g. kdump kernel, the | |
2301 | * DMA window can be larger than available memory, which will | |
2302 | * cause errors later. | |
2303 | */ | |
fa144869 | 2304 | const u64 window_size = min((u64)pe->table_group.tce32_size, max_memory); |
bb005455 | 2305 | |
46d3e1e1 AK |
2306 | rc = pnv_pci_ioda2_create_table(&pe->table_group, 0, |
2307 | IOMMU_PAGE_SHIFT_4K, | |
bb005455 | 2308 | window_size, |
46d3e1e1 AK |
2309 | POWERNV_IOMMU_DEFAULT_LEVELS, &tbl); |
2310 | if (rc) { | |
2311 | pe_err(pe, "Failed to create 32-bit TCE table, err %ld", | |
2312 | rc); | |
2313 | return rc; | |
2314 | } | |
2315 | ||
2316 | iommu_init_table(tbl, pe->phb->hose->node); | |
2317 | ||
2318 | rc = pnv_pci_ioda2_set_window(&pe->table_group, 0, tbl); | |
2319 | if (rc) { | |
2320 | pe_err(pe, "Failed to configure 32-bit TCE table, err %ld\n", | |
2321 | rc); | |
2322 | pnv_ioda2_table_free(tbl); | |
2323 | return rc; | |
2324 | } | |
2325 | ||
2326 | if (!pnv_iommu_bypass_disabled) | |
2327 | pnv_pci_ioda2_set_bypass(pe, true); | |
2328 | ||
46d3e1e1 AK |
2329 | /* |
2330 | * Setting table base here only for carrying iommu_group | |
2331 | * further down to let iommu_add_device() do the job. | |
2332 | * pnv_pci_ioda_dma_dev_setup will override it later anyway. | |
2333 | */ | |
2334 | if (pe->flags & PNV_IODA_PE_DEV) | |
2335 | set_iommu_table_base(&pe->pdev->dev, tbl); | |
2336 | ||
2337 | return 0; | |
2338 | } | |
2339 | ||
b5926430 AK |
2340 | #if defined(CONFIG_IOMMU_API) || defined(CONFIG_PCI_IOV) |
2341 | static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group, | |
2342 | int num) | |
2343 | { | |
2344 | struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, | |
2345 | table_group); | |
2346 | struct pnv_phb *phb = pe->phb; | |
2347 | long ret; | |
2348 | ||
2349 | pe_info(pe, "Removing DMA window #%d\n", num); | |
2350 | ||
2351 | ret = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number, | |
2352 | (pe->pe_number << 1) + num, | |
2353 | 0/* levels */, 0/* table address */, | |
2354 | 0/* table size */, 0/* page size */); | |
2355 | if (ret) | |
2356 | pe_warn(pe, "Unmapping failed, ret = %ld\n", ret); | |
2357 | else | |
a34ab7c3 | 2358 | pnv_pci_phb3_tce_invalidate_pe(pe); |
b5926430 AK |
2359 | |
2360 | pnv_pci_unlink_table_and_group(table_group->tables[num], table_group); | |
2361 | ||
2362 | return ret; | |
2363 | } | |
2364 | #endif | |
2365 | ||
f87a8864 | 2366 | #ifdef CONFIG_IOMMU_API |
00547193 AK |
2367 | static unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift, |
2368 | __u64 window_size, __u32 levels) | |
2369 | { | |
2370 | unsigned long bytes = 0; | |
2371 | const unsigned window_shift = ilog2(window_size); | |
2372 | unsigned entries_shift = window_shift - page_shift; | |
2373 | unsigned table_shift = entries_shift + 3; | |
2374 | unsigned long tce_table_size = max(0x1000UL, 1UL << table_shift); | |
2375 | unsigned long direct_table_size; | |
2376 | ||
2377 | if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS) || | |
2378 | (window_size > memory_hotplug_max()) || | |
2379 | !is_power_of_2(window_size)) | |
2380 | return 0; | |
2381 | ||
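/*
 * Illustrative example (the sizes are hypothetical): a 4GB window
 * of 4KB pages needs 2^20 TCEs. With levels = 2, entries_shift
 * becomes (20 + 1) / 2 = 10, so each level's table is
 * 2^(10 + 3) = 8KB, rounded up to PAGE_SIZE.
 */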
2382 | /* Calculate a direct table size from window_size and levels */ | |
2383 | entries_shift = (entries_shift + levels - 1) / levels; | |
2384 | table_shift = entries_shift + 3; | |
2385 | table_shift = max_t(unsigned, table_shift, PAGE_SHIFT); | |
2386 | direct_table_size = 1UL << table_shift; | |
2387 | ||
2388 | for ( ; levels; --levels) { | |
2389 | bytes += _ALIGN_UP(tce_table_size, direct_table_size); | |
2390 | ||
2391 | tce_table_size /= direct_table_size; | |
2392 | tce_table_size <<= 3; | |
2393 | tce_table_size = _ALIGN_UP(tce_table_size, direct_table_size); | |
2394 | } | |
2395 | ||
2396 | return bytes; | |
2397 | } | |
2398 | ||
f87a8864 | 2399 | static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group) |
cd15b048 | 2400 | { |
f87a8864 AK |
2401 | struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, |
2402 | table_group); | |
46d3e1e1 AK |
2403 | /* Store @tbl as pnv_pci_ioda2_unset_window() resets it */ |
2404 | struct iommu_table *tbl = pe->table_group.tables[0]; | |
cd15b048 | 2405 | |
f87a8864 | 2406 | pnv_pci_ioda2_set_bypass(pe, false); |
46d3e1e1 AK |
2407 | pnv_pci_ioda2_unset_window(&pe->table_group, 0); |
2408 | pnv_ioda2_table_free(tbl); | |
f87a8864 | 2409 | } |
cd15b048 | 2410 | |
f87a8864 AK |
2411 | static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group) |
2412 | { | |
2413 | struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, | |
2414 | table_group); | |
2415 | ||
46d3e1e1 | 2416 | pnv_pci_ioda2_setup_default_config(pe); |
cd15b048 BH |
2417 | } |
2418 | ||
f87a8864 | 2419 | static struct iommu_table_group_ops pnv_pci_ioda2_ops = { |
00547193 | 2420 | .get_table_size = pnv_pci_ioda2_get_table_size, |
4793d65d AK |
2421 | .create_table = pnv_pci_ioda2_create_table, |
2422 | .set_window = pnv_pci_ioda2_set_window, | |
2423 | .unset_window = pnv_pci_ioda2_unset_window, | |
f87a8864 AK |
2424 | .take_ownership = pnv_ioda2_take_ownership, |
2425 | .release_ownership = pnv_ioda2_release_ownership, | |
2426 | }; | |
b5cb9ab1 AK |
2427 | |
2428 | static int gpe_table_group_to_npe_cb(struct device *dev, void *opaque) | |
2429 | { | |
2430 | struct pci_controller *hose; | |
2431 | struct pnv_phb *phb; | |
2432 | struct pnv_ioda_pe **ptmppe = opaque; | |
2433 | struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); | |
2434 | struct pci_dn *pdn = pci_get_pdn(pdev); | |
2435 | ||
2436 | if (!pdn || pdn->pe_number == IODA_INVALID_PE) | |
2437 | return 0; | |
2438 | ||
2439 | hose = pci_bus_to_host(pdev->bus); | |
2440 | phb = hose->private_data; | |
2441 | if (phb->type != PNV_PHB_NPU) | |
2442 | return 0; | |
2443 | ||
2444 | *ptmppe = &phb->ioda.pe_array[pdn->pe_number]; | |
2445 | ||
2446 | return 1; | |
2447 | } | |
2448 | ||
2449 | /* | |
2450 | * This returns the PE of the associated NPU. | |
2451 | * This assumes that the NPU is in the same IOMMU group as the GPU | |
2452 | * and there are no other PEs. | |
2453 | */ | |
2454 | static struct pnv_ioda_pe *gpe_table_group_to_npe( | |
2455 | struct iommu_table_group *table_group) | |
2456 | { | |
2457 | struct pnv_ioda_pe *npe = NULL; | |
2458 | int ret = iommu_group_for_each_dev(table_group->group, &npe, | |
2459 | gpe_table_group_to_npe_cb); | |
2460 | ||
2461 | BUG_ON(!ret || !npe); | |
2462 | ||
2463 | return npe; | |
2464 | } | |
2465 | ||
2466 | static long pnv_pci_ioda2_npu_set_window(struct iommu_table_group *table_group, | |
2467 | int num, struct iommu_table *tbl) | |
2468 | { | |
2469 | long ret = pnv_pci_ioda2_set_window(table_group, num, tbl); | |
2470 | ||
2471 | if (ret) | |
2472 | return ret; | |
2473 | ||
2474 | ret = pnv_npu_set_window(gpe_table_group_to_npe(table_group), num, tbl); | |
2475 | if (ret) | |
2476 | pnv_pci_ioda2_unset_window(table_group, num); | |
2477 | ||
2478 | return ret; | |
2479 | } | |
2480 | ||
2481 | static long pnv_pci_ioda2_npu_unset_window( | |
2482 | struct iommu_table_group *table_group, | |
2483 | int num) | |
2484 | { | |
2485 | long ret = pnv_pci_ioda2_unset_window(table_group, num); | |
2486 | ||
2487 | if (ret) | |
2488 | return ret; | |
2489 | ||
2490 | return pnv_npu_unset_window(gpe_table_group_to_npe(table_group), num); | |
2491 | } | |
2492 | ||
2493 | static void pnv_ioda2_npu_take_ownership(struct iommu_table_group *table_group) | |
2494 | { | |
2495 | /* | |
2496 | * Detach NPU first as pnv_ioda2_take_ownership() will destroy | |
2497 | * the iommu_table if 32bit DMA is enabled. | |
2498 | */ | |
2499 | pnv_npu_take_ownership(gpe_table_group_to_npe(table_group)); | |
2500 | pnv_ioda2_take_ownership(table_group); | |
2501 | } | |
2502 | ||
2503 | static struct iommu_table_group_ops pnv_pci_ioda2_npu_ops = { | |
2504 | .get_table_size = pnv_pci_ioda2_get_table_size, | |
2505 | .create_table = pnv_pci_ioda2_create_table, | |
2506 | .set_window = pnv_pci_ioda2_npu_set_window, | |
2507 | .unset_window = pnv_pci_ioda2_npu_unset_window, | |
2508 | .take_ownership = pnv_ioda2_npu_take_ownership, | |
2509 | .release_ownership = pnv_ioda2_release_ownership, | |
2510 | }; | |
2511 | ||
2512 | static void pnv_pci_ioda_setup_iommu_api(void) | |
2513 | { | |
2514 | struct pci_controller *hose, *tmp; | |
2515 | struct pnv_phb *phb; | |
2516 | struct pnv_ioda_pe *pe, *gpe; | |
2517 | ||
2518 | /* | |
2519 | * Now we have all PHBs discovered, time to add NPU devices to | |
2520 | * the corresponding IOMMU groups. | |
2521 | */ | |
2522 | list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { | |
2523 | phb = hose->private_data; | |
2524 | ||
2525 | if (phb->type != PNV_PHB_NPU) | |
2526 | continue; | |
2527 | ||
2528 | list_for_each_entry(pe, &phb->ioda.pe_list, list) { | |
2529 | gpe = pnv_pci_npu_setup_iommu(pe); | |
2530 | if (gpe) | |
2531 | gpe->table_group.ops = &pnv_pci_ioda2_npu_ops; | |
2532 | } | |
2533 | } | |
2534 | } | |
2535 | #else /* !CONFIG_IOMMU_API */ | |
2536 | static void pnv_pci_ioda_setup_iommu_api(void) { } | |
f87a8864 AK |
2537 | #endif |
2538 | ||
bbb845c4 AK |
2539 | static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift, |
2540 | unsigned levels, unsigned long limit, | |
3ba3a73e | 2541 | unsigned long *current_offset, unsigned long *total_allocated) |
373f5657 GS |
2542 | { |
2543 | struct page *tce_mem = NULL; | |
bbb845c4 | 2544 | __be64 *addr, *tmp; |
aca6913f | 2545 | unsigned order = max_t(unsigned, shift, PAGE_SHIFT) - PAGE_SHIFT; |
bbb845c4 AK |
2546 | unsigned long allocated = 1UL << (order + PAGE_SHIFT); |
2547 | unsigned entries = 1UL << (shift - 3); | |
2548 | long i; | |
aca6913f AK |
2549 | |
2550 | tce_mem = alloc_pages_node(nid, GFP_KERNEL, order); | |
2551 | if (!tce_mem) { | |
2552 | pr_err("Failed to allocate a TCE memory, order=%d\n", order); | |
2553 | return NULL; | |
2554 | } | |
2555 | addr = page_address(tce_mem); | |
bbb845c4 | 2556 | memset(addr, 0, allocated); |
3ba3a73e | 2557 | *total_allocated += allocated; |
bbb845c4 AK |
2558 | |
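/*
 * The lowest level holds the actual TCEs; each level above it
 * holds physical pointers to the next level, tagged with
 * TCE_PCI_READ/TCE_PCI_WRITE so that populated entries can be
 * told apart from empty ones when the table is freed.
 */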
2559 | --levels; | |
2560 | if (!levels) { | |
2561 | *current_offset += allocated; | |
2562 | return addr; | |
2563 | } | |
2564 | ||
2565 | for (i = 0; i < entries; ++i) { | |
2566 | tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift, | |
3ba3a73e | 2567 | levels, limit, current_offset, total_allocated); |
bbb845c4 AK |
2568 | if (!tmp) |
2569 | break; | |
2570 | ||
2571 | addr[i] = cpu_to_be64(__pa(tmp) | | |
2572 | TCE_PCI_READ | TCE_PCI_WRITE); | |
2573 | ||
2574 | if (*current_offset >= limit) | |
2575 | break; | |
2576 | } | |
aca6913f AK |
2577 | |
2578 | return addr; | |
2579 | } | |
2580 | ||
bbb845c4 AK |
2581 | static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr, |
2582 | unsigned long size, unsigned level); | |
2583 | ||
aca6913f | 2584 | static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset, |
bbb845c4 AK |
2585 | __u32 page_shift, __u64 window_size, __u32 levels, |
2586 | struct iommu_table *tbl) | |
aca6913f | 2587 | { |
373f5657 | 2588 | void *addr; |
3ba3a73e | 2589 | unsigned long offset = 0, level_shift, total_allocated = 0; |
aca6913f AK |
2590 | const unsigned window_shift = ilog2(window_size); |
2591 | unsigned entries_shift = window_shift - page_shift; | |
2592 | unsigned table_shift = max_t(unsigned, entries_shift + 3, PAGE_SHIFT); | |
2593 | const unsigned long tce_table_size = 1UL << table_shift; | |
2594 | ||
bbb845c4 AK |
2595 | if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS)) |
2596 | return -EINVAL; | |
2597 | ||
aca6913f AK |
2598 | if ((window_size > memory_hotplug_max()) || !is_power_of_2(window_size)) |
2599 | return -EINVAL; | |
2600 | ||
bbb845c4 AK |
2601 | /* Adjust direct table size from window_size and levels */ |
2602 | entries_shift = (entries_shift + levels - 1) / levels; | |
2603 | level_shift = entries_shift + 3; | |
2604 | level_shift = max_t(unsigned, level_shift, PAGE_SHIFT); | |
2605 | ||
aca6913f | 2606 | /* Allocate TCE table */ |
bbb845c4 | 2607 | addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift, |
3ba3a73e | 2608 | levels, tce_table_size, &offset, &total_allocated); |
bbb845c4 AK |
2609 | |
2610 | /* addr==NULL means that the first level allocation failed */ | |
aca6913f AK |
2611 | if (!addr) |
2612 | return -ENOMEM; | |
2613 | ||
bbb845c4 AK |
2614 | /* |
2615 | * The first level was allocated but some lower level failed as | |
2616 | * we did not allocate as much as we wanted; | |
2617 | * release the partially allocated table. | |
2618 | */ | |
2619 | if (offset < tce_table_size) { | |
2620 | pnv_pci_ioda2_table_do_free_pages(addr, | |
2621 | 1ULL << (level_shift - 3), levels - 1); | |
2622 | return -ENOMEM; | |
2623 | } | |
2624 | ||
aca6913f AK |
2625 | /* Setup linux iommu table */ |
2626 | pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, bus_offset, | |
2627 | page_shift); | |
bbb845c4 AK |
2628 | tbl->it_level_size = 1ULL << (level_shift - 3); |
2629 | tbl->it_indirect_levels = levels - 1; | |
3ba3a73e | 2630 | tbl->it_allocated_size = total_allocated; |
aca6913f AK |
2631 | |
2632 | pr_devel("Created TCE table: ws=%08llx ts=%lx @%08llx\n", | |
2633 | window_size, tce_table_size, bus_offset); | |
2634 | ||
2635 | return 0; | |
2636 | } | |
2637 | ||
bbb845c4 AK |
2638 | static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr, |
2639 | unsigned long size, unsigned level) | |
2640 | { | |
2641 | const unsigned long addr_ul = (unsigned long) addr & | |
2642 | ~(TCE_PCI_READ | TCE_PCI_WRITE); | |
2643 | ||
2644 | if (level) { | |
2645 | long i; | |
2646 | u64 *tmp = (u64 *) addr_ul; | |
2647 | ||
2648 | for (i = 0; i < size; ++i) { | |
2649 | unsigned long hpa = be64_to_cpu(tmp[i]); | |
2650 | ||
2651 | if (!(hpa & (TCE_PCI_READ | TCE_PCI_WRITE))) | |
2652 | continue; | |
2653 | ||
2654 | pnv_pci_ioda2_table_do_free_pages(__va(hpa), size, | |
2655 | level - 1); | |
2656 | } | |
2657 | } | |
2658 | ||
2659 | free_pages(addr_ul, get_order(size << 3)); | |
2660 | } | |
2661 | ||
aca6913f AK |
2662 | static void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl) |
2663 | { | |
bbb845c4 AK |
2664 | const unsigned long size = tbl->it_indirect_levels ? |
2665 | tbl->it_level_size : tbl->it_size; | |
2666 | ||
aca6913f AK |
2667 | if (!tbl->it_size) |
2668 | return; | |
2669 | ||
bbb845c4 AK |
2670 | pnv_pci_ioda2_table_do_free_pages((__be64 *)tbl->it_base, size, |
2671 | tbl->it_indirect_levels); | |
aca6913f AK |
2672 | } |
2673 | ||
2674 | static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, | |
2675 | struct pnv_ioda_pe *pe) | |
2676 | { | |
373f5657 GS |
2677 | int64_t rc; |
2678 | ||
ccd1c191 GS |
2679 | if (!pnv_pci_ioda_pe_dma_weight(pe)) |
2680 | return; | |
2681 | ||
f87a8864 AK |
2682 | /* TVE #1 is selected by PCI address bit 59 */ |
2683 | pe->tce_bypass_base = 1ull << 59; | |
2684 | ||
b348aa65 AK |
2685 | iommu_register_group(&pe->table_group, phb->hose->global_number, |
2686 | pe->pe_number); | |
c5773822 | 2687 | |
373f5657 | 2688 | /* The PE will reserve all possible 32-bits space */ |
373f5657 | 2689 | pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n", |
aca6913f | 2690 | phb->ioda.m32_pci_base); |
373f5657 | 2691 | |
aca6913f | 2692 | /* Setup linux iommu table */ |
4793d65d AK |
2693 | pe->table_group.tce32_start = 0; |
2694 | pe->table_group.tce32_size = phb->ioda.m32_pci_base; | |
2695 | pe->table_group.max_dynamic_windows_supported = | |
2696 | IOMMU_TABLE_GROUP_MAX_TABLES; | |
2697 | pe->table_group.max_levels = POWERNV_IOMMU_MAX_LEVELS; | |
2698 | pe->table_group.pgsizes = SZ_4K | SZ_64K | SZ_16M; | |
e5aad1e6 AK |
2699 | #ifdef CONFIG_IOMMU_API |
2700 | pe->table_group.ops = &pnv_pci_ioda2_ops; | |
2701 | #endif | |
2702 | ||
46d3e1e1 | 2703 | rc = pnv_pci_ioda2_setup_default_config(pe); |
801846d1 | 2704 | if (rc) |
46d3e1e1 | 2705 | return; |
373f5657 | 2706 | |
46d3e1e1 | 2707 | if (pe->flags & PNV_IODA_PE_DEV) |
4617082e | 2708 | iommu_add_device(&pe->pdev->dev); |
46d3e1e1 | 2709 | else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) |
ea30e99e | 2710 | pnv_ioda_setup_bus_dma(pe, pe->pbus); |
373f5657 GS |
2711 | } |
2712 | ||
184cd4a3 | 2713 | #ifdef CONFIG_PCI_MSI |
137436c9 GS |
2714 | static void pnv_ioda2_msi_eoi(struct irq_data *d) |
2715 | { | |
2716 | unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); | |
2717 | struct irq_chip *chip = irq_data_get_irq_chip(d); | |
2718 | struct pnv_phb *phb = container_of(chip, struct pnv_phb, | |
2719 | ioda.irq_chip); | |
2720 | int64_t rc; | |
2721 | ||
2722 | rc = opal_pci_msi_eoi(phb->opal_id, hw_irq); | |
2723 | WARN_ON_ONCE(rc); | |
2724 | ||
2725 | icp_native_eoi(d); | |
2726 | } | |
2727 | ||
fd9a1c26 | 2728 | |
f456834a | 2729 | void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq) |
fd9a1c26 IM |
2730 | { |
2731 | struct irq_data *idata; | |
2732 | struct irq_chip *ichip; | |
2733 | ||
fb111334 BH |
2734 | /* The MSI EOI OPAL call is only needed on PHB3 */ |
2735 | if (phb->model != PNV_PHB_MODEL_PHB3) | |
fd9a1c26 IM |
2736 | return; |
2737 | ||
2738 | if (!phb->ioda.irq_chip_init) { | |
2739 | /* | |
2740 | * First time we setup an MSI IRQ, we need to setup the | |
2741 | * corresponding IRQ chip to route correctly. | |
2742 | */ | |
2743 | idata = irq_get_irq_data(virq); | |
2744 | ichip = irq_data_get_irq_chip(idata); | |
2745 | phb->ioda.irq_chip_init = 1; | |
2746 | phb->ioda.irq_chip = *ichip; | |
2747 | phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi; | |
2748 | } | |
2749 | irq_set_chip(virq, &phb->ioda.irq_chip); | |
2750 | } | |
2751 | ||
184cd4a3 | 2752 | static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev, |
137436c9 GS |
2753 | unsigned int hwirq, unsigned int virq, |
2754 | unsigned int is_64, struct msi_msg *msg) | |
184cd4a3 BH |
2755 | { |
2756 | struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev); | |
2757 | unsigned int xive_num = hwirq - phb->msi_base; | |
3a1a4661 | 2758 | __be32 data; |
184cd4a3 BH |
2759 | int rc; |
2760 | ||
2761 | /* No PE assigned ? bail out ... no MSI for you ! */ | |
2762 | if (pe == NULL) | |
2763 | return -ENXIO; | |
2764 | ||
2765 | /* Check if we have an MVE */ | |
2766 | if (pe->mve_number < 0) | |
2767 | return -ENXIO; | |
2768 | ||
b72c1f65 | 2769 | /* Force 32-bit MSI on some broken devices */ |
36074381 | 2770 | if (dev->no_64bit_msi) |
b72c1f65 BH |
2771 | is_64 = 0; |
2772 | ||
184cd4a3 BH |
2773 | /* Assign XIVE to PE */ |
2774 | rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num); | |
2775 | if (rc) { | |
2776 | pr_warn("%s: OPAL error %d setting XIVE %d PE\n", | |
2777 | pci_name(dev), rc, xive_num); | |
2778 | return -EIO; | |
2779 | } | |
2780 | ||
2781 | if (is_64) { | |
3a1a4661 BH |
2782 | __be64 addr64; |
2783 | ||
184cd4a3 BH |
2784 | rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1, |
2785 | &addr64, &data); | |
2786 | if (rc) { | |
2787 | pr_warn("%s: OPAL error %d getting 64-bit MSI data\n", | |
2788 | pci_name(dev), rc); | |
2789 | return -EIO; | |
2790 | } | |
3a1a4661 BH |
2791 | msg->address_hi = be64_to_cpu(addr64) >> 32; |
2792 | msg->address_lo = be64_to_cpu(addr64) & 0xfffffffful; | |
184cd4a3 | 2793 | } else { |
3a1a4661 BH |
2794 | __be32 addr32; |
2795 | ||
184cd4a3 BH |
2796 | rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1, |
2797 | &addr32, &data); | |
2798 | if (rc) { | |
2799 | pr_warn("%s: OPAL error %d getting 32-bit MSI data\n", | |
2800 | pci_name(dev), rc); | |
2801 | return -EIO; | |
2802 | } | |
2803 | msg->address_hi = 0; | |
3a1a4661 | 2804 | msg->address_lo = be32_to_cpu(addr32); |
184cd4a3 | 2805 | } |
3a1a4661 | 2806 | msg->data = be32_to_cpu(data); |
184cd4a3 | 2807 | |
f456834a | 2808 | pnv_set_msi_irq_chip(phb, virq); |
137436c9 | 2809 | |
184cd4a3 BH |
2810 | pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d)," |
2811 | " address=%x_%08x data=%x PE# %d\n", | |
2812 | pci_name(dev), is_64 ? "64" : "32", hwirq, xive_num, | |
2813 | msg->address_hi, msg->address_lo, data, pe->pe_number); | |
2814 | ||
2815 | return 0; | |
2816 | } | |
2817 | ||
2818 | static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) | |
2819 | { | |
fb1b55d6 | 2820 | unsigned int count; |
184cd4a3 BH |
2821 | const __be32 *prop = of_get_property(phb->hose->dn, |
2822 | "ibm,opal-msi-ranges", NULL); | |
2823 | if (!prop) { | |
2824 | /* BML Fallback */ | |
2825 | prop = of_get_property(phb->hose->dn, "msi-ranges", NULL); | |
2826 | } | |
2827 | if (!prop) | |
2828 | return; | |
2829 | ||
2830 | phb->msi_base = be32_to_cpup(prop); | |
fb1b55d6 GS |
2831 | count = be32_to_cpup(prop + 1); |
2832 | if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) { | |
184cd4a3 BH |
2833 | pr_err("PCI %d: Failed to allocate MSI bitmap !\n", |
2834 | phb->hose->global_number); | |
2835 | return; | |
2836 | } | |
fb1b55d6 | 2837 | |
184cd4a3 BH |
2838 | phb->msi_setup = pnv_pci_ioda_msi_setup; |
2839 | phb->msi32_support = 1; | |
2840 | pr_info(" Allocated bitmap for %d MSIs (base IRQ 0x%x)\n", | |
fb1b55d6 | 2841 | count, phb->msi_base); |
184cd4a3 BH |
2842 | } |
2843 | #else | |
2844 | static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { } | |
2845 | #endif /* CONFIG_PCI_MSI */ | |
2846 | ||
6e628c7d WY |
2847 | #ifdef CONFIG_PCI_IOV |
2848 | static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev) | |
2849 | { | |
f2dd0afe WY |
2850 | struct pci_controller *hose = pci_bus_to_host(pdev->bus); |
2851 | struct pnv_phb *phb = hose->private_data; | |
2852 | const resource_size_t gate = phb->ioda.m64_segsize >> 2; | |
6e628c7d WY |
2853 | struct resource *res; |
2854 | int i; | |
dfcc8d45 | 2855 | resource_size_t size, total_vf_bar_sz; |
6e628c7d | 2856 | struct pci_dn *pdn; |
5b88ec22 | 2857 | int mul, total_vfs; |
6e628c7d WY |
2858 | |
2859 | if (!pdev->is_physfn || pdev->is_added) | |
2860 | return; | |
2861 | ||
6e628c7d WY |
2862 | pdn = pci_get_pdn(pdev); |
2863 | pdn->vfs_expanded = 0; | |
ee8222fe | 2864 | pdn->m64_single_mode = false; |
6e628c7d | 2865 | |
5b88ec22 | 2866 | total_vfs = pci_sriov_get_totalvfs(pdev); |
92b8f137 | 2867 | mul = phb->ioda.total_pe_num; |
dfcc8d45 | 2868 | total_vf_bar_sz = 0; |
5b88ec22 WY |
2869 | |
2870 | for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { | |
2871 | res = &pdev->resource[i + PCI_IOV_RESOURCES]; | |
2872 | if (!res->flags || res->parent) | |
2873 | continue; | |
5958d19a | 2874 | if (!pnv_pci_is_m64(phb, res)) { |
b0331854 WY |
2875 | dev_warn(&pdev->dev, "Don't support SR-IOV with" |
2876 | " non M64 VF BAR%d: %pR. \n", | |
5b88ec22 | 2877 | i, res); |
b0331854 | 2878 | goto truncate_iov; |
5b88ec22 WY |
2879 | } |
2880 | ||
dfcc8d45 WY |
2881 | total_vf_bar_sz += pci_iov_resource_size(pdev, |
2882 | i + PCI_IOV_RESOURCES); | |
5b88ec22 | 2883 | |
f2dd0afe WY |
2884 | /* |
2885 | * If the total VF BAR size is bigger than a quarter of the | |
2886 | * M64 segment size, just round the number of VFs up to a | |
2887 | * power of two and use that as the expansion multiplier. | |
2888 | * | |
2889 | * Generally, one M64 BAR maps one IOV BAR. To avoid conflict | |
2890 | * with other devices, the IOV BAR size is expanded to be | |
2891 | * (total_pe * VF_BAR_size). When VF_BAR_size is half of the | |
2892 | * M64 segment size, the expanded size would equal half of the | |
2893 | * whole M64 space, exhausting it and limiting flexibility; | |
2894 | * hence the boundary is set to a quarter of the segment size. | |
2895 | */ | |
dfcc8d45 | 2896 | if (total_vf_bar_sz > gate) { |
5b88ec22 | 2897 | mul = roundup_pow_of_two(total_vfs); |
dfcc8d45 WY |
2898 | dev_info(&pdev->dev, |
2899 | "VF BAR Total IOV size %llx > %llx, roundup to %d VFs\n", | |
2900 | total_vf_bar_sz, gate, mul); | |
ee8222fe | 2901 | pdn->m64_single_mode = true; |
5b88ec22 WY |
2902 | break; |
2903 | } | |
2904 | } | |
2905 | ||
6e628c7d WY |
2906 | for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { |
2907 | res = &pdev->resource[i + PCI_IOV_RESOURCES]; | |
2908 | if (!res->flags || res->parent) | |
2909 | continue; | |
6e628c7d | 2910 | |
6e628c7d | 2911 | size = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES); |
ee8222fe WY |
2912 | /* |
2913 | * On PHB3, the minimum size alignment of M64 BAR in single | |
2914 | * mode is 32MB. | |
2915 | */ | |
2916 | if (pdn->m64_single_mode && (size < SZ_32M)) | |
2917 | goto truncate_iov; | |
2918 | dev_dbg(&pdev->dev, " Fixing VF BAR%d: %pR to\n", i, res); | |
5b88ec22 | 2919 | res->end = res->start + size * mul - 1; |
6e628c7d WY |
2920 | dev_dbg(&pdev->dev, " %pR\n", res); |
2921 | dev_info(&pdev->dev, "VF BAR%d: %pR (expanded to %d VFs for PE alignment)", | |
5b88ec22 | 2922 | i, res, mul); |
6e628c7d | 2923 | } |
5b88ec22 | 2924 | pdn->vfs_expanded = mul; |
b0331854 WY |
2925 | |
2926 | return; | |
2927 | ||
2928 | truncate_iov: | |
2929 | /* To save MMIO space, IOV BAR is truncated. */ | |
2930 | for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { | |
2931 | res = &pdev->resource[i + PCI_IOV_RESOURCES]; | |
2932 | res->flags = 0; | |
2933 | res->end = res->start - 1; | |
2934 | } | |
6e628c7d WY |
2935 | } |
2936 | #endif /* CONFIG_PCI_IOV */ | |
2937 | ||
23e79425 GS |
2938 | static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe, |
2939 | struct resource *res) | |
2940 | { | |
2941 | struct pnv_phb *phb = pe->phb; | |
2942 | struct pci_bus_region region; | |
2943 | int index; | |
2944 | int64_t rc; | |
2945 | ||
2946 | if (!res || !res->flags || res->start > res->end) | |
2947 | return; | |
2948 | ||
2949 | if (res->flags & IORESOURCE_IO) { | |
2950 | region.start = res->start - phb->ioda.io_pci_base; | |
2951 | region.end = res->end - phb->ioda.io_pci_base; | |
2952 | index = region.start / phb->ioda.io_segsize; | |
2953 | ||
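/*
 * Point every I/O segment covered by the resource at this PE.
 * Illustrative example (the sizes are hypothetical): with a
 * 64KB io_segsize, a resource spanning 0x10000..0x2ffff of
 * PCI I/O space maps segments #1 and #2 to the PE.
 */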
2954 | while (index < phb->ioda.total_pe_num && | |
2955 | region.start <= region.end) { | |
2956 | phb->ioda.io_segmap[index] = pe->pe_number; | |
2957 | rc = opal_pci_map_pe_mmio_window(phb->opal_id, | |
2958 | pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index); | |
2959 | if (rc != OPAL_SUCCESS) { | |
2960 | pr_err("%s: Error %lld mapping IO segment#%d to PE#%d\n", | |
2961 | __func__, rc, index, pe->pe_number); | |
2962 | break; | |
2963 | } | |
2964 | ||
2965 | region.start += phb->ioda.io_segsize; | |
2966 | index++; | |
2967 | } | |
2968 | } else if ((res->flags & IORESOURCE_MEM) && | |
5958d19a | 2969 | !pnv_pci_is_m64(phb, res)) { |
23e79425 GS |
2970 | region.start = res->start - |
2971 | phb->hose->mem_offset[0] - | |
2972 | phb->ioda.m32_pci_base; | |
2973 | region.end = res->end - | |
2974 | phb->hose->mem_offset[0] - | |
2975 | phb->ioda.m32_pci_base; | |
2976 | index = region.start / phb->ioda.m32_segsize; | |
2977 | ||
2978 | while (index < phb->ioda.total_pe_num && | |
2979 | region.start <= region.end) { | |
2980 | phb->ioda.m32_segmap[index] = pe->pe_number; | |
2981 | rc = opal_pci_map_pe_mmio_window(phb->opal_id, | |
2982 | pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index); | |
2983 | if (rc != OPAL_SUCCESS) { | |
2984 | pr_err("%s: Error %lld mapping M32 segment#%d to PE#%d", | |
2985 | __func__, rc, index, pe->pe_number); | |
2986 | break; | |
2987 | } | |
2988 | ||
2989 | region.start += phb->ioda.m32_segsize; | |
2990 | index++; | |
2991 | } | |
2992 | } | |
2993 | } | |
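
/*
 * Illustration for pnv_ioda_setup_pe_res() above (hypothetical
 * numbers): with io_segsize = 0x10000 (64KB), an I/O resource at PCI
 * addresses 0x10000..0x2ffff covers segment indexes 1 and 2, so both
 * io_segmap entries are pointed at the PE and two OPAL mapping calls
 * are issued.
 */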

/*
 * This function is supposed to be called on a per-PE basis, from
 * top to bottom, so that the I/O or MMIO segments assigned to a
 * parent PE can be overridden by its child PEs if necessary.
 */
static void pnv_ioda_setup_pe_seg(struct pnv_ioda_pe *pe)
{
	struct pci_dev *pdev;
	int i;

	/*
	 * NOTE: We only care about PCI bus based PEs for now. PCI
	 * device based PEs, for example SR-IOV VFs, will be figured
	 * out later.
	 */
	BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)));

	list_for_each_entry(pdev, &pe->pbus->devices, bus_list) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++)
			pnv_ioda_setup_pe_res(pe, &pdev->resource[i]);

		/*
		 * If the PE contains all subordinate PCI buses, the
		 * windows of the child bridges should be mapped to
		 * the PE as well.
		 */
		if (!(pe->flags & PNV_IODA_PE_BUS_ALL) || !pci_is_bridge(pdev))
			continue;
		for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
			pnv_ioda_setup_pe_res(pe,
				&pdev->resource[PCI_BRIDGE_RESOURCES + i]);
	}
}

static void pnv_pci_ioda_create_dbgfs(void)
{
#ifdef CONFIG_DEBUG_FS
	struct pci_controller *hose, *tmp;
	struct pnv_phb *phb;
	char name[16];

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		phb = hose->private_data;

		/* Notify that initialization of the PHB is done */
		phb->initialized = 1;

		sprintf(name, "PCI%04x", hose->global_number);
		phb->dbgfs = debugfs_create_dir(name, powerpc_debugfs_root);
		if (!phb->dbgfs)
			pr_warning("%s: Error on creating debugfs on PHB#%x\n",
				   __func__, hose->global_number);
	}
#endif /* CONFIG_DEBUG_FS */
}

static void pnv_pci_ioda_fixup(void)
{
	pnv_pci_ioda_setup_PEs();
	pnv_pci_ioda_setup_iommu_api();
	pnv_pci_ioda_create_dbgfs();

#ifdef CONFIG_EEH
	eeh_init();
	eeh_addr_cache_build();
#endif
}

/*
 * Returns the alignment for I/O or memory windows for P2P
 * bridges. That actually depends on how PEs are segmented.
 * For now, we return the I/O or M32 segment size for PE
 * sensitive P2P bridges. Otherwise, the default values
 * (4KiB for I/O, 1MiB for memory) will be returned.
 *
 * The current PCI bus might be put into one PE, which was
 * created against the parent PCI bridge. In that case, we
 * needn't enlarge the alignment so that we can save some
 * resources.
 */
static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
						unsigned long type)
{
	struct pci_dev *bridge;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	int num_pci_bridges = 0;

	bridge = bus->self;
	while (bridge) {
		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) {
			num_pci_bridges++;
			if (num_pci_bridges >= 2)
				return 1;
		}

		bridge = bridge->bus->self;
	}

	/*
	 * We fall back to M32 if M64 isn't supported. We enforce the M64
	 * alignment for any 64-bit resource, PCIe doesn't care and
	 * bridges only do 64-bit prefetchable anyway.
	 */
	if (phb->ioda.m64_segsize && (type & IORESOURCE_MEM_64))
		return phb->ioda.m64_segsize;
	if (type & IORESOURCE_MEM)
		return phb->ioda.m32_segsize;

	return phb->ioda.io_segsize;
}
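
/*
 * Illustration for pnv_pci_window_alignment() above (hypothetical
 * sizes): if m64_segsize were 256MB, a bridge window carrying a
 * 64-bit prefetchable BAR would be aligned to 256MB; a 32-bit memory
 * window would be aligned to m32_segsize instead, and an I/O window
 * to io_segsize.
 */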

/*
 * We are updating the root port or the upstream port of the
 * bridge behind the root port with the PHB's windows, in order
 * to accommodate changes in the required resources during
 * PCI (slot) hotplug, which is connected to either the root
 * port or the downstream ports of the PCIe switch behind the
 * root port.
 */
static void pnv_pci_fixup_bridge_resources(struct pci_bus *bus,
					   unsigned long type)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dev *bridge = bus->self;
	struct resource *r, *w;
	bool msi_region = false;
	int i;

	/* Check if we need to apply the fixup to the bridge's windows */
	if (!pci_is_root_bus(bridge->bus) &&
	    !pci_is_root_bus(bridge->bus->self->bus))
		return;

	/* Fixup the resources */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		r = &bridge->resource[PCI_BRIDGE_RESOURCES + i];
		if (!r->flags || !r->parent)
			continue;

		w = NULL;
		if (r->flags & type & IORESOURCE_IO)
			w = &hose->io_resource;
		else if (pnv_pci_is_m64(phb, r) &&
			 (type & IORESOURCE_PREFETCH) &&
			 phb->ioda.m64_segsize)
			w = &hose->mem_resources[1];
		else if (r->flags & type & IORESOURCE_MEM) {
			w = &hose->mem_resources[0];
			msi_region = true;
		}

		r->start = w->start;
		r->end = w->end;

		/* The 64KB 32-bit MSI region shouldn't be included in
		 * the 32-bit bridge window. Otherwise, we can see strange
		 * issues. One of them is EEH errors observed on Garrison.
		 *
		 * Exclude the top 1MB region, which is the minimal
		 * alignment of the 32-bit bridge window.
		 */
		if (msi_region) {
			r->end += 0x10000;
			r->end -= 0x100000;
		}
	}
}
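
/*
 * Note on the msi_region arithmetic above: it nets to
 * r->end = w->end + 0x10000 - 0x100000, i.e. the bridge window ends
 * 0xf0000 below the host window end. The intent, per the comment, is
 * to keep the 64KB 32-bit MSI region out of a 1MB-aligned window.
 */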

static void pnv_pci_setup_bridge(struct pci_bus *bus, unsigned long type)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dev *bridge = bus->self;
	struct pnv_ioda_pe *pe;
	bool all = (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE);

	/* Extend the bridge's windows if necessary */
	pnv_pci_fixup_bridge_resources(bus, type);

	/* The PE for the root bus should be realized before any others */
	if (!phb->ioda.root_pe_populated) {
		pe = pnv_ioda_setup_bus_PE(phb->hose->bus, false);
		if (pe) {
			phb->ioda.root_pe_idx = pe->pe_number;
			phb->ioda.root_pe_populated = true;
		}
	}

	/* Don't assign a PE to a PCI bus that has no subordinate devices */
	if (list_empty(&bus->devices))
		return;

	/* Reserve PEs according to the used M64 resources */
	if (phb->reserve_m64_pe)
		phb->reserve_m64_pe(bus, NULL, all);

	/*
	 * Assign a PE. We might run here because of partial hotplug.
	 * In that case, we just pick up the existing PE and should
	 * not allocate resources again.
	 */
	pe = pnv_ioda_setup_bus_PE(bus, all);
	if (!pe)
		return;

	pnv_ioda_setup_pe_seg(pe);
	switch (phb->type) {
	case PNV_PHB_IODA1:
		pnv_pci_ioda1_setup_dma_pe(phb, pe);
		break;
	case PNV_PHB_IODA2:
		pnv_pci_ioda2_setup_dma_pe(phb, pe);
		break;
	default:
		pr_warn("%s: No DMA for PHB#%d (type %d)\n",
			__func__, phb->hose->global_number, phb->type);
	}
}
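
/*
 * Flow of pnv_pci_setup_bridge() above: fix up the bridge windows,
 * realize the root bus PE first if needed, reserve PEs for used M64
 * resources, then assign (or, on partial hotplug, re-find) the bus PE
 * and program its I/O and MMIO segments plus per-PHB-type DMA setup.
 */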

#ifdef CONFIG_PCI_IOV
static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,
						      int resno)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(pdev);
	resource_size_t align;

	/*
	 * On the PowerNV platform, the IOV BAR is mapped by an M64 BAR
	 * to enable SR-IOV. From the hardware perspective, the range
	 * mapped by the M64 BAR must be size aligned.
	 *
	 * When the IOV BAR is mapped with an M64 BAR in Single PE mode,
	 * that powernv-specific hardware restriction is gone. But if we
	 * just used the VF BAR size as the alignment, the PF BAR and a
	 * VF BAR might be allocated within one segment of M64 #15,
	 * which would introduce a PE conflict between the PF and a VF.
	 * Based on this, the minimum alignment of an IOV BAR is
	 * m64_segsize.
	 *
	 * This function returns the total IOV BAR size if the M64 BAR
	 * is in Shared PE mode, or just the VF BAR size if not.
	 * If the M64 BAR is in Single PE mode, it returns the VF BAR
	 * size, or the M64 segment size if the IOV BAR size is less
	 * than that.
	 */
	align = pci_iov_resource_size(pdev, resno);
	if (!pdn->vfs_expanded)
		return align;
	if (pdn->m64_single_mode)
		return max(align, (resource_size_t)phb->ioda.m64_segsize);

	return pdn->vfs_expanded * align;
}
#endif /* CONFIG_PCI_IOV */
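
/*
 * Illustration for pnv_pci_iov_resource_alignment() above (hypothetical
 * numbers): with a 1MB per-VF BAR and vfs_expanded = 8, Shared PE mode
 * returns 8MB; Single PE mode with m64_segsize = 256MB returns 256MB,
 * since max(1MB, 256MB) keeps the PF and VFs in separate segments.
 */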

/* Prevent enabling devices for which we couldn't properly
 * assign a PE
 */
bool pnv_pci_enable_device_hook(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn;

	/* This function is probably called while the PEs have
	 * not been created yet, for example during resource
	 * reassignment in the PCI probe period. We just skip
	 * the check if the PEs aren't ready.
	 */
	if (!phb->initialized)
		return true;

	pdn = pci_get_pdn(dev);
	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return false;

	return true;
}

static long pnv_pci_ioda1_unset_window(struct iommu_table_group *table_group,
				       int num)
{
	struct pnv_ioda_pe *pe = container_of(table_group,
					      struct pnv_ioda_pe, table_group);
	struct pnv_phb *phb = pe->phb;
	unsigned int idx;
	long rc;

	pe_info(pe, "Removing DMA window #%d\n", num);
	for (idx = 0; idx < phb->ioda.dma32_count; idx++) {
		if (phb->ioda.dma32_segmap[idx] != pe->pe_number)
			continue;

		rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
						idx, 0, 0ul, 0ul, 0ul);
		if (rc != OPAL_SUCCESS) {
			pe_warn(pe, "Failure %ld unmapping DMA32 segment#%d\n",
				rc, idx);
			return rc;
		}

		phb->ioda.dma32_segmap[idx] = IODA_INVALID_PE;
	}

	pnv_pci_unlink_table_and_group(table_group->tables[num], table_group);
	return OPAL_SUCCESS;
}
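
/*
 * Note: on IODA1 each DMA32 segment spans PNV_IODA1_DMA32_SEGSIZE
 * (0x10000000, i.e. 256MB) of 32-bit PCI space, so, for example, a PE
 * owning a 1GB DMA window has four dma32_segmap entries cleared by the
 * loop above.
 */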

static void pnv_pci_ioda1_release_pe_dma(struct pnv_ioda_pe *pe)
{
	unsigned int weight = pnv_pci_ioda_pe_dma_weight(pe);
	struct iommu_table *tbl = pe->table_group.tables[0];
	int64_t rc;

	if (!weight)
		return;

	rc = pnv_pci_ioda1_unset_window(&pe->table_group, 0);
	if (rc != OPAL_SUCCESS)
		return;

	pnv_pci_p7ioc_tce_invalidate(tbl, tbl->it_offset, tbl->it_size, false);
	if (pe->table_group.group) {
		iommu_group_put(pe->table_group.group);
		WARN_ON(pe->table_group.group);
	}

	free_pages(tbl->it_base, get_order(tbl->it_size << 3));
	iommu_free_table(tbl, "pnv");
}

static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)
{
	struct iommu_table *tbl = pe->table_group.tables[0];
	unsigned int weight = pnv_pci_ioda_pe_dma_weight(pe);
#ifdef CONFIG_IOMMU_API
	int64_t rc;
#endif

	if (!weight)
		return;

#ifdef CONFIG_IOMMU_API
	rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0);
	if (rc)
		pe_warn(pe, "OPAL error %ld releasing DMA window\n", rc);
#endif

	pnv_pci_ioda2_set_bypass(pe, false);
	if (pe->table_group.group) {
		iommu_group_put(pe->table_group.group);
		WARN_ON(pe->table_group.group);
	}

	pnv_pci_ioda2_table_free_pages(tbl);
	iommu_free_table(tbl, "pnv");
}

static void pnv_ioda_free_pe_seg(struct pnv_ioda_pe *pe,
				 unsigned short win,
				 unsigned int *map)
{
	struct pnv_phb *phb = pe->phb;
	int idx;
	int64_t rc;

	for (idx = 0; idx < phb->ioda.total_pe_num; idx++) {
		if (map[idx] != pe->pe_number)
			continue;

		if (win == OPAL_M64_WINDOW_TYPE)
			rc = opal_pci_map_pe_mmio_window(phb->opal_id,
					phb->ioda.reserved_pe_idx, win,
					idx / PNV_IODA1_M64_SEGS,
					idx % PNV_IODA1_M64_SEGS);
		else
			rc = opal_pci_map_pe_mmio_window(phb->opal_id,
					phb->ioda.reserved_pe_idx, win, 0, idx);

		if (rc != OPAL_SUCCESS)
			pe_warn(pe, "Error %ld unmapping (%d) segment#%d\n",
				rc, win, idx);

		map[idx] = IODA_INVALID_PE;
	}
}
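
/*
 * Note on the M64 index split above: with PNV_IODA1_M64_SEGS = 8,
 * segment map index 13 decomposes as window 13 / 8 = 1 and segment
 * 13 % 8 = 5, i.e. segment 5 of the second M64 BAR.
 */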

static void pnv_ioda_release_pe_seg(struct pnv_ioda_pe *pe)
{
	struct pnv_phb *phb = pe->phb;

	if (phb->type == PNV_PHB_IODA1) {
		pnv_ioda_free_pe_seg(pe, OPAL_IO_WINDOW_TYPE,
				     phb->ioda.io_segmap);
		pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE,
				     phb->ioda.m32_segmap);
		pnv_ioda_free_pe_seg(pe, OPAL_M64_WINDOW_TYPE,
				     phb->ioda.m64_segmap);
	} else if (phb->type == PNV_PHB_IODA2) {
		pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE,
				     phb->ioda.m32_segmap);
	}
}

static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
{
	struct pnv_phb *phb = pe->phb;
	struct pnv_ioda_pe *slave, *tmp;

	list_del(&pe->list);
	switch (phb->type) {
	case PNV_PHB_IODA1:
		pnv_pci_ioda1_release_pe_dma(pe);
		break;
	case PNV_PHB_IODA2:
		pnv_pci_ioda2_release_pe_dma(pe);
		break;
	default:
		WARN_ON(1);
	}

	pnv_ioda_release_pe_seg(pe);
	pnv_ioda_deconfigure_pe(pe->phb, pe);

	/* Release slave PEs in the compound PE */
	if (pe->flags & PNV_IODA_PE_MASTER) {
		list_for_each_entry_safe(slave, tmp, &pe->slaves, list) {
			list_del(&slave->list);
			pnv_ioda_free_pe(slave);
		}
	}

	/*
	 * The PE for the root bus can be removed because of hotplug in
	 * EEH recovery for a fenced PHB error. We need to mark the PE
	 * dead so that it can be populated again in the PCI hot add
	 * path. The PE shouldn't be destroyed as it's the global
	 * reserved resource.
	 */
	if (phb->ioda.root_pe_populated &&
	    phb->ioda.root_pe_idx == pe->pe_number)
		phb->ioda.root_pe_populated = false;
	else
		pnv_ioda_free_pe(pe);
}

static void pnv_pci_release_device(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;

	if (pdev->is_virtfn)
		return;

	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return;

	/*
	 * PCI hotplug can happen as part of EEH error recovery. The @pdn
	 * isn't removed and re-added in that scenario, so we should set
	 * the PE number in @pdn to an invalid one. Otherwise, the PE's
	 * device count would be decreased on removing devices while
	 * failing to be increased on adding devices. That leads to an
	 * unbalanced PE device count and eventually breaks the normal
	 * PCI hotplug path.
	 */
	pe = &phb->ioda.pe_array[pdn->pe_number];
	pdn->pe_number = IODA_INVALID_PE;

	WARN_ON(--pe->device_count < 0);
	if (pe->device_count == 0)
		pnv_ioda_release_pe(pe);
}

static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
{
	struct pnv_phb *phb = hose->private_data;

	opal_pci_reset(phb->opal_id, OPAL_RESET_PCI_IODA_TABLE,
		       OPAL_ASSERT_RESET);
}

static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
	.dma_dev_setup		= pnv_pci_dma_dev_setup,
	.dma_bus_setup		= pnv_pci_dma_bus_setup,
#ifdef CONFIG_PCI_MSI
	.setup_msi_irqs		= pnv_setup_msi_irqs,
	.teardown_msi_irqs	= pnv_teardown_msi_irqs,
#endif
	.enable_device_hook	= pnv_pci_enable_device_hook,
	.release_device		= pnv_pci_release_device,
	.window_alignment	= pnv_pci_window_alignment,
	.setup_bridge		= pnv_pci_setup_bridge,
	.reset_secondary_bus	= pnv_pci_reset_secondary_bus,
	.dma_set_mask		= pnv_pci_ioda_dma_set_mask,
	.dma_get_required_mask	= pnv_pci_ioda_dma_get_required_mask,
	.shutdown		= pnv_pci_ioda_shutdown,
};

static int pnv_npu_dma_set_mask(struct pci_dev *npdev, u64 dma_mask)
{
	dev_err_once(&npdev->dev,
		     "%s operation unsupported for NVLink devices\n",
		     __func__);
	return -EPERM;
}

static const struct pci_controller_ops pnv_npu_ioda_controller_ops = {
	.dma_dev_setup		= pnv_pci_dma_dev_setup,
#ifdef CONFIG_PCI_MSI
	.setup_msi_irqs		= pnv_setup_msi_irqs,
	.teardown_msi_irqs	= pnv_teardown_msi_irqs,
#endif
	.enable_device_hook	= pnv_pci_enable_device_hook,
	.window_alignment	= pnv_pci_window_alignment,
	.reset_secondary_bus	= pnv_pci_reset_secondary_bus,
	.dma_set_mask		= pnv_npu_dma_set_mask,
	.shutdown		= pnv_pci_ioda_shutdown,
};

#ifdef CONFIG_CXL_BASE
const struct pci_controller_ops pnv_cxl_cx4_ioda_controller_ops = {
	.dma_dev_setup		= pnv_pci_dma_dev_setup,
	.dma_bus_setup		= pnv_pci_dma_bus_setup,
#ifdef CONFIG_PCI_MSI
	.setup_msi_irqs		= pnv_cxl_cx4_setup_msi_irqs,
	.teardown_msi_irqs	= pnv_cxl_cx4_teardown_msi_irqs,
#endif
	.enable_device_hook	= pnv_cxl_enable_device_hook,
	.disable_device		= pnv_cxl_disable_device,
	.release_device		= pnv_pci_release_device,
	.window_alignment	= pnv_pci_window_alignment,
	.setup_bridge		= pnv_pci_setup_bridge,
	.reset_secondary_bus	= pnv_pci_reset_secondary_bus,
	.dma_set_mask		= pnv_pci_ioda_dma_set_mask,
	.dma_get_required_mask	= pnv_pci_ioda_dma_get_required_mask,
	.shutdown		= pnv_pci_ioda_shutdown,
};
#endif

static void __init pnv_pci_init_ioda_phb(struct device_node *np,
					 u64 hub_id, int ioda_type)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	unsigned long size, m64map_off, m32map_off, pemap_off;
	unsigned long iomap_off = 0, dma32map_off = 0;
	struct resource r;
	const __be64 *prop64;
	const __be32 *prop32;
	int len;
	unsigned int segno;
	u64 phb_id;
	void *aux;
	long rc;

	if (!of_device_is_available(np))
		return;

	pr_info("Initializing %s PHB (%s)\n",
		pnv_phb_names[ioda_type], of_node_full_name(np));

	prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
	if (!prop64) {
		pr_err("  Missing \"ibm,opal-phbid\" property !\n");
		return;
	}
	phb_id = be64_to_cpup(prop64);
	pr_debug("  PHB-ID  : 0x%016llx\n", phb_id);

	phb = memblock_virt_alloc(sizeof(struct pnv_phb), 0);

	/* Allocate PCI controller */
	phb->hose = hose = pcibios_alloc_controller(np);
	if (!phb->hose) {
		pr_err("  Can't allocate PCI controller for %s\n",
		       np->full_name);
		memblock_free(__pa(phb), sizeof(struct pnv_phb));
		return;
	}

	spin_lock_init(&phb->lock);
	prop32 = of_get_property(np, "bus-range", &len);
	if (prop32 && len == 8) {
		hose->first_busno = be32_to_cpu(prop32[0]);
		hose->last_busno = be32_to_cpu(prop32[1]);
	} else {
		pr_warn("  Broken <bus-range> on %s\n", np->full_name);
		hose->first_busno = 0;
		hose->last_busno = 0xff;
	}
	hose->private_data = phb;
	phb->hub_id = hub_id;
	phb->opal_id = phb_id;
	phb->type = ioda_type;
	mutex_init(&phb->ioda.pe_alloc_mutex);

	/* Detect specific models for error handling */
	if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
		phb->model = PNV_PHB_MODEL_P7IOC;
	else if (of_device_is_compatible(np, "ibm,power8-pciex"))
		phb->model = PNV_PHB_MODEL_PHB3;
	else if (of_device_is_compatible(np, "ibm,power8-npu-pciex"))
		phb->model = PNV_PHB_MODEL_NPU;
	else
		phb->model = PNV_PHB_MODEL_UNKNOWN;

	/* Parse 32-bit and IO ranges (if any) */
	pci_process_bridge_OF_ranges(hose, np, !hose->global_number);

	/* Get registers */
	if (!of_address_to_resource(np, 0, &r)) {
		phb->regs_phys = r.start;
		phb->regs = ioremap(r.start, resource_size(&r));
		if (phb->regs == NULL)
			pr_err("  Failed to map registers !\n");
	}

	/* Initialize more IODA stuff */
	phb->ioda.total_pe_num = 1;
	prop32 = of_get_property(np, "ibm,opal-num-pes", NULL);
	if (prop32)
		phb->ioda.total_pe_num = be32_to_cpup(prop32);
	prop32 = of_get_property(np, "ibm,opal-reserved-pe", NULL);
	if (prop32)
		phb->ioda.reserved_pe_idx = be32_to_cpup(prop32);

	/* Invalidate RID to PE# mapping */
	for (segno = 0; segno < ARRAY_SIZE(phb->ioda.pe_rmap); segno++)
		phb->ioda.pe_rmap[segno] = IODA_INVALID_PE;

	/* Parse 64-bit MMIO range */
	pnv_ioda_parse_m64_window(phb);

	phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
	/* Firmware has already carved the top 64KB (MSI space) off the
	 * M32 window, so add it back for the segment size calculation.
	 */
	phb->ioda.m32_size += 0x10000;

	phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe_num;
	phb->ioda.m32_pci_base = hose->mem_resources[0].start - hose->mem_offset[0];
	phb->ioda.io_size = hose->pci_io_size;
	phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe_num;
	phb->ioda.io_pci_base = 0; /* XXX calculate this ? */

	/* Calculate how many 32-bit TCE segments we have */
	phb->ioda.dma32_count = phb->ioda.m32_pci_base /
				PNV_IODA1_DMA32_SEGSIZE;

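	/*
	 * Illustration (hypothetical numbers): with m32_pci_base =
	 * 0x80000000 (2GB) and PNV_IODA1_DMA32_SEGSIZE = 0x10000000
	 * (256MB), this works out to dma32_count = 8 TCE segments.
	 */
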
	/* Allocate aux data & arrays. We don't have IO ports on PHB3 */
	size = _ALIGN_UP(max_t(unsigned, phb->ioda.total_pe_num, 8) / 8,
			sizeof(unsigned long));
	m64map_off = size;
	size += phb->ioda.total_pe_num * sizeof(phb->ioda.m64_segmap[0]);
	m32map_off = size;
	size += phb->ioda.total_pe_num * sizeof(phb->ioda.m32_segmap[0]);
	if (phb->type == PNV_PHB_IODA1) {
		iomap_off = size;
		size += phb->ioda.total_pe_num * sizeof(phb->ioda.io_segmap[0]);
		dma32map_off = size;
		size += phb->ioda.dma32_count *
			sizeof(phb->ioda.dma32_segmap[0]);
	}
	pemap_off = size;
	size += phb->ioda.total_pe_num * sizeof(struct pnv_ioda_pe);
	aux = memblock_virt_alloc(size, 0);
	phb->ioda.pe_alloc = aux;
	phb->ioda.m64_segmap = aux + m64map_off;
	phb->ioda.m32_segmap = aux + m32map_off;
	for (segno = 0; segno < phb->ioda.total_pe_num; segno++) {
		phb->ioda.m64_segmap[segno] = IODA_INVALID_PE;
		phb->ioda.m32_segmap[segno] = IODA_INVALID_PE;
	}
	if (phb->type == PNV_PHB_IODA1) {
		phb->ioda.io_segmap = aux + iomap_off;
		for (segno = 0; segno < phb->ioda.total_pe_num; segno++)
			phb->ioda.io_segmap[segno] = IODA_INVALID_PE;

		phb->ioda.dma32_segmap = aux + dma32map_off;
		for (segno = 0; segno < phb->ioda.dma32_count; segno++)
			phb->ioda.dma32_segmap[segno] = IODA_INVALID_PE;
	}
	phb->ioda.pe_array = aux + pemap_off;

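	/*
	 * Layout of the single aux allocation above: the PE allocation
	 * bitmap, then m64_segmap, m32_segmap, (on IODA1) io_segmap and
	 * dma32_segmap, and finally pe_array; each *_off was recorded
	 * before the corresponding array's size was added to 'size'.
	 */
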
	/*
	 * Choose a PE number for the root bus, which shouldn't have
	 * M64 resources consumed by its child devices. We pick the
	 * PE number adjacent to the reserved one if possible.
	 */
	pnv_ioda_reserve_pe(phb, phb->ioda.reserved_pe_idx);
	if (phb->ioda.reserved_pe_idx == 0) {
		phb->ioda.root_pe_idx = 1;
		pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx);
	} else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1)) {
		phb->ioda.root_pe_idx = phb->ioda.reserved_pe_idx - 1;
		pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx);
	} else {
		phb->ioda.root_pe_idx = IODA_INVALID_PE;
	}
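
	/*
	 * For example: with 256 PEs and reserved_pe_idx = 255, the root
	 * bus gets PE#254; with reserved_pe_idx = 0 it gets PE#1; any
	 * other reserved index leaves root_pe_idx invalid here.
	 */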

	INIT_LIST_HEAD(&phb->ioda.pe_list);
	mutex_init(&phb->ioda.pe_list_mutex);

#if 0 /* We should really do that ... */
	rc = opal_pci_set_phb_mem_window(opal->phb_id,
					 window_type,
					 window_num,
					 starting_real_address,
					 starting_pci_address,
					 segment_size);
#endif

	pr_info("  %03d (%03d) PE's M32: 0x%x [segment=0x%x]\n",
		phb->ioda.total_pe_num, phb->ioda.reserved_pe_idx,
		phb->ioda.m32_size, phb->ioda.m32_segsize);
	if (phb->ioda.m64_size)
		pr_info("                 M64: 0x%lx [segment=0x%lx]\n",
			phb->ioda.m64_size, phb->ioda.m64_segsize);
	if (phb->ioda.io_size)
		pr_info("                  IO: 0x%x [segment=0x%x]\n",
			phb->ioda.io_size, phb->ioda.io_segsize);

	phb->hose->ops = &pnv_pci_ops;
	phb->get_pe_state = pnv_ioda_get_pe_state;
	phb->freeze_pe = pnv_ioda_freeze_pe;
	phb->unfreeze_pe = pnv_ioda_unfreeze_pe;

	/* Setup MSI support */
	pnv_pci_init_ioda_msis(phb);

	/*
	 * We pass the PCI probe flag PCI_REASSIGN_ALL_RSRC here
	 * to let the PCI core do resource assignment. It's expected
	 * that the PCI core will do correct I/O and MMIO alignment
	 * for the P2P bridge BARs, so that each PCI bus (excluding
	 * the child P2P bridges) can form an individual PE.
	 */
	ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;

	if (phb->type == PNV_PHB_NPU) {
		hose->controller_ops = pnv_npu_ioda_controller_ops;
	} else {
		phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
		hose->controller_ops = pnv_pci_ioda_controller_ops;
	}

#ifdef CONFIG_PCI_IOV
	ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources;
	ppc_md.pcibios_iov_resource_alignment = pnv_pci_iov_resource_alignment;
#endif

	pci_add_flags(PCI_REASSIGN_ALL_RSRC);

	/* Reset IODA tables to a clean state */
	rc = opal_pci_reset(phb_id, OPAL_RESET_PCI_IODA_TABLE, OPAL_ASSERT_RESET);
	if (rc)
		pr_warning("  OPAL Error %ld performing IODA table reset !\n", rc);

	/*
	 * If we're running in a kdump kernel, the previous kernel never
	 * shut down PCI devices correctly. We already have the IODA
	 * table cleaned out, so we have to issue a PHB reset to stop
	 * all PCI transactions from the previous kernel.
	 */
	if (is_kdump_kernel()) {
		pr_info("  Issue PHB reset ...\n");
		pnv_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL);
		pnv_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE);
	}

	/* Remove the M64 resource if we can't configure it successfully */
	if (!phb->init_m64 || phb->init_m64(phb))
		hose->mem_resources[1].flags = 0;
}

void __init pnv_pci_init_ioda2_phb(struct device_node *np)
{
	pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2);
}

void __init pnv_pci_init_npu_phb(struct device_node *np)
{
	pnv_pci_init_ioda_phb(np, 0, PNV_PHB_NPU);
}

void __init pnv_pci_init_ioda_hub(struct device_node *np)
{
	struct device_node *phbn;
	const __be64 *prop64;
	u64 hub_id;

	pr_info("Probing IODA IO-Hub %s\n", np->full_name);

	prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
	if (!prop64) {
		pr_err(" Missing \"ibm,opal-hubid\" property !\n");
		return;
	}
	hub_id = be64_to_cpup(prop64);
	pr_devel(" HUB-ID : 0x%016llx\n", hub_id);

	/* Count child PHBs */
	for_each_child_of_node(np, phbn) {
		/* Look for IODA1 PHBs */
		if (of_device_is_compatible(phbn, "ibm,ioda-phb"))
			pnv_pci_init_ioda_phb(phbn, hub_id, PNV_PHB_IODA1);
	}
}