]>
Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
3c8c90ab LV |
2 | * Copyright IBM Corporation 2001, 2005, 2006 |
3 | * Copyright Dave Engebretsen & Todd Inglett 2001 | |
4 | * Copyright Linas Vepstas 2005, 2006 | |
cb3bc9d0 | 5 | * Copyright 2001-2012 IBM Corporation. |
69376502 | 6 | * |
1da177e4 LT |
7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License as published by | |
9 | * the Free Software Foundation; either version 2 of the License, or | |
10 | * (at your option) any later version. | |
69376502 | 11 | * |
1da177e4 LT |
12 | * This program is distributed in the hope that it will be useful, |
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
15 | * GNU General Public License for more details. | |
69376502 | 16 | * |
1da177e4 LT |
17 | * You should have received a copy of the GNU General Public License |
18 | * along with this program; if not, write to the Free Software | |
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
3c8c90ab LV |
20 | * |
21 | * Please address comments and feedback to Linas Vepstas <linas@austin.ibm.com> | |
1da177e4 LT |
22 | */ |
23 | ||
6dee3fb9 | 24 | #include <linux/delay.h> |
7f52a526 | 25 | #include <linux/debugfs.h> |
cb3bc9d0 | 26 | #include <linux/sched.h> |
1da177e4 LT |
27 | #include <linux/init.h> |
28 | #include <linux/list.h> | |
1da177e4 | 29 | #include <linux/pci.h> |
a3032ca9 | 30 | #include <linux/iommu.h> |
1da177e4 LT |
31 | #include <linux/proc_fs.h> |
32 | #include <linux/rbtree.h> | |
66f9af83 | 33 | #include <linux/reboot.h> |
1da177e4 LT |
34 | #include <linux/seq_file.h> |
35 | #include <linux/spinlock.h> | |
66b15db6 | 36 | #include <linux/export.h> |
acaa6176 SR |
37 | #include <linux/of.h> |
38 | ||
60063497 | 39 | #include <linux/atomic.h> |
1e54b938 | 40 | #include <asm/debug.h> |
1da177e4 | 41 | #include <asm/eeh.h> |
172ca926 | 42 | #include <asm/eeh_event.h> |
1da177e4 | 43 | #include <asm/io.h> |
212d16cd | 44 | #include <asm/iommu.h> |
1da177e4 | 45 | #include <asm/machdep.h> |
172ca926 | 46 | #include <asm/ppc-pci.h> |
1da177e4 | 47 | #include <asm/rtas.h> |
1da177e4 | 48 | |
1da177e4 LT |
49 | |
50 | /** Overview: | |
8ee26530 | 51 | * EEH, or "Enhanced Error Handling" is a PCI bridge technology for |
1da177e4 LT |
52 | * dealing with PCI bus errors that can't be dealt with within the |
53 | * usual PCI framework, except by check-stopping the CPU. Systems | |
54 | * that are designed for high-availability/reliability cannot afford | |
55 | * to crash due to a "mere" PCI error, thus the need for EEH. | |
56 | * An EEH-capable bridge operates by converting a detected error | |
57 | * into a "slot freeze", taking the PCI adapter off-line, making | |
58 | * the slot behave, from the OS'es point of view, as if the slot | |
59 | * were "empty": all reads return 0xff's and all writes are silently | |
60 | * ignored. EEH slot isolation events can be triggered by parity | |
61 | * errors on the address or data busses (e.g. during posted writes), | |
69376502 LV |
62 | * which in turn might be caused by low voltage on the bus, dust, |
63 | * vibration, humidity, radioactivity or plain-old failed hardware. | |
1da177e4 LT |
64 | * |
65 | * Note, however, that one of the leading causes of EEH slot | |
66 | * freeze events are buggy device drivers, buggy device microcode, | |
67 | * or buggy device hardware. This is because any attempt by the | |
68 | * device to bus-master data to a memory address that is not | |
69 | * assigned to the device will trigger a slot freeze. (The idea | |
70 | * is to prevent devices-gone-wild from corrupting system memory). | |
71 | * Buggy hardware/drivers will have a miserable time co-existing | |
72 | * with EEH. | |
73 | * | |
74 | * Ideally, a PCI device driver, when suspecting that an isolation | |
25985edc | 75 | * event has occurred (e.g. by reading 0xff's), will then ask EEH |
1da177e4 LT |
76 | * whether this is the case, and then take appropriate steps to |
77 | * reset the PCI slot, the PCI device, and then resume operations. | |
78 | * However, until that day, the checking is done here, with the | |
79 | * eeh_check_failure() routine embedded in the MMIO macros. If | |
80 | * the slot is found to be isolated, an "EEH Event" is synthesized | |
81 | * and sent out for processing. | |
82 | */ | |
83 | ||
/* If a device driver keeps reading an MMIO register in an interrupt
 * handler after a slot isolation event, it might be broken.
 * This sets the threshold for how many read attempts we allow
 * before printing an error message.
 */
#define EEH_MAX_FAILS	2100000

/* Time to wait for a PCI slot to report status, in milliseconds */
#define PCI_BUS_RESET_WAIT_MSEC	(5*60*1000)

/*
 * EEH probe mode support, which is part of the flags,
 * is to support multiple platforms for EEH. Some platforms
 * like pSeries do PCI emulation based on device tree.
 * However, other platforms like powernv probe PCI devices
 * from hardware. The flag is used to distinguish that.
 * In addition, struct eeh_ops::probe would be invoked for
 * particular OF node or PCI device so that the corresponding
 * PE would be created there.
 */
int eeh_subsystem_flags;
EXPORT_SYMBOL(eeh_subsystem_flags);

/*
 * EEH allowed maximal frozen times. If one particular PE's
 * frozen count in last hour exceeds this limit, the PE will
 * be forced to be offline permanently.
 */
int eeh_max_freezes = 5;

/* Platform dependent EEH operations */
struct eeh_ops *eeh_ops = NULL;

/* Lock to avoid races due to multiple reports of an error */
DEFINE_RAW_SPINLOCK(confirm_error_lock);
EXPORT_SYMBOL_GPL(confirm_error_lock);

/* Lock to protect passed flags */
static DEFINE_MUTEX(eeh_dev_mutex);

/* Buffer for reporting pci register dumps. It's here in BSS, and
 * not dynamically allocated, so that it ends up in RMO where RTAS
 * can access it.
 */
#define EEH_PCI_REGS_LOG_LEN 8192
static unsigned char pci_regs_buf[EEH_PCI_REGS_LOG_LEN];

/*
 * The struct is used to maintain the EEH global statistic
 * information. Besides, the EEH global statistics will be
 * exported to user space through procfs
 */
struct eeh_stats {
	u64 no_device;		/* PCI device not found		*/
	u64 no_dn;		/* OF node not found		*/
	u64 no_cfg_addr;	/* Config address not found	*/
	u64 ignored_check;	/* EEH check skipped		*/
	u64 total_mmio_ffs;	/* Total EEH checks		*/
	u64 false_positives;	/* Unnecessary EEH checks	*/
	u64 slot_resets;	/* PE reset			*/
};

static struct eeh_stats eeh_stats;
1da177e4 | 147 | |
7f52a526 GS |
148 | static int __init eeh_setup(char *str) |
149 | { | |
150 | if (!strcmp(str, "off")) | |
05b1721d | 151 | eeh_add_flag(EEH_FORCE_DISABLED); |
a450e8f5 GS |
152 | else if (!strcmp(str, "early_log")) |
153 | eeh_add_flag(EEH_EARLY_DUMP_LOG); | |
7f52a526 GS |
154 | |
155 | return 1; | |
156 | } | |
157 | __setup("eeh=", eeh_setup); | |
158 | ||
/*
 * This routine captures assorted PCI configuration space data
 * for the indicated PCI device, and puts them into a buffer
 * for RTAS error logging.  The same registers are also echoed
 * to the kernel log via pr_warn() so they survive even if the
 * RTAS log is lost.  Returns the number of bytes written to @buf.
 */
static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
{
	struct pci_dn *pdn = eeh_dev_to_pdn(edev);
	u32 cfg;
	int cap, i;
	int n = 0, l = 0;
	char buffer[128];	/* line-assembly buffer for 4-per-line dumps */

	n += scnprintf(buf+n, len-n, "%04x:%02x:%02x.%01x\n",
		       edev->phb->global_number, pdn->busno,
		       PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
	pr_warn("EEH: of node=%04x:%02x:%02x.%01x\n",
		edev->phb->global_number, pdn->busno,
		PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));

	eeh_ops->read_config(pdn, PCI_VENDOR_ID, 4, &cfg);
	n += scnprintf(buf+n, len-n, "dev/vend:%08x\n", cfg);
	pr_warn("EEH: PCI device/vendor: %08x\n", cfg);

	eeh_ops->read_config(pdn, PCI_COMMAND, 4, &cfg);
	n += scnprintf(buf+n, len-n, "cmd/stat:%x\n", cfg);
	pr_warn("EEH: PCI cmd/status register: %08x\n", cfg);

	/* Gather bridge-specific registers */
	if (edev->mode & EEH_DEV_BRIDGE) {
		eeh_ops->read_config(pdn, PCI_SEC_STATUS, 2, &cfg);
		n += scnprintf(buf+n, len-n, "sec stat:%x\n", cfg);
		pr_warn("EEH: Bridge secondary status: %04x\n", cfg);

		eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &cfg);
		n += scnprintf(buf+n, len-n, "brdg ctl:%x\n", cfg);
		pr_warn("EEH: Bridge control: %04x\n", cfg);
	}

	/* Dump out the PCI-X command and status regs */
	cap = edev->pcix_cap;
	if (cap) {
		eeh_ops->read_config(pdn, cap, 4, &cfg);
		n += scnprintf(buf+n, len-n, "pcix-cmd:%x\n", cfg);
		pr_warn("EEH: PCI-X cmd: %08x\n", cfg);

		eeh_ops->read_config(pdn, cap+4, 4, &cfg);
		n += scnprintf(buf+n, len-n, "pcix-stat:%x\n", cfg);
		pr_warn("EEH: PCI-X status: %08x\n", cfg);
	}

	/* If PCI-E capable, dump PCI-E cap 10 */
	cap = edev->pcie_cap;
	if (cap) {
		n += scnprintf(buf+n, len-n, "pci-e cap10:\n");
		pr_warn("EEH: PCI-E capabilities and status follow:\n");

		/* 9 dwords of the PCI Express capability structure */
		for (i=0; i<=8; i++) {
			eeh_ops->read_config(pdn, cap+4*i, 4, &cfg);
			n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);

			/* Emit four dwords per pr_warn() line */
			if ((i % 4) == 0) {
				if (i != 0)
					pr_warn("%s\n", buffer);

				l = scnprintf(buffer, sizeof(buffer),
					      "EEH: PCI-E %02x: %08x ",
					      4*i, cfg);
			} else {
				l += scnprintf(buffer+l, sizeof(buffer)-l,
					       "%08x ", cfg);
			}

		}

		/* Flush the final (possibly partial) line */
		pr_warn("%s\n", buffer);
	}

	/* If AER capable, dump it */
	cap = edev->aer_cap;
	if (cap) {
		n += scnprintf(buf+n, len-n, "pci-e AER:\n");
		pr_warn("EEH: PCI-E AER capability register set follows:\n");

		/* 14 dwords of the AER extended capability structure */
		for (i=0; i<=13; i++) {
			eeh_ops->read_config(pdn, cap+4*i, 4, &cfg);
			n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);

			/* Emit four dwords per pr_warn() line */
			if ((i % 4) == 0) {
				if (i != 0)
					pr_warn("%s\n", buffer);

				l = scnprintf(buffer, sizeof(buffer),
					      "EEH: PCI-E AER %02x: %08x ",
					      4*i, cfg);
			} else {
				l += scnprintf(buffer+l, sizeof(buffer)-l,
					       "%08x ", cfg);
			}
		}

		/* Flush the final (possibly partial) line */
		pr_warn("%s\n", buffer);
	}

	return n;
}
265 | ||
/*
 * Traversal callback for eeh_pe_traverse(): append the config-space
 * dump of every device in the PE to pci_regs_buf.  @flag points at
 * the running length, which is advanced as each device is logged.
 */
static void *eeh_dump_pe_log(void *data, void *flag)
{
	struct eeh_pe *pe = data;
	struct eeh_dev *edev, *tmp;
	size_t *plen = flag;

	eeh_pe_for_each_dev(pe, edev, tmp)
		*plen += eeh_dump_dev_log(edev, pci_regs_buf + *plen,
					  EEH_PCI_REGS_LOG_LEN - *plen);

	return NULL;
}
278 | ||
/**
 * eeh_slot_error_detail - Generate combined log including driver log and error log
 * @pe: EEH PE
 * @severity: temporary or permanent error log
 *
 * This routine should be called to generate the combined log, which
 * is comprised of driver log and error log. The driver log is figured
 * out from the config space of the corresponding PCI device, while
 * the error log is fetched through platform dependent function call.
 */
void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
{
	size_t loglen = 0;

	/*
	 * When the PHB is fenced or dead, it's pointless to collect
	 * the data from PCI config space because it should return
	 * 0xFF's. For ER, we still retrieve the data from the PCI
	 * config space.
	 *
	 * For pHyp, we have to enable IO for log retrieval. Otherwise,
	 * 0xFF's is always returned from PCI config space.
	 */
	if (!(pe->type & EEH_PE_PHB)) {
		if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG))
			eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);

		/*
		 * The config space of some PCI devices can't be accessed
		 * when their PEs are in frozen state. Otherwise, fenced
		 * PHB might be seen. Those PEs are identified with flag
		 * EEH_PE_CFG_RESTRICTED, indicating EEH_PE_CFG_BLOCKED
		 * is set automatically when the PE is put to EEH_PE_ISOLATED.
		 *
		 * Restoring BARs possibly triggers PCI config access in
		 * (OPAL) firmware and then causes fenced PHB. If the
		 * PCI config is blocked with flag EEH_PE_CFG_BLOCKED, it's
		 * pointless to restore BARs and dump config space.
		 */
		eeh_ops->configure_bridge(pe);
		if (!(pe->state & EEH_PE_CFG_BLOCKED)) {
			eeh_pe_restore_bars(pe);

			pci_regs_buf[0] = 0;
			eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen);
		}
	}

	/* Always fetch the platform error log, even with no driver log */
	eeh_ops->get_log(pe, severity, pci_regs_buf, loglen);
}
329 | ||
/**
 * eeh_token_to_phys - Convert EEH address token to phys address
 * @token: I/O token, should be address in the form 0xA....
 *
 * This routine should be called to convert virtual I/O address
 * to physical one.  If no mapping is found, the token itself is
 * returned unchanged.
 */
static inline unsigned long eeh_token_to_phys(unsigned long token)
{
	pte_t *ptep;
	unsigned long pa;
	int hugepage_shift;

	/*
	 * We won't find hugepages here(this is iomem). Hence we are not
	 * worried about _PAGE_SPLITTING/collapse. Also we will not hit
	 * page table free, because of init_mm.
	 */
	ptep = __find_linux_pte_or_hugepte(init_mm.pgd, token,
					   NULL, &hugepage_shift);
	if (!ptep)
		return token;
	WARN_ON(hugepage_shift);
	pa = pte_pfn(*ptep) << PAGE_SHIFT;

	/* Keep the in-page offset from the original token */
	return pa | (token & (PAGE_SIZE-1));
}
357 | ||
/*
 * On PowerNV platform, we might already have fenced PHB there.
 * For that case, it's meaningless to recover frozen PE. Instead,
 * we have to handle the fenced PHB first.
 *
 * Returns 1 if a PHB failure was detected and an event queued,
 * 0 if the PHB is healthy or already isolated, negative on error.
 */
static int eeh_phb_check_failure(struct eeh_pe *pe)
{
	struct eeh_pe *phb_pe;
	unsigned long flags;
	int ret;

	/* Only meaningful when devices are probed from hardware (powernv) */
	if (!eeh_has_flag(EEH_PROBE_MODE_DEV))
		return -EPERM;

	/* Find the PHB PE */
	phb_pe = eeh_phb_pe_get(pe->phb);
	if (!phb_pe) {
		pr_warn("%s Can't find PE for PHB#%x\n",
			__func__, pe->phb->global_number);
		return -EEXIST;
	}

	/* If the PHB has been in problematic state */
	eeh_serialize_lock(&flags);
	if (phb_pe->state & EEH_PE_ISOLATED) {
		ret = 0;
		goto out;
	}

	/* Check PHB state */
	ret = eeh_ops->get_state(phb_pe, NULL);
	if ((ret < 0) ||
	    (ret == EEH_STATE_NOT_SUPPORT) ||
	    (ret & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) ==
	    (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) {
		ret = 0;
		goto out;
	}

	/* Isolate the PHB and send event */
	eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED);
	eeh_serialize_unlock(flags);

	pr_err("EEH: PHB#%x failure detected, location: %s\n",
		phb_pe->phb->global_number, eeh_pe_loc_get(phb_pe));
	dump_stack();
	eeh_send_failure_event(phb_pe);

	return 1;
out:
	eeh_serialize_unlock(flags);
	return ret;
}
411 | ||
/**
 * eeh_dev_check_failure - Check if all 1's data is due to EEH slot freeze
 * @edev: eeh device
 *
 * Check for an EEH failure for the given device node.  Call this
 * routine if the result of a read was all 0xff's and you want to
 * find out if this is due to an EEH slot freeze.  This routine
 * will query firmware for the EEH status.
 *
 * Returns 0 if there has not been an EEH error; otherwise returns
 * a non-zero value and queues up a slot isolation event notification.
 *
 * It is safe to call this routine in an interrupt context.
 */
int eeh_dev_check_failure(struct eeh_dev *edev)
{
	int ret;
	int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
	unsigned long flags;
	struct pci_dn *pdn;
	struct pci_dev *dev;
	struct eeh_pe *pe, *parent_pe, *phb_pe;
	int rc = 0;
	const char *location = NULL;

	eeh_stats.total_mmio_ffs++;

	if (!eeh_enabled())
		return 0;

	if (!edev) {
		eeh_stats.no_dn++;
		return 0;
	}
	dev = eeh_dev_to_pci_dev(edev);
	pe = eeh_dev_to_pe(edev);

	/* Access to IO BARs might get this far and still not want checking. */
	if (!pe) {
		eeh_stats.ignored_check++;
		pr_debug("EEH: Ignored check for %s\n",
			eeh_pci_name(dev));
		return 0;
	}

	if (!pe->addr && !pe->config_addr) {
		eeh_stats.no_cfg_addr++;
		return 0;
	}

	/*
	 * On PowerNV platform, we might already have fenced PHB
	 * there and we need take care of that firstly.
	 */
	ret = eeh_phb_check_failure(pe);
	if (ret > 0)
		return ret;

	/*
	 * If the PE isn't owned by us, we shouldn't check the
	 * state. Instead, let the owner handle it if the PE has
	 * been frozen.
	 */
	if (eeh_pe_passed(pe))
		return 0;

	/* If we already have a pending isolation event for this
	 * slot, we know it's bad already, we don't need to check.
	 * Do this checking under a lock; as multiple PCI devices
	 * in one slot might report errors simultaneously, and we
	 * only want one error recovery routine running.
	 */
	eeh_serialize_lock(&flags);
	rc = 1;
	if (pe->state & EEH_PE_ISOLATED) {
		pe->check_count++;
		/* Warn (rate-limited to EEH_MAX_FAILS) about a driver
		 * that keeps hammering an isolated device.
		 */
		if (pe->check_count % EEH_MAX_FAILS == 0) {
			pdn = eeh_dev_to_pdn(edev);
			if (pdn->node)
				location = of_get_property(pdn->node, "ibm,loc-code", NULL);
			printk(KERN_ERR "EEH: %d reads ignored for recovering device at "
				"location=%s driver=%s pci addr=%s\n",
				pe->check_count,
				location ? location : "unknown",
				eeh_driver_name(dev), eeh_pci_name(dev));
			printk(KERN_ERR "EEH: Might be infinite loop in %s driver\n",
				eeh_driver_name(dev));
			dump_stack();
		}
		goto dn_unlock;
	}

	/*
	 * Now test for an EEH failure.  This is VERY expensive.
	 * Note that the eeh_config_addr may be a parent device
	 * in the case of a device behind a bridge, or it may be
	 * function zero of a multi-function device.
	 * In any case they must share a common PHB.
	 */
	ret = eeh_ops->get_state(pe, NULL);

	/* Note that config-io to empty slots may fail;
	 * they are empty when they don't have children.
	 * We will punt with the following conditions: Failure to get
	 * PE's state, EEH not support and Permanently unavailable
	 * state, PE is in good state.
	 */
	if ((ret < 0) ||
	    (ret == EEH_STATE_NOT_SUPPORT) ||
	    ((ret & active_flags) == active_flags)) {
		eeh_stats.false_positives++;
		pe->false_positives++;
		rc = 0;
		goto dn_unlock;
	}

	/*
	 * It should be corner case that the parent PE has been
	 * put into frozen state as well. We should take care
	 * that at first.
	 */
	parent_pe = pe->parent;
	while (parent_pe) {
		/* Hit the ceiling ? */
		if (parent_pe->type & EEH_PE_PHB)
			break;

		/* Frozen parent PE ? */
		ret = eeh_ops->get_state(parent_pe, NULL);
		if (ret > 0 &&
		    (ret & active_flags) != active_flags)
			pe = parent_pe;

		/* Next parent level */
		parent_pe = parent_pe->parent;
	}

	eeh_stats.slot_resets++;

	/* Avoid repeated reports of this failure, including problems
	 * with other functions on this device, and functions under
	 * bridges.
	 */
	eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
	eeh_serialize_unlock(flags);

	/* Most EEH events are due to device driver bugs.  Having
	 * a stack trace will help the device-driver authors figure
	 * out what happened.  So print that out.
	 */
	phb_pe = eeh_phb_pe_get(pe->phb);
	pr_err("EEH: Frozen PHB#%x-PE#%x detected\n",
	       pe->phb->global_number, pe->addr);
	pr_err("EEH: PE location: %s, PHB location: %s\n",
	       eeh_pe_loc_get(pe), eeh_pe_loc_get(phb_pe));
	dump_stack();

	eeh_send_failure_event(pe);

	return 1;

dn_unlock:
	eeh_serialize_unlock(flags);
	return rc;
}

EXPORT_SYMBOL_GPL(eeh_dev_check_failure);
1da177e4 LT |
579 | |
580 | /** | |
cb3bc9d0 | 581 | * eeh_check_failure - Check if all 1's data is due to EEH slot freeze |
3e938052 | 582 | * @token: I/O address |
1da177e4 | 583 | * |
3e938052 | 584 | * Check for an EEH failure at the given I/O address. Call this |
1da177e4 | 585 | * routine if the result of a read was all 0xff's and you want to |
3e938052 | 586 | * find out if this is due to an EEH slot freeze event. This routine |
1da177e4 LT |
587 | * will query firmware for the EEH status. |
588 | * | |
589 | * Note this routine is safe to call in an interrupt context. | |
590 | */ | |
3e938052 | 591 | int eeh_check_failure(const volatile void __iomem *token) |
1da177e4 LT |
592 | { |
593 | unsigned long addr; | |
f8f7d63f | 594 | struct eeh_dev *edev; |
1da177e4 LT |
595 | |
596 | /* Finding the phys addr + pci device; this is pretty quick. */ | |
597 | addr = eeh_token_to_phys((unsigned long __force) token); | |
3ab96a02 | 598 | edev = eeh_addr_cache_get_dev(addr); |
f8f7d63f | 599 | if (!edev) { |
e575f8db | 600 | eeh_stats.no_device++; |
3e938052 | 601 | return 0; |
177bc936 | 602 | } |
1da177e4 | 603 | |
3e938052 | 604 | return eeh_dev_check_failure(edev); |
1da177e4 | 605 | } |
1da177e4 LT |
606 | EXPORT_SYMBOL(eeh_check_failure); |
607 | ||
6dee3fb9 | 608 | |
/**
 * eeh_pci_enable - Enable MMIO or DMA transfers for this slot
 * @pe: EEH PE
 * @function: EEH option (EEH_OPT_THAW_MMIO, EEH_OPT_THAW_DMA,
 *            EEH_OPT_DISABLE, EEH_OPT_ENABLE or EEH_OPT_FREEZE_PE)
 *
 * This routine should be called to reenable frozen MMIO or DMA
 * so that it would work correctly again. It's useful while doing
 * recovery or log collection on the indicated device.
 *
 * Returns 0 on success, negative errno on failure.
 */
int eeh_pci_enable(struct eeh_pe *pe, int function)
{
	int active_flag, rc;

	/*
	 * pHyp doesn't allow to enable IO or DMA on unfrozen PE.
	 * Also, it's pointless to enable them on unfrozen PE. So
	 * we have to check before enabling IO or DMA.
	 */
	switch (function) {
	case EEH_OPT_THAW_MMIO:
		active_flag = EEH_STATE_MMIO_ACTIVE | EEH_STATE_MMIO_ENABLED;
		break;
	case EEH_OPT_THAW_DMA:
		active_flag = EEH_STATE_DMA_ACTIVE;
		break;
	case EEH_OPT_DISABLE:
	case EEH_OPT_ENABLE:
	case EEH_OPT_FREEZE_PE:
		/* These options need no pre/post state check */
		active_flag = 0;
		break;
	default:
		pr_warn("%s: Invalid function %d\n",
			__func__, function);
		return -EINVAL;
	}

	/*
	 * Check if IO or DMA has been enabled before
	 * enabling them.
	 */
	if (active_flag) {
		rc = eeh_ops->get_state(pe, NULL);
		if (rc < 0)
			return rc;

		/* Needn't enable it at all */
		if (rc == EEH_STATE_NOT_SUPPORT)
			return 0;

		/* It's already enabled */
		if (rc & active_flag)
			return 0;
	}


	/* Issue the request */
	rc = eeh_ops->set_option(pe, function);
	if (rc)
		pr_warn("%s: Unexpected state change %d on "
			"PHB#%x-PE#%x, err=%d\n",
			__func__, function, pe->phb->global_number,
			pe->addr, rc);

	/* Check if the request is finished successfully */
	if (active_flag) {
		rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
		if (rc < 0)
			return rc;

		if (rc & active_flag)
			return 0;

		return -EIO;
	}

	return rc;
}
685 | ||
/*
 * Traversal callback: save PCI state of and quiesce every device in
 * a PE except @userdata, which the caller handles itself before a
 * PE reset is issued.
 */
static void *eeh_disable_and_save_dev_state(void *data, void *userdata)
{
	struct eeh_dev *edev = data;
	struct pci_dev *pdev = eeh_dev_to_pci_dev(edev);
	struct pci_dev *dev = userdata;

	/*
	 * The caller should have disabled and saved the
	 * state for the specified device
	 */
	if (!pdev || pdev == dev)
		return NULL;

	/* Ensure we have D0 power state */
	pci_set_power_state(pdev, PCI_D0);

	/* Save device state */
	pci_save_state(pdev);

	/*
	 * Disable device to avoid any DMA traffic and
	 * interrupt from the device
	 */
	pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

	return NULL;
}
713 | ||
714 | static void *eeh_restore_dev_state(void *data, void *userdata) | |
715 | { | |
716 | struct eeh_dev *edev = data; | |
0bd78587 | 717 | struct pci_dn *pdn = eeh_dev_to_pdn(edev); |
28158cd1 GS |
718 | struct pci_dev *pdev = eeh_dev_to_pci_dev(edev); |
719 | struct pci_dev *dev = userdata; | |
720 | ||
721 | if (!pdev) | |
722 | return NULL; | |
723 | ||
724 | /* Apply customization from firmware */ | |
0bd78587 GS |
725 | if (pdn && eeh_ops->restore_config) |
726 | eeh_ops->restore_config(pdn); | |
28158cd1 GS |
727 | |
728 | /* The caller should restore state for the specified device */ | |
729 | if (pdev != dev) | |
502f159c | 730 | pci_restore_state(pdev); |
28158cd1 GS |
731 | |
732 | return NULL; | |
733 | } | |
734 | ||
/**
 * pcibios_set_pcie_reset_state - Set PCI-E reset state
 * @dev: pci device struct
 * @state: reset state to enter
 *
 * Return value:
 *	0 if success, -EINVAL if no PE is found for @dev or
 *	@state is not a recognized reset state.
 */
int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
	struct eeh_pe *pe = eeh_dev_to_pe(edev);

	if (!pe) {
		pr_err("%s: No PE found on PCI device %s\n",
			__func__, pci_name(dev));
		return -EINVAL;
	}

	switch (state) {
	case pcie_deassert_reset:
		/* Release reset, thaw the PE and restore sibling devices */
		eeh_ops->reset(pe, EEH_RESET_DEACTIVATE);
		eeh_unfreeze_pe(pe, false);
		if (!(pe->type & EEH_PE_VF))
			eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
		eeh_pe_dev_traverse(pe, eeh_restore_dev_state, dev);
		eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
		break;
	case pcie_hot_reset:
		/* Isolate, freeze and quiesce the PE, then hot reset */
		eeh_pe_state_mark_with_cfg(pe, EEH_PE_ISOLATED);
		eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
		eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
		if (!(pe->type & EEH_PE_VF))
			eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
		eeh_ops->reset(pe, EEH_RESET_HOT);
		break;
	case pcie_warm_reset:
		/* Same as hot reset, but request a fundamental reset */
		eeh_pe_state_mark_with_cfg(pe, EEH_PE_ISOLATED);
		eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
		eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
		if (!(pe->type & EEH_PE_VF))
			eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
		eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL);
		break;
	default:
		eeh_pe_state_clear(pe, EEH_PE_ISOLATED | EEH_PE_CFG_BLOCKED);
		return -EINVAL;
	};

	return 0;
}
786 | ||
cb5b5624 | 787 | /** |
c270a24c GS |
788 | * eeh_set_pe_freset - Check the required reset for the indicated device |
789 | * @data: EEH device | |
790 | * @flag: return value | |
cb3bc9d0 GS |
791 | * |
792 | * Each device might have its preferred reset type: fundamental or | |
793 | * hot reset. The routine is used to collected the information for | |
794 | * the indicated device and its children so that the bunch of the | |
795 | * devices could be reset properly. | |
796 | */ | |
c270a24c | 797 | static void *eeh_set_dev_freset(void *data, void *flag) |
cb3bc9d0 GS |
798 | { |
799 | struct pci_dev *dev; | |
c270a24c GS |
800 | unsigned int *freset = (unsigned int *)flag; |
801 | struct eeh_dev *edev = (struct eeh_dev *)data; | |
6dee3fb9 | 802 | |
c270a24c | 803 | dev = eeh_dev_to_pci_dev(edev); |
cb3bc9d0 GS |
804 | if (dev) |
805 | *freset |= dev->needs_freset; | |
806 | ||
c270a24c | 807 | return NULL; |
cb3bc9d0 GS |
808 | } |
809 | ||
810 | /** | |
6654c936 | 811 | * eeh_pe_reset_full - Complete a full reset process on the indicated PE |
c270a24c | 812 | * @pe: EEH PE |
cb3bc9d0 | 813 | * |
6654c936 RC |
814 | * This function executes a full reset procedure on a PE, including setting |
815 | * the appropriate flags, performing a fundamental or hot reset, and then | |
816 | * deactivating the reset status. It is designed to be used within the EEH | |
817 | * subsystem, as opposed to eeh_pe_reset which is exported to drivers and | |
818 | * only performs a single operation at a time. | |
819 | * | |
820 | * This function will attempt to reset a PE three times before failing. | |
cb3bc9d0 | 821 | */ |
6654c936 | 822 | int eeh_pe_reset_full(struct eeh_pe *pe) |
6dee3fb9 | 823 | { |
6654c936 RC |
824 | int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE); |
825 | int reset_state = (EEH_PE_RESET | EEH_PE_CFG_BLOCKED); | |
826 | int type = EEH_RESET_HOT; | |
308fc4f8 | 827 | unsigned int freset = 0; |
6654c936 | 828 | int i, state, ret; |
6e19314c | 829 | |
6654c936 RC |
830 | /* |
831 | * Determine the type of reset to perform - hot or fundamental. | |
832 | * Hot reset is the default operation, unless any device under the | |
833 | * PE requires a fundamental reset. | |
a84f273c | 834 | */ |
c270a24c | 835 | eeh_pe_dev_traverse(pe, eeh_set_dev_freset, &freset); |
308fc4f8 RL |
836 | |
837 | if (freset) | |
6654c936 | 838 | type = EEH_RESET_FUNDAMENTAL; |
e1029263 | 839 | |
6654c936 RC |
840 | /* Mark the PE as in reset state and block config space accesses */ |
841 | eeh_pe_state_mark(pe, reset_state); | |
28bf36f9 | 842 | |
6654c936 | 843 | /* Make three attempts at resetting the bus */ |
b85743ee | 844 | for (i = 0; i < 3; i++) { |
6654c936 RC |
845 | ret = eeh_pe_reset(pe, type); |
846 | if (ret) | |
847 | break; | |
6dee3fb9 | 848 | |
6654c936 RC |
849 | ret = eeh_pe_reset(pe, EEH_RESET_DEACTIVATE); |
850 | if (ret) | |
851 | break; | |
852 | ||
853 | /* Wait until the PE is in a functioning state */ | |
b85743ee | 854 | state = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC); |
6654c936 RC |
855 | if ((state & active_flags) == active_flags) |
856 | break; | |
e1029263 | 857 | |
b85743ee | 858 | if (state < 0) { |
1f52f176 | 859 | pr_warn("%s: Unrecoverable slot failure on PHB#%x-PE#%x", |
c270a24c | 860 | __func__, pe->phb->global_number, pe->addr); |
b85743ee | 861 | ret = -ENOTRECOVERABLE; |
6654c936 | 862 | break; |
e1029263 | 863 | } |
b85743ee | 864 | |
6654c936 | 865 | /* Set error in case this is our last attempt */ |
b85743ee GS |
866 | ret = -EIO; |
867 | pr_warn("%s: Failure %d resetting PHB#%x-PE#%x\n (%d)\n", | |
868 | __func__, state, pe->phb->global_number, pe->addr, (i + 1)); | |
6dee3fb9 | 869 | } |
b6495c0c | 870 | |
6654c936 | 871 | eeh_pe_state_clear(pe, reset_state); |
b85743ee | 872 | return ret; |
6dee3fb9 LV |
873 | } |
874 | ||
8b553f32 | 875 | /** |
cb3bc9d0 | 876 | * eeh_save_bars - Save device bars |
f631acd3 | 877 | * @edev: PCI device associated EEH device |
8b553f32 LV |
878 | * |
879 | * Save the values of the device bars. Unlike the restore | |
880 | * routine, this routine is *not* recursive. This is because | |
31116f0b | 881 | * PCI devices are added individually; but, for the restore, |
8b553f32 LV |
882 | * an entire slot is reset at a time. |
883 | */ | |
d7bb8862 | 884 | void eeh_save_bars(struct eeh_dev *edev) |
8b553f32 | 885 | { |
0bd78587 | 886 | struct pci_dn *pdn; |
8b553f32 LV |
887 | int i; |
888 | ||
0bd78587 GS |
889 | pdn = eeh_dev_to_pdn(edev); |
890 | if (!pdn) | |
8b553f32 | 891 | return; |
a84f273c | 892 | |
8b553f32 | 893 | for (i = 0; i < 16; i++) |
0bd78587 | 894 | eeh_ops->read_config(pdn, i * 4, 4, &edev->config_space[i]); |
bf898ec5 GS |
895 | |
896 | /* | |
897 | * For PCI bridges including root port, we need enable bus | |
898 | * master explicitly. Otherwise, it can't fetch IODA table | |
899 | * entries correctly. So we cache the bit in advance so that | |
900 | * we can restore it after reset, either PHB range or PE range. | |
901 | */ | |
902 | if (edev->mode & EEH_DEV_BRIDGE) | |
903 | edev->config_space[1] |= PCI_COMMAND_MASTER; | |
8b553f32 LV |
904 | } |
905 | ||
aa1e6374 GS |
906 | /** |
907 | * eeh_ops_register - Register platform dependent EEH operations | |
908 | * @ops: platform dependent EEH operations | |
909 | * | |
910 | * Register the platform dependent EEH operation callback | |
911 | * functions. The platform should call this function before | |
912 | * any other EEH operations. | |
913 | */ | |
914 | int __init eeh_ops_register(struct eeh_ops *ops) | |
915 | { | |
916 | if (!ops->name) { | |
0dae2743 | 917 | pr_warn("%s: Invalid EEH ops name for %p\n", |
aa1e6374 GS |
918 | __func__, ops); |
919 | return -EINVAL; | |
920 | } | |
921 | ||
922 | if (eeh_ops && eeh_ops != ops) { | |
0dae2743 | 923 | pr_warn("%s: EEH ops of platform %s already existing (%s)\n", |
aa1e6374 GS |
924 | __func__, eeh_ops->name, ops->name); |
925 | return -EEXIST; | |
926 | } | |
927 | ||
928 | eeh_ops = ops; | |
929 | ||
930 | return 0; | |
931 | } | |
932 | ||
933 | /** | |
934 | * eeh_ops_unregister - Unreigster platform dependent EEH operations | |
935 | * @name: name of EEH platform operations | |
936 | * | |
937 | * Unregister the platform dependent EEH operation callback | |
938 | * functions. | |
939 | */ | |
940 | int __exit eeh_ops_unregister(const char *name) | |
941 | { | |
942 | if (!name || !strlen(name)) { | |
0dae2743 | 943 | pr_warn("%s: Invalid EEH ops name\n", |
aa1e6374 GS |
944 | __func__); |
945 | return -EINVAL; | |
946 | } | |
947 | ||
948 | if (eeh_ops && !strcmp(eeh_ops->name, name)) { | |
949 | eeh_ops = NULL; | |
950 | return 0; | |
951 | } | |
952 | ||
953 | return -EEXIST; | |
954 | } | |
955 | ||
/*
 * Reboot notifier: on shutdown/reboot, clear the global EEH_ENABLED
 * flag so no further EEH error detection/recovery is attempted while
 * the system is going down.
 */
static int eeh_reboot_notifier(struct notifier_block *nb,
			       unsigned long action, void *unused)
{
	eeh_clear_flag(EEH_ENABLED);
	return NOTIFY_DONE;
}

/* Registered against the reboot notifier chain in eeh_init() */
static struct notifier_block eeh_reboot_nb = {
	.notifier_call = eeh_reboot_notifier,
};
966 | ||
/**
 * eeh_init - EEH initialization
 *
 * Initialize EEH by trying to enable it for all of the adapters in the system.
 * As a side effect we can determine here if eeh is supported at all.
 * Note that we leave EEH on so failed config cycles won't cause a machine
 * check. If a user turns off EEH for a particular adapter they are really
 * telling Linux to ignore errors. Some hardware (e.g. POWER5) won't
 * grant access to a slot if EEH isn't enabled, and so we always enable
 * EEH for all slots/all devices.
 *
 * The eeh-force-off option disables EEH checking globally, for all slots.
 * Even if force-off is set, the EEH hardware is still enabled, so that
 * newer systems can boot.
 */
int eeh_init(void)
{
	struct pci_controller *hose, *tmp;
	struct pci_dn *pdn;
	static int cnt = 0;	/* invocation counter; see PowerNV note below */
	int ret = 0;

	/*
	 * We have to delay the initialization on PowerNV after
	 * the PCI hierarchy tree has been built because the PEs
	 * are figured out based on PCI devices instead of device
	 * tree nodes
	 */
	if (machine_is(powernv) && cnt++ <= 0)
		return ret;

	/* Register reboot notifier */
	ret = register_reboot_notifier(&eeh_reboot_nb);
	if (ret) {
		pr_warn("%s: Failed to register notifier (%d)\n",
			__func__, ret);
		return ret;
	}

	/* call platform initialization function */
	if (!eeh_ops) {
		pr_warn("%s: Platform EEH operation not found\n",
			__func__);
		return -EEXIST;
	} else if ((ret = eeh_ops->init()))
		return ret;

	/* Initialize EEH event */
	ret = eeh_event_init();
	if (ret)
		return ret;

	/* Enable EEH for all adapters */
	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		pdn = hose->pci_data;
		traverse_pci_dn(pdn, eeh_ops->probe, NULL);
	}

	/*
	 * Call platform post-initialization. Actually, It's good chance
	 * to inform platform that EEH is ready to supply service if the
	 * I/O cache stuff has been built up.
	 */
	if (eeh_ops->post_init) {
		ret = eeh_ops->post_init();
		if (ret)
			return ret;
	}

	if (eeh_enabled())
		pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n");
	else
		pr_info("EEH: No capable adapters found\n");

	return ret;
}
core_initcall_sync(eeh_init);
1045 | ||
1da177e4 | 1046 | /** |
c6406d8f | 1047 | * eeh_add_device_early - Enable EEH for the indicated device node |
ff57b454 | 1048 | * @pdn: PCI device node for which to set up EEH |
1da177e4 LT |
1049 | * |
1050 | * This routine must be used to perform EEH initialization for PCI | |
1051 | * devices that were added after system boot (e.g. hotplug, dlpar). | |
1052 | * This routine must be called before any i/o is performed to the | |
1053 | * adapter (inluding any config-space i/o). | |
1054 | * Whether this actually enables EEH or not for this device depends | |
1055 | * on the CEC architecture, type of the device, on earlier boot | |
1056 | * command-line arguments & etc. | |
1057 | */ | |
ff57b454 | 1058 | void eeh_add_device_early(struct pci_dn *pdn) |
1da177e4 LT |
1059 | { |
1060 | struct pci_controller *phb; | |
ff57b454 | 1061 | struct eeh_dev *edev = pdn_to_eeh_dev(pdn); |
1da177e4 | 1062 | |
c2078d9e | 1063 | if (!edev) |
1da177e4 | 1064 | return; |
f751f841 | 1065 | |
d91dafc0 GS |
1066 | if (!eeh_has_flag(EEH_PROBE_MODE_DEVTREE)) |
1067 | return; | |
1068 | ||
f751f841 | 1069 | /* USB Bus children of PCI devices will not have BUID's */ |
ff57b454 GS |
1070 | phb = edev->phb; |
1071 | if (NULL == phb || | |
1072 | (eeh_has_flag(EEH_PROBE_MODE_DEVTREE) && 0 == phb->buid)) | |
1da177e4 | 1073 | return; |
1da177e4 | 1074 | |
ff57b454 | 1075 | eeh_ops->probe(pdn, NULL); |
1da177e4 | 1076 | } |
1da177e4 | 1077 | |
cb3bc9d0 GS |
1078 | /** |
1079 | * eeh_add_device_tree_early - Enable EEH for the indicated device | |
ff57b454 | 1080 | * @pdn: PCI device node |
cb3bc9d0 GS |
1081 | * |
1082 | * This routine must be used to perform EEH initialization for the | |
1083 | * indicated PCI device that was added after system boot (e.g. | |
1084 | * hotplug, dlpar). | |
1085 | */ | |
ff57b454 | 1086 | void eeh_add_device_tree_early(struct pci_dn *pdn) |
e2a296ee | 1087 | { |
ff57b454 GS |
1088 | struct pci_dn *n; |
1089 | ||
1090 | if (!pdn) | |
1091 | return; | |
acaa6176 | 1092 | |
ff57b454 GS |
1093 | list_for_each_entry(n, &pdn->child_list, list) |
1094 | eeh_add_device_tree_early(n); | |
1095 | eeh_add_device_early(pdn); | |
e2a296ee LV |
1096 | } |
1097 | EXPORT_SYMBOL_GPL(eeh_add_device_tree_early); | |
1098 | ||
1da177e4 | 1099 | /** |
cb3bc9d0 | 1100 | * eeh_add_device_late - Perform EEH initialization for the indicated pci device |
1da177e4 LT |
1101 | * @dev: pci device for which to set up EEH |
1102 | * | |
1103 | * This routine must be used to complete EEH initialization for PCI | |
1104 | * devices that were added after system boot (e.g. hotplug, dlpar). | |
1105 | */ | |
f2856491 | 1106 | void eeh_add_device_late(struct pci_dev *dev) |
1da177e4 | 1107 | { |
c6406d8f | 1108 | struct pci_dn *pdn; |
f631acd3 | 1109 | struct eeh_dev *edev; |
56b0fca3 | 1110 | |
2ec5a0ad | 1111 | if (!dev || !eeh_enabled()) |
1da177e4 LT |
1112 | return; |
1113 | ||
57b066ff | 1114 | pr_debug("EEH: Adding device %s\n", pci_name(dev)); |
1da177e4 | 1115 | |
c6406d8f GS |
1116 | pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn); |
1117 | edev = pdn_to_eeh_dev(pdn); | |
f631acd3 | 1118 | if (edev->pdev == dev) { |
57b066ff BH |
1119 | pr_debug("EEH: Already referenced !\n"); |
1120 | return; | |
1121 | } | |
f5c57710 GS |
1122 | |
1123 | /* | |
1124 | * The EEH cache might not be removed correctly because of | |
1125 | * unbalanced kref to the device during unplug time, which | |
1126 | * relies on pcibios_release_device(). So we have to remove | |
1127 | * that here explicitly. | |
1128 | */ | |
1129 | if (edev->pdev) { | |
1130 | eeh_rmv_from_parent_pe(edev); | |
1131 | eeh_addr_cache_rmv_dev(edev->pdev); | |
1132 | eeh_sysfs_remove_device(edev->pdev); | |
ab55d218 | 1133 | edev->mode &= ~EEH_DEV_SYSFS; |
f5c57710 | 1134 | |
f26c7a03 GS |
1135 | /* |
1136 | * We definitely should have the PCI device removed | |
1137 | * though it wasn't correctly. So we needn't call | |
1138 | * into error handler afterwards. | |
1139 | */ | |
1140 | edev->mode |= EEH_DEV_NO_HANDLER; | |
1141 | ||
f5c57710 GS |
1142 | edev->pdev = NULL; |
1143 | dev->dev.archdata.edev = NULL; | |
1144 | } | |
57b066ff | 1145 | |
e642d11b DA |
1146 | if (eeh_has_flag(EEH_PROBE_MODE_DEV)) |
1147 | eeh_ops->probe(pdn, NULL); | |
1148 | ||
f631acd3 GS |
1149 | edev->pdev = dev; |
1150 | dev->dev.archdata.edev = edev; | |
56b0fca3 | 1151 | |
3ab96a02 | 1152 | eeh_addr_cache_insert_dev(dev); |
1da177e4 | 1153 | } |
794e085e | 1154 | |
cb3bc9d0 GS |
1155 | /** |
1156 | * eeh_add_device_tree_late - Perform EEH initialization for the indicated PCI bus | |
1157 | * @bus: PCI bus | |
1158 | * | |
1159 | * This routine must be used to perform EEH initialization for PCI | |
1160 | * devices which are attached to the indicated PCI bus. The PCI bus | |
1161 | * is added after system boot through hotplug or dlpar. | |
1162 | */ | |
794e085e NF |
1163 | void eeh_add_device_tree_late(struct pci_bus *bus) |
1164 | { | |
1165 | struct pci_dev *dev; | |
1166 | ||
1167 | list_for_each_entry(dev, &bus->devices, bus_list) { | |
a84f273c GS |
1168 | eeh_add_device_late(dev); |
1169 | if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { | |
1170 | struct pci_bus *subbus = dev->subordinate; | |
1171 | if (subbus) | |
1172 | eeh_add_device_tree_late(subbus); | |
1173 | } | |
794e085e NF |
1174 | } |
1175 | } | |
1176 | EXPORT_SYMBOL_GPL(eeh_add_device_tree_late); | |
1da177e4 | 1177 | |
6a040ce7 TLSC |
1178 | /** |
1179 | * eeh_add_sysfs_files - Add EEH sysfs files for the indicated PCI bus | |
1180 | * @bus: PCI bus | |
1181 | * | |
1182 | * This routine must be used to add EEH sysfs files for PCI | |
1183 | * devices which are attached to the indicated PCI bus. The PCI bus | |
1184 | * is added after system boot through hotplug or dlpar. | |
1185 | */ | |
1186 | void eeh_add_sysfs_files(struct pci_bus *bus) | |
1187 | { | |
1188 | struct pci_dev *dev; | |
1189 | ||
1190 | list_for_each_entry(dev, &bus->devices, bus_list) { | |
1191 | eeh_sysfs_add_device(dev); | |
1192 | if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { | |
1193 | struct pci_bus *subbus = dev->subordinate; | |
1194 | if (subbus) | |
1195 | eeh_add_sysfs_files(subbus); | |
1196 | } | |
1197 | } | |
1198 | } | |
1199 | EXPORT_SYMBOL_GPL(eeh_add_sysfs_files); | |
1200 | ||
/**
 * eeh_remove_device - Undo EEH setup for the indicated pci device
 * @dev: pci device to be removed
 *
 * This routine should be called when a device is removed from
 * a running system (e.g. by hotplug or dlpar). It unregisters
 * the PCI device from the EEH subsystem. I/O errors affecting
 * this device will no longer be detected after this call; thus,
 * i/o errors affecting this slot may leave this device unusable.
 */
void eeh_remove_device(struct pci_dev *dev)
{
	struct eeh_dev *edev;

	if (!dev || !eeh_enabled())
		return;
	edev = pci_dev_to_eeh_dev(dev);

	/* Unregister the device with the EEH/PCI address search system */
	pr_debug("EEH: Removing device %s\n", pci_name(dev));

	/* Device was never registered with EEH, or already detached */
	if (!edev || !edev->pdev || !edev->pe) {
		pr_debug("EEH: Not referenced !\n");
		return;
	}

	/*
	 * During the hotplug for EEH error recovery, we need the EEH
	 * device attached to the parent PE in order for BAR restore
	 * a bit later. So we keep it for BAR restore and remove it
	 * from the parent PE during the BAR restore.
	 */
	edev->pdev = NULL;

	/*
	 * The flag "in_error" is used to trace EEH devices for VFs
	 * in error state or not. It's set in eeh_report_error(). If
	 * it's not set, eeh_report_{reset,resume}() won't be called
	 * for the VF EEH device.
	 */
	edev->in_error = false;
	dev->dev.archdata.edev = NULL;
	/* EEH_PE_KEEP means recovery still needs this edev for BAR restore */
	if (!(edev->pe->state & EEH_PE_KEEP))
		eeh_rmv_from_parent_pe(edev);
	else
		edev->mode |= EEH_DEV_DISCONNECTED;

	/*
	 * We're removing from the PCI subsystem, that means
	 * the PCI device driver can't support EEH or not
	 * well. So we rely on hotplug completely to do recovery
	 * for the specific PCI device.
	 */
	edev->mode |= EEH_DEV_NO_HANDLER;

	eeh_addr_cache_rmv_dev(dev);
	eeh_sysfs_remove_device(dev);
	edev->mode &= ~EEH_DEV_SYSFS;
}
1da177e4 | 1260 | |
4eeeff0e GS |
1261 | int eeh_unfreeze_pe(struct eeh_pe *pe, bool sw_state) |
1262 | { | |
1263 | int ret; | |
1264 | ||
1265 | ret = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO); | |
1266 | if (ret) { | |
1267 | pr_warn("%s: Failure %d enabling IO on PHB#%x-PE#%x\n", | |
1268 | __func__, ret, pe->phb->global_number, pe->addr); | |
1269 | return ret; | |
1270 | } | |
1271 | ||
1272 | ret = eeh_pci_enable(pe, EEH_OPT_THAW_DMA); | |
1273 | if (ret) { | |
1274 | pr_warn("%s: Failure %d enabling DMA on PHB#%x-PE#%x\n", | |
1275 | __func__, ret, pe->phb->global_number, pe->addr); | |
1276 | return ret; | |
1277 | } | |
1278 | ||
1279 | /* Clear software isolated state */ | |
1280 | if (sw_state && (pe->state & EEH_PE_ISOLATED)) | |
1281 | eeh_pe_state_clear(pe, EEH_PE_ISOLATED); | |
1282 | ||
1283 | return ret; | |
1284 | } | |
1285 | ||
/*
 * Devices known to need a full PE reset-and-recover (rather than a
 * plain unfreeze) before a frozen PE can change owner.
 */
static struct pci_device_id eeh_reset_ids[] = {
	{ PCI_DEVICE(0x19a2, 0x0710) },	/* Emulex, BE */
	{ PCI_DEVICE(0x10df, 0xe220) },	/* Emulex, Lancer */
	{ PCI_DEVICE(0x14e4, 0x1657) },	/* Broadcom BCM5719 */
	{ 0 }
};

/*
 * eeh_pe_change_owner - Prepare a PE for an ownership change
 * @pe: EEH PE
 *
 * If the PE is frozen, thaw it (or fully reset it when it contains a
 * device from the quirk table above) so the new owner starts from a
 * working PE. Returns 0 when nothing needed doing or on success.
 */
static int eeh_pe_change_owner(struct eeh_pe *pe)
{
	struct eeh_dev *edev, *tmp;
	struct pci_dev *pdev;
	struct pci_device_id *id;
	int flags, ret;

	/* Check PE state */
	flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
	ret = eeh_ops->get_state(pe, NULL);
	if (ret < 0 || ret == EEH_STATE_NOT_SUPPORT)
		return 0;

	/* Unfrozen PE, nothing to do */
	if ((ret & flags) == flags)
		return 0;

	/* Frozen PE, check if it needs PE level reset */
	eeh_pe_for_each_dev(pe, edev, tmp) {
		pdev = eeh_dev_to_pci_dev(edev);
		if (!pdev)
			continue;

		/* Match against the quirk table; PCI_ANY_ID acts as wildcard */
		for (id = &eeh_reset_ids[0]; id->vendor != 0; id++) {
			if (id->vendor != PCI_ANY_ID &&
			    id->vendor != pdev->vendor)
				continue;
			if (id->device != PCI_ANY_ID &&
			    id->device != pdev->device)
				continue;
			if (id->subvendor != PCI_ANY_ID &&
			    id->subvendor != pdev->subsystem_vendor)
				continue;
			if (id->subdevice != PCI_ANY_ID &&
			    id->subdevice != pdev->subsystem_device)
				continue;

			return eeh_pe_reset_and_recover(pe);
		}
	}

	/* No quirky device in the PE: thawing is sufficient */
	return eeh_unfreeze_pe(pe, true);
}
1337 | ||
212d16cd GS |
1338 | /** |
1339 | * eeh_dev_open - Increase count of pass through devices for PE | |
1340 | * @pdev: PCI device | |
1341 | * | |
1342 | * Increase count of passed through devices for the indicated | |
1343 | * PE. In the result, the EEH errors detected on the PE won't be | |
1344 | * reported. The PE owner will be responsible for detection | |
1345 | * and recovery. | |
1346 | */ | |
1347 | int eeh_dev_open(struct pci_dev *pdev) | |
1348 | { | |
1349 | struct eeh_dev *edev; | |
404079c8 | 1350 | int ret = -ENODEV; |
212d16cd GS |
1351 | |
1352 | mutex_lock(&eeh_dev_mutex); | |
1353 | ||
1354 | /* No PCI device ? */ | |
1355 | if (!pdev) | |
1356 | goto out; | |
1357 | ||
1358 | /* No EEH device or PE ? */ | |
1359 | edev = pci_dev_to_eeh_dev(pdev); | |
1360 | if (!edev || !edev->pe) | |
1361 | goto out; | |
1362 | ||
404079c8 GS |
1363 | /* |
1364 | * The PE might have been put into frozen state, but we | |
1365 | * didn't detect that yet. The passed through PCI devices | |
1366 | * in frozen PE won't work properly. Clear the frozen state | |
1367 | * in advance. | |
1368 | */ | |
5cfb20b9 | 1369 | ret = eeh_pe_change_owner(edev->pe); |
4eeeff0e GS |
1370 | if (ret) |
1371 | goto out; | |
404079c8 | 1372 | |
212d16cd GS |
1373 | /* Increase PE's pass through count */ |
1374 | atomic_inc(&edev->pe->pass_dev_cnt); | |
1375 | mutex_unlock(&eeh_dev_mutex); | |
1376 | ||
1377 | return 0; | |
1378 | out: | |
1379 | mutex_unlock(&eeh_dev_mutex); | |
404079c8 | 1380 | return ret; |
212d16cd GS |
1381 | } |
1382 | EXPORT_SYMBOL_GPL(eeh_dev_open); | |
1383 | ||
/**
 * eeh_dev_release - Decrease count of pass through devices for PE
 * @pdev: PCI device
 *
 * Decrease count of pass through devices for the indicated PE. If
 * there is no passed through device in PE, the EEH errors detected
 * on the PE will be reported and handled as usual.
 */
void eeh_dev_release(struct pci_dev *pdev)
{
	struct eeh_dev *edev;

	/* Serialize against eeh_dev_open() */
	mutex_lock(&eeh_dev_mutex);

	/* No PCI device ? */
	if (!pdev)
		goto out;

	/* No EEH device ? */
	edev = pci_dev_to_eeh_dev(pdev);
	if (!edev || !edev->pe || !eeh_pe_passed(edev->pe))
		goto out;

	/* Decrease PE's pass through count */
	WARN_ON(atomic_dec_if_positive(&edev->pe->pass_dev_cnt) < 0);
	/* Hand the (possibly frozen) PE back to the host in working order */
	eeh_pe_change_owner(edev->pe);
out:
	mutex_unlock(&eeh_dev_mutex);
}
EXPORT_SYMBOL(eeh_dev_release);
1414 | ||
2194dc27 BH |
1415 | #ifdef CONFIG_IOMMU_API |
1416 | ||
a3032ca9 GS |
1417 | static int dev_has_iommu_table(struct device *dev, void *data) |
1418 | { | |
1419 | struct pci_dev *pdev = to_pci_dev(dev); | |
1420 | struct pci_dev **ppdev = data; | |
a3032ca9 GS |
1421 | |
1422 | if (!dev) | |
1423 | return 0; | |
1424 | ||
ea30e99e | 1425 | if (dev->iommu_group) { |
a3032ca9 GS |
1426 | *ppdev = pdev; |
1427 | return 1; | |
1428 | } | |
1429 | ||
1430 | return 0; | |
1431 | } | |
1432 | ||
212d16cd GS |
1433 | /** |
1434 | * eeh_iommu_group_to_pe - Convert IOMMU group to EEH PE | |
1435 | * @group: IOMMU group | |
1436 | * | |
1437 | * The routine is called to convert IOMMU group to EEH PE. | |
1438 | */ | |
1439 | struct eeh_pe *eeh_iommu_group_to_pe(struct iommu_group *group) | |
1440 | { | |
212d16cd GS |
1441 | struct pci_dev *pdev = NULL; |
1442 | struct eeh_dev *edev; | |
a3032ca9 | 1443 | int ret; |
212d16cd GS |
1444 | |
1445 | /* No IOMMU group ? */ | |
1446 | if (!group) | |
1447 | return NULL; | |
1448 | ||
a3032ca9 GS |
1449 | ret = iommu_group_for_each_dev(group, &pdev, dev_has_iommu_table); |
1450 | if (!ret || !pdev) | |
212d16cd GS |
1451 | return NULL; |
1452 | ||
1453 | /* No EEH device or PE ? */ | |
1454 | edev = pci_dev_to_eeh_dev(pdev); | |
1455 | if (!edev || !edev->pe) | |
1456 | return NULL; | |
1457 | ||
1458 | return edev->pe; | |
1459 | } | |
537e5400 | 1460 | EXPORT_SYMBOL_GPL(eeh_iommu_group_to_pe); |
212d16cd | 1461 | |
2194dc27 BH |
1462 | #endif /* CONFIG_IOMMU_API */ |
1463 | ||
212d16cd GS |
1464 | /** |
1465 | * eeh_pe_set_option - Set options for the indicated PE | |
1466 | * @pe: EEH PE | |
1467 | * @option: requested option | |
1468 | * | |
1469 | * The routine is called to enable or disable EEH functionality | |
1470 | * on the indicated PE, to enable IO or DMA for the frozen PE. | |
1471 | */ | |
1472 | int eeh_pe_set_option(struct eeh_pe *pe, int option) | |
1473 | { | |
1474 | int ret = 0; | |
1475 | ||
1476 | /* Invalid PE ? */ | |
1477 | if (!pe) | |
1478 | return -ENODEV; | |
1479 | ||
1480 | /* | |
1481 | * EEH functionality could possibly be disabled, just | |
1482 | * return error for the case. And the EEH functinality | |
1483 | * isn't expected to be disabled on one specific PE. | |
1484 | */ | |
1485 | switch (option) { | |
1486 | case EEH_OPT_ENABLE: | |
4eeeff0e | 1487 | if (eeh_enabled()) { |
5cfb20b9 | 1488 | ret = eeh_pe_change_owner(pe); |
212d16cd | 1489 | break; |
4eeeff0e | 1490 | } |
212d16cd GS |
1491 | ret = -EIO; |
1492 | break; | |
1493 | case EEH_OPT_DISABLE: | |
1494 | break; | |
1495 | case EEH_OPT_THAW_MMIO: | |
1496 | case EEH_OPT_THAW_DMA: | |
de5a6622 | 1497 | case EEH_OPT_FREEZE_PE: |
212d16cd GS |
1498 | if (!eeh_ops || !eeh_ops->set_option) { |
1499 | ret = -ENOENT; | |
1500 | break; | |
1501 | } | |
1502 | ||
4eeeff0e | 1503 | ret = eeh_pci_enable(pe, option); |
212d16cd GS |
1504 | break; |
1505 | default: | |
1506 | pr_debug("%s: Option %d out of range (%d, %d)\n", | |
1507 | __func__, option, EEH_OPT_DISABLE, EEH_OPT_THAW_DMA); | |
1508 | ret = -EINVAL; | |
1509 | } | |
1510 | ||
1511 | return ret; | |
1512 | } | |
1513 | EXPORT_SYMBOL_GPL(eeh_pe_set_option); | |
1514 | ||
1515 | /** | |
1516 | * eeh_pe_get_state - Retrieve PE's state | |
1517 | * @pe: EEH PE | |
1518 | * | |
1519 | * Retrieve the PE's state, which includes 3 aspects: enabled | |
1520 | * DMA, enabled IO and asserted reset. | |
1521 | */ | |
1522 | int eeh_pe_get_state(struct eeh_pe *pe) | |
1523 | { | |
1524 | int result, ret = 0; | |
1525 | bool rst_active, dma_en, mmio_en; | |
1526 | ||
1527 | /* Existing PE ? */ | |
1528 | if (!pe) | |
1529 | return -ENODEV; | |
1530 | ||
1531 | if (!eeh_ops || !eeh_ops->get_state) | |
1532 | return -ENOENT; | |
1533 | ||
eca036ee GS |
1534 | /* |
1535 | * If the parent PE is owned by the host kernel and is undergoing | |
1536 | * error recovery, we should return the PE state as temporarily | |
1537 | * unavailable so that the error recovery on the guest is suspended | |
1538 | * until the recovery completes on the host. | |
1539 | */ | |
1540 | if (pe->parent && | |
1541 | !(pe->state & EEH_PE_REMOVED) && | |
1542 | (pe->parent->state & (EEH_PE_ISOLATED | EEH_PE_RECOVERING))) | |
1543 | return EEH_PE_STATE_UNAVAIL; | |
1544 | ||
212d16cd GS |
1545 | result = eeh_ops->get_state(pe, NULL); |
1546 | rst_active = !!(result & EEH_STATE_RESET_ACTIVE); | |
1547 | dma_en = !!(result & EEH_STATE_DMA_ENABLED); | |
1548 | mmio_en = !!(result & EEH_STATE_MMIO_ENABLED); | |
1549 | ||
1550 | if (rst_active) | |
1551 | ret = EEH_PE_STATE_RESET; | |
1552 | else if (dma_en && mmio_en) | |
1553 | ret = EEH_PE_STATE_NORMAL; | |
1554 | else if (!dma_en && !mmio_en) | |
1555 | ret = EEH_PE_STATE_STOPPED_IO_DMA; | |
1556 | else if (!dma_en && mmio_en) | |
1557 | ret = EEH_PE_STATE_STOPPED_DMA; | |
1558 | else | |
1559 | ret = EEH_PE_STATE_UNAVAIL; | |
1560 | ||
1561 | return ret; | |
1562 | } | |
1563 | EXPORT_SYMBOL_GPL(eeh_pe_get_state); | |
1564 | ||
316233ff GS |
1565 | static int eeh_pe_reenable_devices(struct eeh_pe *pe) |
1566 | { | |
1567 | struct eeh_dev *edev, *tmp; | |
1568 | struct pci_dev *pdev; | |
1569 | int ret = 0; | |
1570 | ||
1571 | /* Restore config space */ | |
1572 | eeh_pe_restore_bars(pe); | |
1573 | ||
1574 | /* | |
1575 | * Reenable PCI devices as the devices passed | |
1576 | * through are always enabled before the reset. | |
1577 | */ | |
1578 | eeh_pe_for_each_dev(pe, edev, tmp) { | |
1579 | pdev = eeh_dev_to_pci_dev(edev); | |
1580 | if (!pdev) | |
1581 | continue; | |
1582 | ||
1583 | ret = pci_reenable_device(pdev); | |
1584 | if (ret) { | |
1585 | pr_warn("%s: Failure %d reenabling %s\n", | |
1586 | __func__, ret, pci_name(pdev)); | |
1587 | return ret; | |
1588 | } | |
1589 | } | |
1590 | ||
1591 | /* The PE is still in frozen state */ | |
c9dd0143 | 1592 | return eeh_unfreeze_pe(pe, true); |
316233ff GS |
1593 | } |
1594 | ||
6654c936 | 1595 | |
212d16cd GS |
1596 | /** |
1597 | * eeh_pe_reset - Issue PE reset according to specified type | |
1598 | * @pe: EEH PE | |
1599 | * @option: reset type | |
1600 | * | |
1601 | * The routine is called to reset the specified PE with the | |
1602 | * indicated type, either fundamental reset or hot reset. | |
1603 | * PE reset is the most important part for error recovery. | |
1604 | */ | |
1605 | int eeh_pe_reset(struct eeh_pe *pe, int option) | |
1606 | { | |
1607 | int ret = 0; | |
1608 | ||
1609 | /* Invalid PE ? */ | |
1610 | if (!pe) | |
1611 | return -ENODEV; | |
1612 | ||
1613 | if (!eeh_ops || !eeh_ops->set_option || !eeh_ops->reset) | |
1614 | return -ENOENT; | |
1615 | ||
1616 | switch (option) { | |
1617 | case EEH_RESET_DEACTIVATE: | |
1618 | ret = eeh_ops->reset(pe, option); | |
8a6b3710 | 1619 | eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); |
212d16cd GS |
1620 | if (ret) |
1621 | break; | |
1622 | ||
316233ff | 1623 | ret = eeh_pe_reenable_devices(pe); |
212d16cd GS |
1624 | break; |
1625 | case EEH_RESET_HOT: | |
1626 | case EEH_RESET_FUNDAMENTAL: | |
0d5ee520 GS |
1627 | /* |
1628 | * Proactively freeze the PE to drop all MMIO access | |
1629 | * during reset, which should be banned as it's always | |
1630 | * cause recursive EEH error. | |
1631 | */ | |
1632 | eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE); | |
1633 | ||
8a6b3710 | 1634 | eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); |
212d16cd GS |
1635 | ret = eeh_ops->reset(pe, option); |
1636 | break; | |
1637 | default: | |
1638 | pr_debug("%s: Unsupported option %d\n", | |
1639 | __func__, option); | |
1640 | ret = -EINVAL; | |
1641 | } | |
1642 | ||
1643 | return ret; | |
1644 | } | |
1645 | EXPORT_SYMBOL_GPL(eeh_pe_reset); | |
1646 | ||
1647 | /** | |
1648 | * eeh_pe_configure - Configure PCI bridges after PE reset | |
1649 | * @pe: EEH PE | |
1650 | * | |
1651 | * The routine is called to restore the PCI config space for | |
1652 | * those PCI devices, especially PCI bridges affected by PE | |
1653 | * reset issued previously. | |
1654 | */ | |
1655 | int eeh_pe_configure(struct eeh_pe *pe) | |
1656 | { | |
1657 | int ret = 0; | |
1658 | ||
1659 | /* Invalid PE ? */ | |
1660 | if (!pe) | |
1661 | return -ENODEV; | |
1662 | ||
212d16cd GS |
1663 | return ret; |
1664 | } | |
1665 | EXPORT_SYMBOL_GPL(eeh_pe_configure); | |
1666 | ||
ec33d36e GS |
1667 | /** |
1668 | * eeh_pe_inject_err - Injecting the specified PCI error to the indicated PE | |
1669 | * @pe: the indicated PE | |
1670 | * @type: error type | |
1671 | * @function: error function | |
1672 | * @addr: address | |
1673 | * @mask: address mask | |
1674 | * | |
1675 | * The routine is called to inject the specified PCI error, which | |
1676 | * is determined by @type and @function, to the indicated PE for | |
1677 | * testing purpose. | |
1678 | */ | |
1679 | int eeh_pe_inject_err(struct eeh_pe *pe, int type, int func, | |
1680 | unsigned long addr, unsigned long mask) | |
1681 | { | |
1682 | /* Invalid PE ? */ | |
1683 | if (!pe) | |
1684 | return -ENODEV; | |
1685 | ||
1686 | /* Unsupported operation ? */ | |
1687 | if (!eeh_ops || !eeh_ops->err_inject) | |
1688 | return -ENOENT; | |
1689 | ||
1690 | /* Check on PCI error type */ | |
1691 | if (type != EEH_ERR_TYPE_32 && type != EEH_ERR_TYPE_64) | |
1692 | return -EINVAL; | |
1693 | ||
1694 | /* Check on PCI error function */ | |
1695 | if (func < EEH_ERR_FUNC_MIN || func > EEH_ERR_FUNC_MAX) | |
1696 | return -EINVAL; | |
1697 | ||
1698 | return eeh_ops->err_inject(pe, type, func, addr, mask); | |
1699 | } | |
1700 | EXPORT_SYMBOL_GPL(eeh_pe_inject_err); | |
1701 | ||
1da177e4 LT |
/*
 * proc_eeh_show - seq_file show callback for /proc/powerpc/eeh
 * @m: destination seq_file
 * @v: unused iterator cookie
 *
 * Prints whether the EEH subsystem is enabled and a summary of the
 * global eeh_stats counters.  Always returns 0.
 */
static int proc_eeh_show(struct seq_file *m, void *v)
{
	if (!eeh_enabled()) {
		/* Disabled: only the MMIO 0xff-word counter is reported */
		seq_printf(m, "EEH Subsystem is globally disabled\n");
		seq_printf(m, "eeh_total_mmio_ffs=%llu\n", eeh_stats.total_mmio_ffs);
	} else {
		seq_printf(m, "EEH Subsystem is enabled\n");
		seq_printf(m,
			   "no device=%llu\n"
			   "no device node=%llu\n"
			   "no config address=%llu\n"
			   "check not wanted=%llu\n"
			   "eeh_total_mmio_ffs=%llu\n"
			   "eeh_false_positives=%llu\n"
			   "eeh_slot_resets=%llu\n",
			   eeh_stats.no_device,
			   eeh_stats.no_dn,
			   eeh_stats.no_cfg_addr,
			   eeh_stats.ignored_check,
			   eeh_stats.total_mmio_ffs,
			   eeh_stats.false_positives,
			   eeh_stats.slot_resets);
	}

	return 0;
}
1728 | ||
/* open() hook: bind the single-shot seq_file renderer to the proc entry */
static int proc_eeh_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_eeh_show, NULL);
}
1733 | ||
5dfe4c96 | 1734 | static const struct file_operations proc_eeh_operations = { |
1da177e4 LT |
1735 | .open = proc_eeh_open, |
1736 | .read = seq_read, | |
1737 | .llseek = seq_lseek, | |
1738 | .release = single_release, | |
1739 | }; | |
1740 | ||
7f52a526 GS |
1741 | #ifdef CONFIG_DEBUG_FS |
1742 | static int eeh_enable_dbgfs_set(void *data, u64 val) | |
1743 | { | |
1744 | if (val) | |
05b1721d | 1745 | eeh_clear_flag(EEH_FORCE_DISABLED); |
7f52a526 | 1746 | else |
05b1721d | 1747 | eeh_add_flag(EEH_FORCE_DISABLED); |
7f52a526 GS |
1748 | |
1749 | /* Notify the backend */ | |
1750 | if (eeh_ops->post_init) | |
1751 | eeh_ops->post_init(); | |
1752 | ||
1753 | return 0; | |
1754 | } | |
1755 | ||
1756 | static int eeh_enable_dbgfs_get(void *data, u64 *val) | |
1757 | { | |
1758 | if (eeh_enabled()) | |
1759 | *val = 0x1ul; | |
1760 | else | |
1761 | *val = 0x0ul; | |
1762 | return 0; | |
1763 | } | |
1764 | ||
1b28f170 GS |
/* debugfs "eeh_max_freezes" write hook: set the permanent-failure threshold */
static int eeh_freeze_dbgfs_set(void *data, u64 val)
{
	eeh_max_freezes = val;
	return 0;
}
1770 | ||
/* debugfs "eeh_max_freezes" read hook: report the current threshold */
static int eeh_freeze_dbgfs_get(void *data, u64 *val)
{
	*val = eeh_max_freezes;
	return 0;
}
1776 | ||
7f52a526 GS |
/* Simple-attribute fops pairing the get/set hooks above; values in hex */
DEFINE_SIMPLE_ATTRIBUTE(eeh_enable_dbgfs_ops, eeh_enable_dbgfs_get,
			eeh_enable_dbgfs_set, "0x%llx\n");
DEFINE_SIMPLE_ATTRIBUTE(eeh_freeze_dbgfs_ops, eeh_freeze_dbgfs_get,
			eeh_freeze_dbgfs_set, "0x%llx\n");
#endif
1781 | #endif |
1782 | ||
1da177e4 LT |
/*
 * eeh_init_proc - Register the EEH proc and debugfs interfaces
 *
 * Initcall: on platforms with EEH support (pseries, powernv) create
 * /proc/powerpc/eeh plus, when CONFIG_DEBUG_FS is set, the
 * "eeh_enable" and "eeh_max_freezes" debugfs knobs.  Always returns 0,
 * even if entry creation fails (return values are not checked).
 */
static int __init eeh_init_proc(void)
{
	if (machine_is(pseries) || machine_is(powernv)) {
		proc_create("powerpc/eeh", 0, NULL, &proc_eeh_operations);
#ifdef CONFIG_DEBUG_FS
		debugfs_create_file("eeh_enable", 0600,
				    powerpc_debugfs_root, NULL,
				    &eeh_enable_dbgfs_ops);
		debugfs_create_file("eeh_max_freezes", 0600,
				    powerpc_debugfs_root, NULL,
				    &eeh_freeze_dbgfs_ops);
#endif
	}

	return 0;
}
__initcall(eeh_init_proc);