/*
 * PCI Error Recovery Driver for RPA-compliant PPC64 platform.
 * Copyright IBM Corp. 2004 2005
 * Copyright Linas Vepstas <linas@linas.org> 2004, 2005
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
 */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>
#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <asm/rtas.h>

/**
 * eeh_pcid_name - Retrieve name of PCI device driver
 * @pdev: PCI device
 *
 * This routine is used to retrieve the name of the PCI device's
 * driver, if one is bound; otherwise an empty string is returned.
 */
static inline const char *eeh_pcid_name(struct pci_dev *pdev)
{
	if (pdev && pdev->dev.driver)
		return pdev->dev.driver->name;
	return "";
}

/**
 * eeh_pcid_get - Get the PCI device driver
 * @pdev: PCI device
 *
 * The function is used to retrieve the PCI device driver for
 * the indicated PCI device. Besides, we will increase the reference
 * count of the PCI device driver to prevent it from being unloaded
 * on the fly; otherwise, a kernel crash would be seen.
 */
static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
{
	if (!pdev || !pdev->driver)
		return NULL;

	if (!try_module_get(pdev->driver->driver.owner))
		return NULL;

	return pdev->driver;
}

/**
 * eeh_pcid_put - Dereference on the PCI device driver
 * @pdev: PCI device
 *
 * The function is called to drop the reference on the PCI device
 * driver of the indicated PCI device taken by eeh_pcid_get().
 */
static inline void eeh_pcid_put(struct pci_dev *pdev)
{
	if (!pdev || !pdev->driver)
		return;

	module_put(pdev->driver->driver.owner);
}

/**
 * eeh_disable_irq - Disable interrupt for the recovering device
 * @dev: PCI device
 *
 * This routine must be called when reporting a temporary or permanent
 * error to the particular PCI device to disable the interrupt of that
 * device. If the device has MSI or MSI-X interrupts enabled, we needn't
 * do any real work because EEH should freeze DMA transfers for PCI
 * devices encountering EEH errors, which covers MSI and MSI-X as well.
 */
static void eeh_disable_irq(struct pci_dev *dev)
{
	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);

	/* Don't disable MSI and MSI-X interrupts. They are
	 * effectively disabled by the DMA Stopped state
	 * when an EEH error occurs.
	 */
	if (dev->msi_enabled || dev->msix_enabled)
		return;

	if (!irq_has_action(dev->irq))
		return;

	edev->mode |= EEH_DEV_IRQ_DISABLED;
	disable_irq_nosync(dev->irq);
}

/**
 * eeh_enable_irq - Enable interrupt for the recovering device
 * @dev: PCI device
 *
 * This routine must be called to enable the interrupt when the
 * failed device is resumed.
 */
static void eeh_enable_irq(struct pci_dev *dev)
{
	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);

	if ((edev->mode) & EEH_DEV_IRQ_DISABLED) {
		edev->mode &= ~EEH_DEV_IRQ_DISABLED;
		/*
		 * FIXME !!!!!
		 *
		 * This is just ass backwards. This maze has
		 * unbalanced irq_enable/disable calls. So instead of
		 * finding the root cause it works around the warning
		 * in the irq_enable code by conditionally calling
		 * into it.
		 *
		 * That's just wrong. The warning in the core code is
		 * there to tell people to fix their asymmetries in
		 * their own code, not by abusing the core information
		 * to avoid it.
		 *
		 * I so wish that the asymmetry would be the other way
		 * round and a few more irq_disable calls render that
		 * shit unusable forever.
		 *
		 *	tglx
		 */
		if (irqd_irq_disabled(irq_get_irq_data(dev->irq)))
			enable_irq(dev->irq);
	}
}

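/**
 * eeh_dev_removed - Check if the indicated EEH device has been removed
 * @edev: EEH device
 */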
static bool eeh_dev_removed(struct eeh_dev *edev)
{
	/* EEH device removed ? */
	if (!edev || (edev->mode & EEH_DEV_REMOVED))
		return true;

	return false;
}

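/**
 * eeh_dev_save_state - Save the PCI config space of a device
 * @data: EEH device
 * @userdata: unused
 *
 * Save the PCI configuration space of the device attached to the
 * EEH device so that it can be restored after the PE is reset.
 */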
static void *eeh_dev_save_state(void *data, void *userdata)
{
	struct eeh_dev *edev = data;
	struct pci_dev *pdev;

	if (!edev)
		return NULL;

	pdev = eeh_dev_to_pci_dev(edev);
	if (!pdev)
		return NULL;

	pci_save_state(pdev);
	return NULL;
}

/**
 * eeh_report_error - Report pci error to each device driver
 * @data: eeh device
 * @userdata: return value
 *
 * Report an EEH error to each device driver, collect up and
 * merge the device driver responses. Cumulative response
 * passed back in "userdata".
 */
static void *eeh_report_error(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	enum pci_ers_result rc, *res = userdata;
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev))
		return NULL;
	dev->error_state = pci_channel_io_frozen;

	driver = eeh_pcid_get(dev);
	if (!driver) return NULL;

	eeh_disable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->error_detected) {
		eeh_pcid_put(dev);
		return NULL;
	}

	rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen);

	/* A driver that needs a reset trumps all others */
	if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
	if (*res == PCI_ERS_RESULT_NONE) *res = rc;

	eeh_pcid_put(dev);
	return NULL;
}

/**
 * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled
 * @data: eeh device
 * @userdata: return value
 *
 * Tells each device driver that IO ports, MMIO and config space I/O
 * are now enabled. Collects up and merges the device driver responses.
 * Cumulative response passed back in "userdata".
 */
static void *eeh_report_mmio_enabled(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	enum pci_ers_result rc, *res = userdata;
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev))
		return NULL;

	driver = eeh_pcid_get(dev);
	if (!driver) return NULL;

	if (!driver->err_handler ||
	    !driver->err_handler->mmio_enabled ||
	    (edev->mode & EEH_DEV_NO_HANDLER)) {
		eeh_pcid_put(dev);
		return NULL;
	}

	rc = driver->err_handler->mmio_enabled(dev);

	/* A driver that needs a reset trumps all others */
	if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
	if (*res == PCI_ERS_RESULT_NONE) *res = rc;

	eeh_pcid_put(dev);
	return NULL;
}

/**
 * eeh_report_reset - Tell device that slot has been reset
 * @data: eeh device
 * @userdata: return value
 *
 * This routine must be called while EEH tries to reset a particular
 * PCI device so that the associated PCI device driver could take
 * some actions, usually to save data the driver needs so that the
 * driver can work again while the device is recovered.
 */
static void *eeh_report_reset(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	enum pci_ers_result rc, *res = userdata;
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev))
		return NULL;
	dev->error_state = pci_channel_io_normal;

	driver = eeh_pcid_get(dev);
	if (!driver) return NULL;

	eeh_enable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->slot_reset ||
	    (edev->mode & EEH_DEV_NO_HANDLER)) {
		eeh_pcid_put(dev);
		return NULL;
	}

	rc = driver->err_handler->slot_reset(dev);
	if ((*res == PCI_ERS_RESULT_NONE) ||
	    (*res == PCI_ERS_RESULT_RECOVERED)) *res = rc;
	if (*res == PCI_ERS_RESULT_DISCONNECT &&
	    rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;

	eeh_pcid_put(dev);
	return NULL;
}

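/**
 * eeh_dev_restore_state - Restore the saved PCI config space of a device
 * @data: EEH device
 * @userdata: unused
 *
 * Restore the PCI configuration space that was saved by
 * eeh_dev_save_state() before the PE was reset.
 */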
static void *eeh_dev_restore_state(void *data, void *userdata)
{
	struct eeh_dev *edev = data;
	struct pci_dev *pdev;

	if (!edev)
		return NULL;

	pdev = eeh_dev_to_pci_dev(edev);
	if (!pdev)
		return NULL;

	pci_restore_state(pdev);
	return NULL;
}

/**
 * eeh_report_resume - Tell device to resume normal operations
 * @data: eeh device
 * @userdata: return value
 *
 * This routine must be called to notify the device driver that it
 * could resume so that the device driver can do some initialization
 * to make the recovered device work again.
 */
static void *eeh_report_resume(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev))
		return NULL;
	dev->error_state = pci_channel_io_normal;

	driver = eeh_pcid_get(dev);
	if (!driver) return NULL;

	eeh_enable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->resume ||
	    (edev->mode & EEH_DEV_NO_HANDLER)) {
		edev->mode &= ~EEH_DEV_NO_HANDLER;
		eeh_pcid_put(dev);
		return NULL;
	}

	driver->err_handler->resume(dev);

	eeh_pcid_put(dev);
	return NULL;
}

/**
 * eeh_report_failure - Tell device driver that device is dead.
 * @data: eeh device
 * @userdata: return value
 *
 * This informs the device driver that the device is permanently
 * dead, and that no further recovery attempts will be made on it.
 */
static void *eeh_report_failure(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev))
		return NULL;
	dev->error_state = pci_channel_io_perm_failure;

	driver = eeh_pcid_get(dev);
	if (!driver) return NULL;

	eeh_disable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->error_detected) {
		eeh_pcid_put(dev);
		return NULL;
	}

	driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);

	eeh_pcid_put(dev);
	return NULL;
}

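/**
 * eeh_rmv_device - Remove a device that lacks an EEH-aware driver
 * @data: EEH device
 * @userdata: counter of removed devices
 *
 * Devices whose drivers don't implement the error_detected, slot_reset
 * and resume error handlers are removed from the PCI subsystem before
 * the reset, to be re-added by a partial hotplug afterwards.
 */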
static void *eeh_rmv_device(void *data, void *userdata)
{
	struct pci_driver *driver;
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	int *removed = (int *)userdata;

	/*
	 * Actually, we should remove the PCI bridges as well.
	 * However, that adds a lot of complexity, particularly
	 * since some of the devices under the bridge might
	 * support EEH. So we just care about PCI devices for
	 * simplicity here.
	 */
	if (!dev || (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE))
		return NULL;

	/*
	 * We rely on count-based pcibios_release_device() to
	 * detach permanently offlined PEs. Unfortunately, that's
	 * not reliable enough. We might have the permanently
	 * offlined PEs attached, but we needn't take care of
	 * them and their child devices.
	 */
	if (eeh_dev_removed(edev))
		return NULL;

	driver = eeh_pcid_get(dev);
	if (driver) {
		eeh_pcid_put(dev);
		if (driver->err_handler &&
		    driver->err_handler->error_detected &&
		    driver->err_handler->slot_reset &&
		    driver->err_handler->resume)
			return NULL;
	}

	/* Remove it from PCI subsystem */
	pr_debug("EEH: Removing %s without EEH sensitive driver\n",
		 pci_name(dev));
	edev->bus = dev->bus;
	edev->mode |= EEH_DEV_DISCONNECTED;
	(*removed)++;

	pci_lock_rescan_remove();
	pci_stop_and_remove_bus_device(dev);
	pci_unlock_rescan_remove();

	return NULL;
}

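/**
 * eeh_pe_detach_dev - Detach the disconnected devices from their PE
 * @data: EEH PE
 * @userdata: unused
 *
 * Unbind the EEH devices that eeh_rmv_device() marked as DISCONNECTED
 * from their parent PE, so the binding can be rebuilt when the devices
 * are added back.
 */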
static void *eeh_pe_detach_dev(void *data, void *userdata)
{
	struct eeh_pe *pe = (struct eeh_pe *)data;
	struct eeh_dev *edev, *tmp;

	eeh_pe_for_each_dev(pe, edev, tmp) {
		if (!(edev->mode & EEH_DEV_DISCONNECTED))
			continue;

		edev->mode &= ~(EEH_DEV_DISCONNECTED | EEH_DEV_IRQ_DISABLED);
		eeh_rmv_from_parent_pe(edev);
	}

	return NULL;
}

/*
 * Explicitly clear the PE's frozen state on PowerNV, where we keep
 * the PE frozen until BAR restore is completed. It's harmless to
 * clear it for pSeries. To be consistent with PE reset (tried up to
 * 3 times), we try to clear the frozen state up to 3 times as well.
 */
static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
{
	struct eeh_pe *pe = (struct eeh_pe *)data;
	bool *clear_sw_state = flag;
	int i, rc = 1;

	for (i = 0; rc && i < 3; i++)
		rc = eeh_unfreeze_pe(pe, clear_sw_state);

	/* Stop immediately on any errors */
	if (rc) {
		pr_warn("%s: Failure %d unfreezing PHB#%x-PE#%x\n",
			__func__, rc, pe->phb->global_number, pe->addr);
		return (void *)pe;
	}

	return NULL;
}

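/* Clear the frozen state of every PE in the tree rooted at @pe */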
static int eeh_clear_pe_frozen_state(struct eeh_pe *pe,
				     bool clear_sw_state)
{
	void *rc;

	rc = eeh_pe_traverse(pe, __eeh_clear_pe_frozen_state, &clear_sw_state);
	if (!rc)
		eeh_pe_state_clear(pe, EEH_PE_ISOLATED);

	return rc ? -EIO : 0;
}

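/**
 * eeh_pe_reset_and_recover - Reset the indicated PE and recover its devices
 * @pe: EEH PE
 *
 * Save the state of the attached devices, report the error, reset and
 * unfreeze the PE, then walk the devices through the reset and resume
 * callbacks while restoring their saved state.
 */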
int eeh_pe_reset_and_recover(struct eeh_pe *pe)
{
	int result, ret;

	/* Bail if the PE is being recovered */
	if (pe->state & EEH_PE_RECOVERING)
		return 0;

	/* Put the PE into recovery mode */
	eeh_pe_state_mark(pe, EEH_PE_RECOVERING);

	/* Save states */
	eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL);

	/* Report error */
	eeh_pe_dev_traverse(pe, eeh_report_error, &result);

	/* Issue reset */
	ret = eeh_reset_pe(pe);
	if (ret) {
		eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
		return ret;
	}

	/* Unfreeze the PE */
	ret = eeh_clear_pe_frozen_state(pe, true);
	if (ret) {
		eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
		return ret;
	}

	/* Notify completion of reset */
	eeh_pe_dev_traverse(pe, eeh_report_reset, &result);

	/* Restore device state */
	eeh_pe_dev_traverse(pe, eeh_dev_restore_state, NULL);

	/* Resume */
	eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);

	/* Clear recovery mode */
	eeh_pe_state_clear(pe, EEH_PE_RECOVERING);

	return 0;
}

/**
 * eeh_reset_device - Perform actual reset of a pci slot
 * @pe: EEH PE
 * @bus: PCI bus corresponding to the isolated slot
 *
 * This routine must be called to do reset on the indicated PE.
 * During the reset, udev might be invoked because those affected
 * PCI devices will be removed and then added.
 */
static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
{
	struct pci_bus *frozen_bus = eeh_pe_bus_get(pe);
	struct timeval tstamp;
	int cnt, rc, removed = 0;

	/* pcibios will clear the counter; save the value */
	cnt = pe->freeze_count;
	tstamp = pe->tstamp;

	/*
	 * We don't remove the corresponding PE instances because
	 * we need the information afterwards. The attached EEH
	 * devices are expected to be attached soon when calling
	 * into pcibios_add_pci_devices().
	 */
	eeh_pe_state_mark(pe, EEH_PE_KEEP);
	if (bus) {
		eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
		pci_lock_rescan_remove();
		pcibios_remove_pci_devices(bus);
		pci_unlock_rescan_remove();
	} else if (frozen_bus) {
		eeh_pe_dev_traverse(pe, eeh_rmv_device, &removed);
	}

	/*
	 * Reset the pci controller. (Asserts RST#; resets config space).
	 * Reconfigure bridges and devices. Don't try to bring the system
	 * up if the reset failed for some reason.
	 *
	 * During the reset, it's very dangerous to have uncontrolled PCI
	 * config accesses. So we prefer to block them. However, controlled
	 * PCI config accesses initiated from EEH itself are allowed.
	 */
	rc = eeh_reset_pe(pe);
	if (rc)
		return rc;

	pci_lock_rescan_remove();

	/* Restore PE */
	eeh_ops->configure_bridge(pe);
	eeh_pe_restore_bars(pe);

	/* Clear frozen state */
	rc = eeh_clear_pe_frozen_state(pe, false);
	if (rc) {
		/* Don't return with the rescan/remove lock still held */
		pci_unlock_rescan_remove();
		return rc;
	}

	/* Give the system 5 seconds to finish running the user-space
	 * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
	 * this is a hack, but if we don't do this, and try to bring
	 * the device up before the scripts have taken it down,
	 * potentially weird things happen.
	 */
	if (bus) {
		pr_info("EEH: Sleep 5s ahead of complete hotplug\n");
		ssleep(5);

		/*
		 * The EEH device is still connected with its parent
		 * PE. We should disconnect it so the binding can be
		 * rebuilt when adding PCI devices.
		 */
		eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
		pcibios_add_pci_devices(bus);
	} else if (frozen_bus && removed) {
		pr_info("EEH: Sleep 5s ahead of partial hotplug\n");
		ssleep(5);

		eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
		pcibios_add_pci_devices(frozen_bus);
	}
	eeh_pe_state_clear(pe, EEH_PE_KEEP);

	pe->tstamp = tstamp;
	pe->freeze_count = cnt;

	pci_unlock_rescan_remove();
	return 0;
}

/* The longest amount of time to wait for a pci device
 * to come back on line, in seconds.
 */
#define MAX_WAIT_FOR_RECOVERY 300

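/**
 * eeh_handle_normal_event - Handle EEH events on a specific PE
 * @pe: EEH PE
 *
 * Attempt to recover the given PE: the attached drivers are asked to
 * report the error, the PE is reset if necessary, and the drivers are
 * then asked to resume. If the PE cannot be recovered, or has failed
 * too many times, its devices are removed permanently.
 */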
static void eeh_handle_normal_event(struct eeh_pe *pe)
{
	struct pci_bus *frozen_bus;
	int rc = 0;
	enum pci_ers_result result = PCI_ERS_RESULT_NONE;

	frozen_bus = eeh_pe_bus_get(pe);
	if (!frozen_bus) {
		pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
		       __func__, pe->phb->global_number, pe->addr);
		return;
	}

	eeh_pe_update_time_stamp(pe);
	pe->freeze_count++;
	if (pe->freeze_count > eeh_max_freezes)
		goto excess_failures;
	pr_warn("EEH: This PCI device has failed %d times in the last hour\n",
		pe->freeze_count);

	/* Walk the various device drivers attached to this slot through
	 * a reset sequence, giving each an opportunity to do what it needs
	 * to accomplish the reset. Each child gets a report of the
	 * status ... if any child can't handle the reset, then the entire
	 * slot is dlpar removed and added.
	 *
	 * When the PHB is fenced, we have to issue a reset to recover from
	 * the error. Override the result if necessary to have partial
	 * hotplug for this case.
	 */
	pr_info("EEH: Notify device drivers to shutdown\n");
	eeh_pe_dev_traverse(pe, eeh_report_error, &result);
	if ((pe->type & EEH_PE_PHB) &&
	    result != PCI_ERS_RESULT_NONE &&
	    result != PCI_ERS_RESULT_NEED_RESET)
		result = PCI_ERS_RESULT_NEED_RESET;

	/* Get the current PCI slot state. This can take a long time,
	 * sometimes over 300 seconds for certain systems.
	 */
	rc = eeh_ops->wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000);
	if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
		pr_warn("EEH: Permanent failure\n");
		goto hard_fail;
	}

	/* Since rtas may enable MMIO when posting the error log,
	 * don't post the error log until after all dev drivers
	 * have been informed.
	 */
	pr_info("EEH: Collect temporary log\n");
	eeh_slot_error_detail(pe, EEH_LOG_TEMP);

	/* If all device drivers were EEH-unaware, then shut
	 * down all of the device drivers, and hope they
	 * go down willingly, without panicking the system.
	 */
	if (result == PCI_ERS_RESULT_NONE) {
		pr_info("EEH: Reset with hotplug activity\n");
		rc = eeh_reset_device(pe, frozen_bus);
		if (rc) {
			pr_warn("%s: Unable to reset, err=%d\n",
				__func__, rc);
			goto hard_fail;
		}
	}

	/* If all devices reported they can proceed, then re-enable MMIO */
	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
		pr_info("EEH: Enable I/O for affected devices\n");
		rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);

		if (rc < 0)
			goto hard_fail;
		if (rc) {
			result = PCI_ERS_RESULT_NEED_RESET;
		} else {
			pr_info("EEH: Notify device drivers to resume I/O\n");
			eeh_pe_dev_traverse(pe, eeh_report_mmio_enabled, &result);
		}
	}

	/* If all devices reported they can proceed, then re-enable DMA */
	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
		pr_info("EEH: Enabled DMA for affected devices\n");
		rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);

		if (rc < 0)
			goto hard_fail;
		if (rc) {
			result = PCI_ERS_RESULT_NEED_RESET;
		} else {
			/*
			 * We didn't do PE reset for the case. The PE
			 * is still in frozen state. Clear it before
			 * resuming the PE.
			 */
			eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
			result = PCI_ERS_RESULT_RECOVERED;
		}
	}

	/* If any device has a hard failure, then shut off everything. */
	if (result == PCI_ERS_RESULT_DISCONNECT) {
		pr_warn("EEH: Device driver gave up\n");
		goto hard_fail;
	}

	/* If any device called out for a reset, then reset the slot */
	if (result == PCI_ERS_RESULT_NEED_RESET) {
		pr_info("EEH: Reset without hotplug activity\n");
		rc = eeh_reset_device(pe, NULL);
		if (rc) {
			pr_warn("%s: Cannot reset, err=%d\n",
				__func__, rc);
			goto hard_fail;
		}

		pr_info("EEH: Notify device drivers "
			"the completion of reset\n");
		result = PCI_ERS_RESULT_NONE;
		eeh_pe_dev_traverse(pe, eeh_report_reset, &result);
	}

	/* All devices should claim they have recovered by now. */
	if ((result != PCI_ERS_RESULT_RECOVERED) &&
	    (result != PCI_ERS_RESULT_NONE)) {
		pr_warn("EEH: Not recovered\n");
		goto hard_fail;
	}

	/* Tell all device drivers that they can resume operations */
	pr_info("EEH: Notify device driver to resume\n");
	eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);

	return;

excess_failures:
	/*
	 * About 90% of all real-life EEH failures in the field
	 * are due to poorly seated PCI cards. Only 10% or so are
	 * due to actual, failed cards.
	 */
	pr_err("EEH: PHB#%d-PE#%x has failed %d times in the\n"
	       "last hour and has been permanently disabled.\n"
	       "Please try reseating or replacing it.\n",
	       pe->phb->global_number, pe->addr,
	       pe->freeze_count);
	goto perm_error;

hard_fail:
	pr_err("EEH: Unable to recover from failure from PHB#%d-PE#%x.\n"
	       "Please try reseating or replacing it\n",
	       pe->phb->global_number, pe->addr);

perm_error:
	eeh_slot_error_detail(pe, EEH_LOG_PERM);

	/* Notify all devices that they're about to go down. */
	eeh_pe_dev_traverse(pe, eeh_report_failure, NULL);

	/* Mark the PE to be removed permanently */
	eeh_pe_state_mark(pe, EEH_PE_REMOVED);

	/*
	 * Shut down the device drivers for good. We mark
	 * all removed devices correctly to avoid accessing
	 * their PCI config any more.
	 */
	if (frozen_bus) {
		eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
		eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);

		pci_lock_rescan_remove();
		pcibios_remove_pci_devices(frozen_bus);
		pci_unlock_rescan_remove();
	}
}

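/**
 * eeh_handle_special_event - Handle EEH events without a specific failing PE
 *
 * Called when an EEH event is detected but can't be narrowed down to a
 * specific PE. Iterates through possible failures and handles them as
 * necessary.
 */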
static void eeh_handle_special_event(void)
{
	struct eeh_pe *pe, *phb_pe;
	struct pci_bus *bus;
	struct pci_controller *hose;
	unsigned long flags;
	int rc;

	do {
		rc = eeh_ops->next_error(&pe);

		switch (rc) {
		case EEH_NEXT_ERR_DEAD_IOC:
			/* Mark all PHBs in dead state */
			eeh_serialize_lock(&flags);

			/* Purge all events */
			eeh_remove_event(NULL, true);

			list_for_each_entry(hose, &hose_list, list_node) {
				phb_pe = eeh_phb_pe_get(hose);
				if (!phb_pe) continue;

				eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED);
			}

			eeh_serialize_unlock(flags);

			break;
		case EEH_NEXT_ERR_FROZEN_PE:
		case EEH_NEXT_ERR_FENCED_PHB:
		case EEH_NEXT_ERR_DEAD_PHB:
			/* Mark the PE in fenced state */
			eeh_serialize_lock(&flags);

			/* Purge all events of the PHB */
			eeh_remove_event(pe, true);

			if (rc == EEH_NEXT_ERR_DEAD_PHB)
				eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
			else
				eeh_pe_state_mark(pe,
					EEH_PE_ISOLATED | EEH_PE_RECOVERING);

			eeh_serialize_unlock(flags);

			break;
		case EEH_NEXT_ERR_NONE:
			return;
		default:
			pr_warn("%s: Invalid value %d from next_error()\n",
				__func__, rc);
			return;
		}

		/*
		 * A fenced PHB or frozen PE is handled as a normal
		 * event. We have to remove the affected PHBs for a
		 * dead PHB or IOC.
		 */
		if (rc == EEH_NEXT_ERR_FROZEN_PE ||
		    rc == EEH_NEXT_ERR_FENCED_PHB) {
			eeh_handle_normal_event(pe);
			eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
		} else {
			pci_lock_rescan_remove();
			list_for_each_entry(hose, &hose_list, list_node) {
				phb_pe = eeh_phb_pe_get(hose);
				if (!phb_pe ||
				    !(phb_pe->state & EEH_PE_ISOLATED) ||
				    (phb_pe->state & EEH_PE_RECOVERING))
					continue;

				/* Notify all devices to be down */
				eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
				bus = eeh_pe_bus_get(phb_pe);
				eeh_pe_dev_traverse(pe,
					eeh_report_failure, NULL);
				pcibios_remove_pci_devices(bus);
			}
			pci_unlock_rescan_remove();
		}

		/*
		 * If we have detected a dead IOC, we needn't proceed
		 * any more since all PHBs would have been removed.
		 */
		if (rc == EEH_NEXT_ERR_DEAD_IOC)
			break;
	} while (rc != EEH_NEXT_ERR_NONE);
}

/**
 * eeh_handle_event - Reset a PCI device after hard lockup.
 * @pe: EEH PE
 *
 * When a PHB detects address or data parity errors on a particular PCI
 * slot, the associated PE will be frozen. Besides, DMAs occurring
 * to wild addresses (which usually happen due to bugs in device
 * drivers or in PCI adapter firmware) can cause EEH errors. #SERR,
 * #PERR or other misc PCI-related errors also can trigger EEH errors.
 *
 * The recovery process consists of unplugging the device driver (which
 * generates hotplug events to userspace), then issuing a PCI #RST to
 * the device, then reconfiguring the PCI config space for all bridges
 * & devices under this slot, and then finally restarting the device
 * drivers (which causes a second set of hotplug events to go out to
 * userspace).
 */
void eeh_handle_event(struct eeh_pe *pe)
{
	if (pe)
		eeh_handle_normal_event(pe);
	else
		eeh_handle_special_event();
}