/*
 * Copyright IBM Corporation 2001, 2005, 2006
 * Copyright Dave Engebretsen & Todd Inglett 2001
 * Copyright Linas Vepstas 2005, 2006
 * Copyright 2001-2012 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Please address comments and feedback to Linas Vepstas <linas@austin.ibm.com>
 */
23
24 #include <linux/delay.h>
25 #include <linux/debugfs.h>
26 #include <linux/sched.h>
27 #include <linux/init.h>
28 #include <linux/list.h>
29 #include <linux/pci.h>
30 #include <linux/iommu.h>
31 #include <linux/proc_fs.h>
32 #include <linux/rbtree.h>
33 #include <linux/reboot.h>
34 #include <linux/seq_file.h>
35 #include <linux/spinlock.h>
36 #include <linux/export.h>
37 #include <linux/of.h>
38
39 #include <linux/atomic.h>
40 #include <asm/debug.h>
41 #include <asm/eeh.h>
42 #include <asm/eeh_event.h>
43 #include <asm/io.h>
44 #include <asm/iommu.h>
45 #include <asm/machdep.h>
46 #include <asm/ppc-pci.h>
47 #include <asm/rtas.h>
48
49
/** Overview:
 * EEH, or "Extended Error Handling" is a PCI bridge technology for
 * dealing with PCI bus errors that can't be dealt with within the
 * usual PCI framework, except by check-stopping the CPU.  Systems
 * that are designed for high-availability/reliability cannot afford
 * to crash due to a "mere" PCI error, thus the need for EEH.
 * An EEH-capable bridge operates by converting a detected error
 * into a "slot freeze", taking the PCI adapter off-line, making
 * the slot behave, from the OS's point of view, as if the slot
 * were "empty": all reads return 0xff's and all writes are silently
 * ignored.  EEH slot isolation events can be triggered by parity
 * errors on the address or data busses (e.g. during posted writes),
 * which in turn might be caused by low voltage on the bus, dust,
 * vibration, humidity, radioactivity or plain-old failed hardware.
 *
 * Note, however, that one of the leading causes of EEH slot
 * freeze events are buggy device drivers, buggy device microcode,
 * or buggy device hardware.  This is because any attempt by the
 * device to bus-master data to a memory address that is not
 * assigned to the device will trigger a slot freeze.  (The idea
 * is to prevent devices-gone-wild from corrupting system memory).
 * Buggy hardware/drivers will have a miserable time co-existing
 * with EEH.
 *
 * Ideally, a PCI device driver, when suspecting that an isolation
 * event has occurred (e.g. by reading 0xff's), will then ask EEH
 * whether this is the case, and then take appropriate steps to
 * reset the PCI slot, the PCI device, and then resume operations.
 * However, until that day, the checking is done here, with the
 * eeh_check_failure() routine embedded in the MMIO macros.  If
 * the slot is found to be isolated, an "EEH Event" is synthesized
 * and sent out for processing.
 */
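
/*
 * Illustrative sketch (editorial, not part of the original source):
 * a driver that suspects an isolation event after reading all-ones
 * can ask EEH directly rather than relying on the checks embedded
 * in the MMIO macros.  "base" and STATUS_REG are hypothetical:
 *
 *	u32 val = readl(base + STATUS_REG);
 *
 *	if (val == 0xffffffff && eeh_check_failure(base + STATUS_REG))
 *		return -EIO;	// slot frozen; a recovery event is queued
 */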

/* If a device driver keeps reading an MMIO register in an interrupt
 * handler after a slot isolation event, it might be broken.
 * This sets the threshold for how many read attempts we allow
 * before printing an error message.
 */
#define EEH_MAX_FAILS	2100000

/* Time to wait for a PCI slot to report status, in milliseconds */
#define PCI_BUS_RESET_WAIT_MSEC	(5*60*1000)

/*
 * EEH probe mode support, which is part of the flags,
 * is to support multiple platforms for EEH. Some platforms
 * like pSeries do PCI emulation based on the device tree.
 * However, other platforms like powernv probe PCI devices
 * from hardware. The flag is used to distinguish that.
 * In addition, struct eeh_ops::probe would be invoked for
 * a particular OF node or PCI device so that the corresponding
 * PE would be created there.
 */
int eeh_subsystem_flags;
EXPORT_SYMBOL(eeh_subsystem_flags);

/* Platform dependent EEH operations */
struct eeh_ops *eeh_ops = NULL;

/* Lock to avoid races due to multiple reports of an error */
DEFINE_RAW_SPINLOCK(confirm_error_lock);

/* Lock to protect passed flags */
static DEFINE_MUTEX(eeh_dev_mutex);

/* Buffer for reporting pci register dumps. It's here in BSS, and
 * not dynamically allocated, so that it ends up in RMO where RTAS
 * can access it.
 */
#define EEH_PCI_REGS_LOG_LEN	8192
static unsigned char pci_regs_buf[EEH_PCI_REGS_LOG_LEN];

/*
 * The struct is used to maintain the EEH global statistics, which
 * are also exported to user space through procfs.
 */
struct eeh_stats {
	u64 no_device;		/* PCI device not found		*/
	u64 no_dn;		/* OF node not found		*/
	u64 no_cfg_addr;	/* Config address not found	*/
	u64 ignored_check;	/* EEH check skipped		*/
	u64 total_mmio_ffs;	/* Total EEH checks		*/
	u64 false_positives;	/* Unnecessary EEH checks	*/
	u64 slot_resets;	/* PE reset			*/
};

static struct eeh_stats eeh_stats;

#define IS_BRIDGE(class_code) (((class_code) >> 16) == PCI_BASE_CLASS_BRIDGE)

static int __init eeh_setup(char *str)
{
	if (!strcmp(str, "off"))
		eeh_add_flag(EEH_FORCE_DISABLED);

	return 1;
}
__setup("eeh=", eeh_setup);

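/*
 * Example: booting with "eeh=off" on the kernel command line sets
 * EEH_FORCE_DISABLED and turns off EEH checking globally; any other
 * value is ignored (the option is still consumed).
 */
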
/*
 * This routine captures assorted PCI configuration space data
 * for the indicated PCI device, and puts them into a buffer
 * for RTAS error logging.
 */
static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
{
	struct device_node *dn = eeh_dev_to_of_node(edev);
	u32 cfg;
	int cap, i;
	int n = 0, l = 0;
	char buffer[128];

	n += scnprintf(buf+n, len-n, "%s\n", dn->full_name);
	pr_warn("EEH: of node=%s\n", dn->full_name);

	eeh_ops->read_config(dn, PCI_VENDOR_ID, 4, &cfg);
	n += scnprintf(buf+n, len-n, "dev/vend:%08x\n", cfg);
	pr_warn("EEH: PCI device/vendor: %08x\n", cfg);

	eeh_ops->read_config(dn, PCI_COMMAND, 4, &cfg);
	n += scnprintf(buf+n, len-n, "cmd/stat:%x\n", cfg);
	pr_warn("EEH: PCI cmd/status register: %08x\n", cfg);

	/* Gather bridge-specific registers */
	if (edev->mode & EEH_DEV_BRIDGE) {
		eeh_ops->read_config(dn, PCI_SEC_STATUS, 2, &cfg);
		n += scnprintf(buf+n, len-n, "sec stat:%x\n", cfg);
		pr_warn("EEH: Bridge secondary status: %04x\n", cfg);

		eeh_ops->read_config(dn, PCI_BRIDGE_CONTROL, 2, &cfg);
		n += scnprintf(buf+n, len-n, "brdg ctl:%x\n", cfg);
		pr_warn("EEH: Bridge control: %04x\n", cfg);
	}

	/* Dump out the PCI-X command and status regs */
	cap = edev->pcix_cap;
	if (cap) {
		eeh_ops->read_config(dn, cap, 4, &cfg);
		n += scnprintf(buf+n, len-n, "pcix-cmd:%x\n", cfg);
		pr_warn("EEH: PCI-X cmd: %08x\n", cfg);

		eeh_ops->read_config(dn, cap+4, 4, &cfg);
		n += scnprintf(buf+n, len-n, "pcix-stat:%x\n", cfg);
		pr_warn("EEH: PCI-X status: %08x\n", cfg);
	}

	/* If PCI-E capable, dump PCI-E cap 10 */
	cap = edev->pcie_cap;
	if (cap) {
		n += scnprintf(buf+n, len-n, "pci-e cap10:\n");
		pr_warn("EEH: PCI-E capabilities and status follow:\n");

		for (i=0; i<=8; i++) {
			eeh_ops->read_config(dn, cap+4*i, 4, &cfg);
			n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);

			if ((i % 4) == 0) {
				if (i != 0)
					pr_warn("%s\n", buffer);

				l = scnprintf(buffer, sizeof(buffer),
					      "EEH: PCI-E %02x: %08x ",
					      4*i, cfg);
			} else {
				l += scnprintf(buffer+l, sizeof(buffer)-l,
					       "%08x ", cfg);
			}
		}

		pr_warn("%s\n", buffer);
	}

	/* If AER capable, dump it */
	cap = edev->aer_cap;
	if (cap) {
		n += scnprintf(buf+n, len-n, "pci-e AER:\n");
		pr_warn("EEH: PCI-E AER capability register set follows:\n");

		for (i=0; i<=13; i++) {
			eeh_ops->read_config(dn, cap+4*i, 4, &cfg);
			n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);

			if ((i % 4) == 0) {
				if (i != 0)
					pr_warn("%s\n", buffer);

				l = scnprintf(buffer, sizeof(buffer),
					      "EEH: PCI-E AER %02x: %08x ",
					      4*i, cfg);
			} else {
				l += scnprintf(buffer+l, sizeof(buffer)-l,
					       "%08x ", cfg);
			}
		}

		pr_warn("%s\n", buffer);
	}

	return n;
}

static void *eeh_dump_pe_log(void *data, void *flag)
{
	struct eeh_pe *pe = data;
	struct eeh_dev *edev, *tmp;
	size_t *plen = flag;

	/* If the PE's config space is blocked, 0xFF's will be
	 * returned. It's pointless to collect the log in this
	 * case.
	 */
	if (pe->state & EEH_PE_CFG_BLOCKED)
		return NULL;

	eeh_pe_for_each_dev(pe, edev, tmp)
		*plen += eeh_dump_dev_log(edev, pci_regs_buf + *plen,
					  EEH_PCI_REGS_LOG_LEN - *plen);

	return NULL;
}

/**
 * eeh_slot_error_detail - Generate combined log including driver log and error log
 * @pe: EEH PE
 * @severity: temporary or permanent error log
 *
 * This routine should be called to generate the combined log, which
 * comprises the driver log and the error log. The driver log is figured
 * out from the config space of the corresponding PCI device, while
 * the error log is fetched through a platform dependent function call.
 */
void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
{
	size_t loglen = 0;

	/*
	 * When the PHB is fenced or dead, it's pointless to collect
	 * the data from PCI config space because it should return
	 * 0xFF's. For ER, we still retrieve the data from the PCI
	 * config space.
	 *
	 * For pHyp, we have to enable IO for log retrieval. Otherwise,
	 * 0xFF's are always returned from PCI config space.
	 */
	if (!(pe->type & EEH_PE_PHB)) {
		if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG))
			eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
		eeh_ops->configure_bridge(pe);
		eeh_pe_restore_bars(pe);

		pci_regs_buf[0] = 0;
		eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen);
	}

	eeh_ops->get_log(pe, severity, pci_regs_buf, loglen);
}

/**
 * eeh_token_to_phys - Convert EEH address token to phys address
 * @token: I/O token, should be address in the form 0xA....
 *
 * This routine should be called to convert virtual I/O address
 * to physical one.
 */
static inline unsigned long eeh_token_to_phys(unsigned long token)
{
	pte_t *ptep;
	unsigned long pa;
	int hugepage_shift;

	/*
	 * We won't find hugepages here, iomem
	 */
	ptep = find_linux_pte_or_hugepte(init_mm.pgd, token, &hugepage_shift);
	if (!ptep)
		return token;
	WARN_ON(hugepage_shift);
	pa = pte_pfn(*ptep) << PAGE_SHIFT;

	return pa | (token & (PAGE_SIZE-1));
}

/*
 * On the PowerNV platform, we might already have a fenced PHB.
 * In that case, it's meaningless to recover a frozen PE; instead,
 * we have to handle the fenced PHB first.
 */
static int eeh_phb_check_failure(struct eeh_pe *pe)
{
	struct eeh_pe *phb_pe;
	unsigned long flags;
	int ret;

	if (!eeh_has_flag(EEH_PROBE_MODE_DEV))
		return -EPERM;

	/* Find the PHB PE */
	phb_pe = eeh_phb_pe_get(pe->phb);
	if (!phb_pe) {
		pr_warn("%s Can't find PE for PHB#%d\n",
			__func__, pe->phb->global_number);
		return -EEXIST;
	}

	/* If the PHB is already in a problematic state */
	eeh_serialize_lock(&flags);
	if (phb_pe->state & EEH_PE_ISOLATED) {
		ret = 0;
		goto out;
	}

	/* Check PHB state */
	ret = eeh_ops->get_state(phb_pe, NULL);
	if ((ret < 0) ||
	    (ret == EEH_STATE_NOT_SUPPORT) ||
	    (ret & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) ==
	    (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) {
		ret = 0;
		goto out;
	}

	/* Isolate the PHB and send event */
	eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED);
	eeh_serialize_unlock(flags);

	pr_err("EEH: PHB#%x failure detected, location: %s\n",
	       phb_pe->phb->global_number, eeh_pe_loc_get(phb_pe));
	dump_stack();
	eeh_send_failure_event(phb_pe);

	return 1;
out:
	eeh_serialize_unlock(flags);
	return ret;
}

/**
 * eeh_dev_check_failure - Check if all 1's data is due to EEH slot freeze
 * @edev: eeh device
 *
 * Check for an EEH failure for the given device node.  Call this
 * routine if the result of a read was all 0xff's and you want to
 * find out if this is due to an EEH slot freeze.  This routine
 * will query firmware for the EEH status.
 *
 * Returns 0 if there has not been an EEH error; otherwise returns
 * a non-zero value and queues up a slot isolation event notification.
 *
 * It is safe to call this routine in an interrupt context.
 */
int eeh_dev_check_failure(struct eeh_dev *edev)
{
	int ret;
	int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
	unsigned long flags;
	struct device_node *dn;
	struct pci_dev *dev;
	struct eeh_pe *pe, *parent_pe, *phb_pe;
	int rc = 0;
	const char *location;

	eeh_stats.total_mmio_ffs++;

	if (!eeh_enabled())
		return 0;

	if (!edev) {
		eeh_stats.no_dn++;
		return 0;
	}
	dn = eeh_dev_to_of_node(edev);
	dev = eeh_dev_to_pci_dev(edev);
	pe = eeh_dev_to_pe(edev);

	/* Access to IO BARs might get this far and still not want checking. */
	if (!pe) {
		eeh_stats.ignored_check++;
		pr_debug("EEH: Ignored check for %s %s\n",
			 eeh_pci_name(dev), dn->full_name);
		return 0;
	}

	if (!pe->addr && !pe->config_addr) {
		eeh_stats.no_cfg_addr++;
		return 0;
	}

	/*
	 * On the PowerNV platform, we might already have a fenced
	 * PHB and we need to take care of that first.
	 */
	ret = eeh_phb_check_failure(pe);
	if (ret > 0)
		return ret;

	/*
	 * If the PE isn't owned by us, we shouldn't check the
	 * state. Instead, let the owner handle it if the PE has
	 * been frozen.
	 */
	if (eeh_pe_passed(pe))
		return 0;

	/* If we already have a pending isolation event for this
	 * slot, we know it's bad already, we don't need to check.
	 * Do this checking under a lock; as multiple PCI devices
	 * in one slot might report errors simultaneously, and we
	 * only want one error recovery routine running.
	 */
	eeh_serialize_lock(&flags);
	rc = 1;
	if (pe->state & EEH_PE_ISOLATED) {
		pe->check_count++;
		if (pe->check_count % EEH_MAX_FAILS == 0) {
			location = of_get_property(dn, "ibm,loc-code", NULL);
			printk(KERN_ERR "EEH: %d reads ignored for recovering device at "
				"location=%s driver=%s pci addr=%s\n",
				pe->check_count, location,
				eeh_driver_name(dev), eeh_pci_name(dev));
			printk(KERN_ERR "EEH: Might be infinite loop in %s driver\n",
				eeh_driver_name(dev));
			dump_stack();
		}
		goto dn_unlock;
	}

	/*
	 * Now test for an EEH failure.  This is VERY expensive.
	 * Note that the eeh_config_addr may be a parent device
	 * in the case of a device behind a bridge, or it may be
	 * function zero of a multi-function device.
	 * In any case they must share a common PHB.
	 */
	ret = eeh_ops->get_state(pe, NULL);

	/* Note that config-io to empty slots may fail;
	 * they are empty when they don't have children.
	 * We punt under any of the following conditions: failure to
	 * get the PE's state, EEH not supported, a permanently
	 * unavailable state, or the PE being in a good state.
	 */
	if ((ret < 0) ||
	    (ret == EEH_STATE_NOT_SUPPORT) ||
	    ((ret & active_flags) == active_flags)) {
		eeh_stats.false_positives++;
		pe->false_positives++;
		rc = 0;
		goto dn_unlock;
	}

	/*
	 * It's a corner case that the parent PE has been put into
	 * a frozen state as well; we should take care of that first.
	 */
	parent_pe = pe->parent;
	while (parent_pe) {
		/* Hit the ceiling ? */
		if (parent_pe->type & EEH_PE_PHB)
			break;

		/* Frozen parent PE ? */
		ret = eeh_ops->get_state(parent_pe, NULL);
		if (ret > 0 &&
		    (ret & active_flags) != active_flags)
			pe = parent_pe;

		/* Next parent level */
		parent_pe = parent_pe->parent;
	}

	eeh_stats.slot_resets++;

	/* Avoid repeated reports of this failure, including problems
	 * with other functions on this device, and functions under
	 * bridges.
	 */
	eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
	eeh_serialize_unlock(flags);

	/* Most EEH events are due to device driver bugs.  Having
	 * a stack trace will help the device-driver authors figure
	 * out what happened.  So print that out.
	 */
	phb_pe = eeh_phb_pe_get(pe->phb);
	pr_err("EEH: Frozen PHB#%x-PE#%x detected\n",
	       pe->phb->global_number, pe->addr);
	pr_err("EEH: PE location: %s, PHB location: %s\n",
	       eeh_pe_loc_get(pe), eeh_pe_loc_get(phb_pe));
	dump_stack();

	eeh_send_failure_event(pe);

	return 1;

dn_unlock:
	eeh_serialize_unlock(flags);
	return rc;
}
EXPORT_SYMBOL_GPL(eeh_dev_check_failure);

/**
 * eeh_check_failure - Check if all 1's data is due to EEH slot freeze
 * @token: I/O address
 *
 * Check for an EEH failure at the given I/O address.  Call this
 * routine if the result of a read was all 0xff's and you want to
 * find out if this is due to an EEH slot freeze event.  This routine
 * will query firmware for the EEH status.
 *
 * Note this routine is safe to call in an interrupt context.
 */
int eeh_check_failure(const volatile void __iomem *token)
{
	unsigned long addr;
	struct eeh_dev *edev;

	/* Finding the phys addr + pci device; this is pretty quick. */
	addr = eeh_token_to_phys((unsigned long __force) token);
	edev = eeh_addr_cache_get_dev(addr);
	if (!edev) {
		eeh_stats.no_device++;
		return 0;
	}

	return eeh_dev_check_failure(edev);
}
EXPORT_SYMBOL(eeh_check_failure);

/**
 * eeh_pci_enable - Enable MMIO or DMA transfers for this slot
 * @pe: EEH PE
 * @function: EEH option to issue (EEH_OPT_*)
 *
 * This routine should be called to reenable frozen MMIO or DMA
 * so that it would work correctly again. It's useful while doing
 * recovery or log collection on the indicated device.
 */
int eeh_pci_enable(struct eeh_pe *pe, int function)
{
	int active_flag, rc;

	/*
	 * pHyp doesn't allow enabling IO or DMA on an unfrozen PE.
	 * Besides, it's pointless to enable them on an unfrozen PE.
	 * So we have to check before enabling IO or DMA.
	 */
	switch (function) {
	case EEH_OPT_THAW_MMIO:
		active_flag = EEH_STATE_MMIO_ACTIVE;
		break;
	case EEH_OPT_THAW_DMA:
		active_flag = EEH_STATE_DMA_ACTIVE;
		break;
	case EEH_OPT_DISABLE:
	case EEH_OPT_ENABLE:
	case EEH_OPT_FREEZE_PE:
		active_flag = 0;
		break;
	default:
		pr_warn("%s: Invalid function %d\n",
			__func__, function);
		return -EINVAL;
	}

	/*
	 * Check if IO or DMA has been enabled before
	 * enabling them.
	 */
	if (active_flag) {
		rc = eeh_ops->get_state(pe, NULL);
		if (rc < 0)
			return rc;

		/* Needn't enable it at all */
		if (rc == EEH_STATE_NOT_SUPPORT)
			return 0;

		/* It's already enabled */
		if (rc & active_flag)
			return 0;
	}

	/* Issue the request */
	rc = eeh_ops->set_option(pe, function);
	if (rc)
		pr_warn("%s: Unexpected state change %d on "
			"PHB#%d-PE#%x, err=%d\n",
			__func__, function, pe->phb->global_number,
			pe->addr, rc);

	/* Check if the request finished successfully */
	if (active_flag) {
		rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
		if (rc <= 0)
			return rc;

		if (rc & active_flag)
			return 0;

		return -EIO;
	}

	return rc;
}

/**
 * pcibios_set_pcie_reset_state - Set PCI-E reset state
 * @dev: pci device struct
 * @state: reset state to enter
 *
 * Return value:
 *	0 if success
 */
int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
	struct eeh_pe *pe = eeh_dev_to_pe(edev);

	if (!pe) {
		pr_err("%s: No PE found on PCI device %s\n",
		       __func__, pci_name(dev));
		return -EINVAL;
	}

	switch (state) {
	case pcie_deassert_reset:
		eeh_ops->reset(pe, EEH_RESET_DEACTIVATE);
		eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
		break;
	case pcie_hot_reset:
		eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
		eeh_ops->reset(pe, EEH_RESET_HOT);
		break;
	case pcie_warm_reset:
		eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
		eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL);
		break;
	default:
		eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
		return -EINVAL;
	}

	return 0;
}
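
/*
 * Illustrative sketch (editorial, not part of the original source):
 * a driver reaches the handler above through the generic PCI API,
 * e.g. to pulse a hot reset on its slot ("pdev" is hypothetical):
 *
 *	pci_set_pcie_reset_state(pdev, pcie_hot_reset);
 *	msleep(100);
 *	pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
 */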

/**
 * eeh_set_dev_freset - Check the required reset for the indicated device
 * @data: EEH device
 * @flag: return value
 *
 * Each device might have its preferred reset type: fundamental or
 * hot reset. The routine is used to collect the information for
 * the indicated device and its children so that the bunch of the
 * devices could be reset properly.
 */
static void *eeh_set_dev_freset(void *data, void *flag)
{
	struct pci_dev *dev;
	unsigned int *freset = (unsigned int *)flag;
	struct eeh_dev *edev = (struct eeh_dev *)data;

	dev = eeh_dev_to_pci_dev(edev);
	if (dev)
		*freset |= dev->needs_freset;

	return NULL;
}

/**
 * eeh_reset_pe_once - Assert the pci #RST line for 1/4 second
 * @pe: EEH PE
 *
 * Assert the PCI #RST line for 1/4 second.
 */
static void eeh_reset_pe_once(struct eeh_pe *pe)
{
	unsigned int freset = 0;

	/* Determine type of EEH reset required for
	 * Partitionable Endpoint, a hot-reset (1)
	 * or a fundamental reset (3).
	 * A fundamental reset required by any device under
	 * Partitionable Endpoint trumps hot-reset.
	 */
	eeh_pe_dev_traverse(pe, eeh_set_dev_freset, &freset);

	if (freset)
		eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL);
	else
		eeh_ops->reset(pe, EEH_RESET_HOT);

	eeh_ops->reset(pe, EEH_RESET_DEACTIVATE);
}

/**
 * eeh_reset_pe - Reset the indicated PE
 * @pe: EEH PE
 *
 * This routine should be called to reset the indicated PE. A PE
 * might include multiple PCI devices, and sometimes PCI bridges
 * might be involved as well.
 */
int eeh_reset_pe(struct eeh_pe *pe)
{
	int flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
	int i, rc;

	/* Take three shots at resetting the bus */
	for (i=0; i<3; i++) {
		eeh_reset_pe_once(pe);

		/*
		 * EEH_PE_ISOLATED is expected to be removed after
		 * BAR restore.
		 */
		rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
		if ((rc & flags) == flags)
			return 0;

		if (rc < 0) {
			pr_err("%s: Unrecoverable slot failure on PHB#%d-PE#%x\n",
			       __func__, pe->phb->global_number, pe->addr);
			return -1;
		}
		pr_err("EEH: bus reset %d failed on PHB#%d-PE#%x, rc=%d\n",
		       i+1, pe->phb->global_number, pe->addr, rc);
	}

	return -1;
}

/**
 * eeh_save_bars - Save device bars
 * @edev: PCI device associated EEH device
 *
 * Save the values of the device bars. Unlike the restore
 * routine, this routine is *not* recursive. This is because
 * PCI devices are added individually; but, for the restore,
 * an entire slot is reset at a time.
 */
void eeh_save_bars(struct eeh_dev *edev)
{
	int i;
	struct device_node *dn;

	if (!edev)
		return;
	dn = eeh_dev_to_of_node(edev);

	for (i = 0; i < 16; i++)
		eeh_ops->read_config(dn, i * 4, 4, &edev->config_space[i]);

	/*
	 * For PCI bridges, including the root port, we need to enable
	 * bus mastering explicitly; otherwise they can't fetch IODA
	 * table entries correctly. So we cache the bit in advance so
	 * that we can restore it after reset, at either the PHB or
	 * PE level.
	 */
	if (edev->mode & EEH_DEV_BRIDGE)
		edev->config_space[1] |= PCI_COMMAND_MASTER;
}

/**
 * eeh_ops_register - Register platform dependent EEH operations
 * @ops: platform dependent EEH operations
 *
 * Register the platform dependent EEH operation callback
 * functions. The platform should call this function before
 * any other EEH operations.
 */
int __init eeh_ops_register(struct eeh_ops *ops)
{
	if (!ops->name) {
		pr_warn("%s: Invalid EEH ops name for %p\n",
			__func__, ops);
		return -EINVAL;
	}

	if (eeh_ops && eeh_ops != ops) {
		pr_warn("%s: EEH ops of platform %s already existing (%s)\n",
			__func__, eeh_ops->name, ops->name);
		return -EEXIST;
	}

	eeh_ops = ops;

	return 0;
}

/**
 * eeh_ops_unregister - Unregister platform dependent EEH operations
 * @name: name of EEH platform operations
 *
 * Unregister the platform dependent EEH operation callback
 * functions.
 */
int __exit eeh_ops_unregister(const char *name)
{
	if (!name || !strlen(name)) {
		pr_warn("%s: Invalid EEH ops name\n",
			__func__);
		return -EINVAL;
	}

	if (eeh_ops && !strcmp(eeh_ops->name, name)) {
		eeh_ops = NULL;
		return 0;
	}

	return -EEXIST;
}

static int eeh_reboot_notifier(struct notifier_block *nb,
			       unsigned long action, void *unused)
{
	eeh_clear_flag(EEH_ENABLED);
	return NOTIFY_DONE;
}

static struct notifier_block eeh_reboot_nb = {
	.notifier_call = eeh_reboot_notifier,
};

/**
 * eeh_init - EEH initialization
 *
 * Initialize EEH by trying to enable it for all of the adapters in the system.
 * As a side effect we can determine here if eeh is supported at all.
 * Note that we leave EEH on so failed config cycles won't cause a machine
 * check.  If a user turns off EEH for a particular adapter they are really
 * telling Linux to ignore errors.  Some hardware (e.g. POWER5) won't
 * grant access to a slot if EEH isn't enabled, and so we always enable
 * EEH for all slots/all devices.
 *
 * The eeh-force-off option disables EEH checking globally, for all slots.
 * Even if force-off is set, the EEH hardware is still enabled, so that
 * newer systems can boot.
 */
int eeh_init(void)
{
	struct pci_controller *hose, *tmp;
	struct device_node *phb;
	static int cnt = 0;
	int ret = 0;

	/*
	 * On PowerNV, we have to delay the initialization until the
	 * PCI hierarchy tree has been built, because the PEs are
	 * figured out based on PCI devices instead of device tree
	 * nodes.
	 */
	if (machine_is(powernv) && cnt++ <= 0)
		return ret;

	/* Register reboot notifier */
	ret = register_reboot_notifier(&eeh_reboot_nb);
	if (ret) {
		pr_warn("%s: Failed to register notifier (%d)\n",
			__func__, ret);
		return ret;
	}

	/* Call platform initialization function */
	if (!eeh_ops) {
		pr_warn("%s: Platform EEH operation not found\n",
			__func__);
		return -EEXIST;
	} else if ((ret = eeh_ops->init())) {
		pr_warn("%s: Failed to call platform init function (%d)\n",
			__func__, ret);
		return ret;
	}

	/* Initialize EEH event */
	ret = eeh_event_init();
	if (ret)
		return ret;

	/* Enable EEH for all adapters */
	if (eeh_has_flag(EEH_PROBE_MODE_DEVTREE)) {
		list_for_each_entry_safe(hose, tmp,
					 &hose_list, list_node) {
			phb = hose->dn;
			traverse_pci_devices(phb, eeh_ops->of_probe, NULL);
		}
	} else if (eeh_has_flag(EEH_PROBE_MODE_DEV)) {
		list_for_each_entry_safe(hose, tmp,
					 &hose_list, list_node)
			pci_walk_bus(hose->bus, eeh_ops->dev_probe, NULL);
	} else {
		pr_warn("%s: Invalid probe mode %x\n",
			__func__, eeh_subsystem_flags);
		return -EINVAL;
	}

	/*
	 * Call platform post-initialization. It's a good chance to
	 * inform the platform that EEH is ready to supply service
	 * once the I/O cache has been built up.
	 */
	if (eeh_ops->post_init) {
		ret = eeh_ops->post_init();
		if (ret)
			return ret;
	}

	if (eeh_enabled())
		pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n");
	else
		pr_warn("EEH: No capable adapters found\n");

	return ret;
}

core_initcall_sync(eeh_init);

/**
 * eeh_add_device_early - Enable EEH for the indicated device_node
 * @dn: device node for which to set up EEH
 *
 * This routine must be used to perform EEH initialization for PCI
 * devices that were added after system boot (e.g. hotplug, dlpar).
 * This routine must be called before any i/o is performed to the
 * adapter (including any config-space i/o).
 * Whether this actually enables EEH or not for this device depends
 * on the CEC architecture, the type of the device, earlier boot
 * command-line arguments, etc.
 */
void eeh_add_device_early(struct device_node *dn)
{
	struct pci_controller *phb;

	/*
	 * If we're doing the EEH probe based on PCI devices, we
	 * delay the probe until the late stage, because the PCI
	 * device isn't available at this point.
	 */
	if (!eeh_has_flag(EEH_PROBE_MODE_DEVTREE))
		return;

	if (!of_node_to_eeh_dev(dn))
		return;
	phb = of_node_to_eeh_dev(dn)->phb;

	/* USB Bus children of PCI devices will not have BUID's */
	if (NULL == phb || 0 == phb->buid)
		return;

	eeh_ops->of_probe(dn, NULL);
}

/**
 * eeh_add_device_tree_early - Enable EEH for the indicated device
 * @dn: device node
 *
 * This routine must be used to perform EEH initialization for the
 * indicated PCI device that was added after system boot (e.g.
 * hotplug, dlpar).
 */
void eeh_add_device_tree_early(struct device_node *dn)
{
	struct device_node *sib;

	for_each_child_of_node(dn, sib)
		eeh_add_device_tree_early(sib);
	eeh_add_device_early(dn);
}
EXPORT_SYMBOL_GPL(eeh_add_device_tree_early);

/**
 * eeh_add_device_late - Perform EEH initialization for the indicated pci device
 * @dev: pci device for which to set up EEH
 *
 * This routine must be used to complete EEH initialization for PCI
 * devices that were added after system boot (e.g. hotplug, dlpar).
 */
void eeh_add_device_late(struct pci_dev *dev)
{
	struct device_node *dn;
	struct eeh_dev *edev;

	if (!dev || !eeh_enabled())
		return;

	pr_debug("EEH: Adding device %s\n", pci_name(dev));

	dn = pci_device_to_OF_node(dev);
	edev = of_node_to_eeh_dev(dn);
	if (edev->pdev == dev) {
		pr_debug("EEH: Already referenced !\n");
		return;
	}

	/*
	 * The EEH cache might not be removed correctly because of
	 * an unbalanced kref to the device during unplug time, which
	 * relies on pcibios_release_device(). So we have to remove
	 * it here explicitly.
	 */
	if (edev->pdev) {
		eeh_rmv_from_parent_pe(edev);
		eeh_addr_cache_rmv_dev(edev->pdev);
		eeh_sysfs_remove_device(edev->pdev);
		edev->mode &= ~EEH_DEV_SYSFS;

		/*
		 * The PCI device should have been removed, even
		 * though it wasn't done correctly, so we needn't
		 * call into the error handler afterwards.
		 */
		edev->mode |= EEH_DEV_NO_HANDLER;

		edev->pdev = NULL;
		dev->dev.archdata.edev = NULL;
	}

	edev->pdev = dev;
	dev->dev.archdata.edev = edev;

	/*
	 * We have to do the EEH probe here because the PCI device
	 * hasn't been created yet in the early stage.
	 */
	if (eeh_has_flag(EEH_PROBE_MODE_DEV))
		eeh_ops->dev_probe(dev, NULL);

	eeh_addr_cache_insert_dev(dev);
}

/**
 * eeh_add_device_tree_late - Perform EEH initialization for the indicated PCI bus
 * @bus: PCI bus
 *
 * This routine must be used to perform EEH initialization for PCI
 * devices which are attached to the indicated PCI bus. The PCI bus
 * is added after system boot through hotplug or dlpar.
 */
void eeh_add_device_tree_late(struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		eeh_add_device_late(dev);
		if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
			struct pci_bus *subbus = dev->subordinate;
			if (subbus)
				eeh_add_device_tree_late(subbus);
		}
	}
}
EXPORT_SYMBOL_GPL(eeh_add_device_tree_late);

/**
 * eeh_add_sysfs_files - Add EEH sysfs files for the indicated PCI bus
 * @bus: PCI bus
 *
 * This routine must be used to add EEH sysfs files for PCI
 * devices which are attached to the indicated PCI bus. The PCI bus
 * is added after system boot through hotplug or dlpar.
 */
void eeh_add_sysfs_files(struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		eeh_sysfs_add_device(dev);
		if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
			struct pci_bus *subbus = dev->subordinate;
			if (subbus)
				eeh_add_sysfs_files(subbus);
		}
	}
}
EXPORT_SYMBOL_GPL(eeh_add_sysfs_files);

/**
 * eeh_remove_device - Undo EEH setup for the indicated pci device
 * @dev: pci device to be removed
 *
 * This routine should be called when a device is removed from
 * a running system (e.g. by hotplug or dlpar).  It unregisters
 * the PCI device from the EEH subsystem.  I/O errors affecting
 * this device will no longer be detected after this call; thus,
 * i/o errors affecting this slot may leave this device unusable.
 */
void eeh_remove_device(struct pci_dev *dev)
{
	struct eeh_dev *edev;

	if (!dev || !eeh_enabled())
		return;
	edev = pci_dev_to_eeh_dev(dev);

	/* Unregister the device with the EEH/PCI address search system */
	pr_debug("EEH: Removing device %s\n", pci_name(dev));

	if (!edev || !edev->pdev || !edev->pe) {
		pr_debug("EEH: Not referenced !\n");
		return;
	}

	/*
	 * During the hotplug for EEH error recovery, we need the EEH
	 * device attached to the parent PE in order to restore the
	 * BARs a bit later. So we keep the EEH device and remove it
	 * from the parent PE during the BAR restore.
	 */
	edev->pdev = NULL;
	dev->dev.archdata.edev = NULL;
	if (!(edev->pe->state & EEH_PE_KEEP))
		eeh_rmv_from_parent_pe(edev);
	else
		edev->mode |= EEH_DEV_DISCONNECTED;

	/*
	 * We're removing from the PCI subsystem, which means the
	 * PCI device driver doesn't support EEH, or doesn't support
	 * it well. So we rely completely on hotplug to recover
	 * the specific PCI device.
	 */
	edev->mode |= EEH_DEV_NO_HANDLER;

	eeh_addr_cache_rmv_dev(dev);
	eeh_sysfs_remove_device(dev);
	edev->mode &= ~EEH_DEV_SYSFS;
}

int eeh_unfreeze_pe(struct eeh_pe *pe, bool sw_state)
{
	int ret;

	ret = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
	if (ret) {
		pr_warn("%s: Failure %d enabling IO on PHB#%x-PE#%x\n",
			__func__, ret, pe->phb->global_number, pe->addr);
		return ret;
	}

	ret = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);
	if (ret) {
		pr_warn("%s: Failure %d enabling DMA on PHB#%x-PE#%x\n",
			__func__, ret, pe->phb->global_number, pe->addr);
		return ret;
	}

	/* Clear software isolated state */
	if (sw_state && (pe->state & EEH_PE_ISOLATED))
		eeh_pe_state_clear(pe, EEH_PE_ISOLATED);

	return ret;
}

static struct pci_device_id eeh_reset_ids[] = {
	{ PCI_DEVICE(0x19a2, 0x0710) },	/* Emulex, BE     */
	{ PCI_DEVICE(0x10df, 0xe220) },	/* Emulex, Lancer */
	{ 0 }
};

static int eeh_pe_change_owner(struct eeh_pe *pe)
{
	struct eeh_dev *edev, *tmp;
	struct pci_dev *pdev;
	struct pci_device_id *id;
	int flags, ret;

	/* Check PE state */
	flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
	ret = eeh_ops->get_state(pe, NULL);
	if (ret < 0 || ret == EEH_STATE_NOT_SUPPORT)
		return 0;

	/* Unfrozen PE, nothing to do */
	if ((ret & flags) == flags)
		return 0;

	/* Frozen PE, check if it needs PE level reset */
	eeh_pe_for_each_dev(pe, edev, tmp) {
		pdev = eeh_dev_to_pci_dev(edev);
		if (!pdev)
			continue;

		for (id = &eeh_reset_ids[0]; id->vendor != 0; id++) {
			if (id->vendor != PCI_ANY_ID &&
			    id->vendor != pdev->vendor)
				continue;
			if (id->device != PCI_ANY_ID &&
			    id->device != pdev->device)
				continue;
			if (id->subvendor != PCI_ANY_ID &&
			    id->subvendor != pdev->subsystem_vendor)
				continue;
			if (id->subdevice != PCI_ANY_ID &&
			    id->subdevice != pdev->subsystem_device)
				continue;

			goto reset;
		}
	}

	return eeh_unfreeze_pe(pe, true);

reset:
	return eeh_pe_reset_and_recover(pe);
}

/**
 * eeh_dev_open - Increase count of pass through devices for PE
 * @pdev: PCI device
 *
 * Increase the count of passed through devices for the indicated
 * PE. As a result, the EEH errors detected on the PE won't be
 * reported. The PE owner will be responsible for detection
 * and recovery.
 */
int eeh_dev_open(struct pci_dev *pdev)
{
	struct eeh_dev *edev;
	int ret = -ENODEV;

	mutex_lock(&eeh_dev_mutex);

	/* No PCI device ? */
	if (!pdev)
		goto out;

	/* No EEH device or PE ? */
	edev = pci_dev_to_eeh_dev(pdev);
	if (!edev || !edev->pe)
		goto out;

	/*
	 * The PE might have been put into frozen state, but we
	 * didn't detect that yet. The passed through PCI devices
	 * in frozen PE won't work properly. Clear the frozen state
	 * in advance.
	 */
	ret = eeh_pe_change_owner(edev->pe);
	if (ret)
		goto out;

	/* Increase PE's pass through count */
	atomic_inc(&edev->pe->pass_dev_cnt);
	mutex_unlock(&eeh_dev_mutex);

	return 0;
out:
	mutex_unlock(&eeh_dev_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(eeh_dev_open);

/**
 * eeh_dev_release - Decrease count of pass through devices for PE
 * @pdev: PCI device
 *
 * Decrease count of pass through devices for the indicated PE. If
 * there is no passed through device in PE, the EEH errors detected
 * on the PE will be reported and handled as usual.
 */
void eeh_dev_release(struct pci_dev *pdev)
{
	struct eeh_dev *edev;

	mutex_lock(&eeh_dev_mutex);

	/* No PCI device ? */
	if (!pdev)
		goto out;

	/* No EEH device ? */
	edev = pci_dev_to_eeh_dev(pdev);
	if (!edev || !edev->pe || !eeh_pe_passed(edev->pe))
		goto out;

	/* Decrease PE's pass through count */
	atomic_dec(&edev->pe->pass_dev_cnt);
	WARN_ON(atomic_read(&edev->pe->pass_dev_cnt) < 0);
	eeh_pe_change_owner(edev->pe);
out:
	mutex_unlock(&eeh_dev_mutex);
}
EXPORT_SYMBOL(eeh_dev_release);
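
/*
 * Usage note (editorial, not part of the original source): these two
 * hooks are intended for device pass-through; for instance, the VFIO
 * layer is expected to call eeh_dev_open() when a device is handed to
 * a guest and eeh_dev_release() when it comes back, so that EEH
 * detection and recovery are left to the new owner in between.
 */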

#ifdef CONFIG_IOMMU_API

static int dev_has_iommu_table(struct device *dev, void *data)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_dev **ppdev = data;
	struct iommu_table *tbl;

	if (!dev)
		return 0;

	tbl = get_iommu_table_base(dev);
	if (tbl && tbl->it_group) {
		*ppdev = pdev;
		return 1;
	}

	return 0;
}

/**
 * eeh_iommu_group_to_pe - Convert IOMMU group to EEH PE
 * @group: IOMMU group
 *
 * The routine is called to convert IOMMU group to EEH PE.
 */
struct eeh_pe *eeh_iommu_group_to_pe(struct iommu_group *group)
{
	struct pci_dev *pdev = NULL;
	struct eeh_dev *edev;
	int ret;

	/* No IOMMU group ? */
	if (!group)
		return NULL;

	ret = iommu_group_for_each_dev(group, &pdev, dev_has_iommu_table);
	if (!ret || !pdev)
		return NULL;

	/* No EEH device or PE ? */
	edev = pci_dev_to_eeh_dev(pdev);
	if (!edev || !edev->pe)
		return NULL;

	return edev->pe;
}
EXPORT_SYMBOL_GPL(eeh_iommu_group_to_pe);

#endif /* CONFIG_IOMMU_API */

/**
 * eeh_pe_set_option - Set options for the indicated PE
 * @pe: EEH PE
 * @option: requested option
 *
 * The routine is called to enable or disable EEH functionality
 * on the indicated PE, or to enable IO or DMA for a frozen PE.
 */
int eeh_pe_set_option(struct eeh_pe *pe, int option)
{
	int ret = 0;

	/* Invalid PE ? */
	if (!pe)
		return -ENODEV;

	/*
	 * EEH functionality could possibly be disabled; just return
	 * an error for that case. EEH functionality isn't expected
	 * to be disabled on one specific PE.
	 */
	switch (option) {
	case EEH_OPT_ENABLE:
		if (eeh_enabled()) {
			ret = eeh_pe_change_owner(pe);
			break;
		}
		ret = -EIO;
		break;
	case EEH_OPT_DISABLE:
		break;
	case EEH_OPT_THAW_MMIO:
	case EEH_OPT_THAW_DMA:
		if (!eeh_ops || !eeh_ops->set_option) {
			ret = -ENOENT;
			break;
		}

		ret = eeh_pci_enable(pe, option);
		break;
	default:
		pr_debug("%s: Option %d out of range (%d, %d)\n",
			 __func__, option, EEH_OPT_DISABLE, EEH_OPT_THAW_DMA);
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(eeh_pe_set_option);

/**
 * eeh_pe_get_state - Retrieve PE's state
 * @pe: EEH PE
 *
 * Retrieve the PE's state, which covers three aspects: enabled
 * DMA, enabled IO and asserted reset.
 */
int eeh_pe_get_state(struct eeh_pe *pe)
{
	int result, ret = 0;
	bool rst_active, dma_en, mmio_en;

	/* Existing PE ? */
	if (!pe)
		return -ENODEV;

	if (!eeh_ops || !eeh_ops->get_state)
		return -ENOENT;

	result = eeh_ops->get_state(pe, NULL);
	rst_active = !!(result & EEH_STATE_RESET_ACTIVE);
	dma_en = !!(result & EEH_STATE_DMA_ENABLED);
	mmio_en = !!(result & EEH_STATE_MMIO_ENABLED);

	if (rst_active)
		ret = EEH_PE_STATE_RESET;
	else if (dma_en && mmio_en)
		ret = EEH_PE_STATE_NORMAL;
	else if (!dma_en && !mmio_en)
		ret = EEH_PE_STATE_STOPPED_IO_DMA;
	else if (!dma_en && mmio_en)
		ret = EEH_PE_STATE_STOPPED_DMA;
	else
		ret = EEH_PE_STATE_UNAVAIL;

	return ret;
}
EXPORT_SYMBOL_GPL(eeh_pe_get_state);

static int eeh_pe_reenable_devices(struct eeh_pe *pe)
{
	struct eeh_dev *edev, *tmp;
	struct pci_dev *pdev;
	int ret = 0;

	/* Restore config space */
	eeh_pe_restore_bars(pe);

	/*
	 * Reenable PCI devices as the devices passed
	 * through are always enabled before the reset.
	 */
	eeh_pe_for_each_dev(pe, edev, tmp) {
		pdev = eeh_dev_to_pci_dev(edev);
		if (!pdev)
			continue;

		ret = pci_reenable_device(pdev);
		if (ret) {
			pr_warn("%s: Failure %d reenabling %s\n",
				__func__, ret, pci_name(pdev));
			return ret;
		}
	}

	/* The PE is still in frozen state */
	return eeh_unfreeze_pe(pe, true);
}

/**
 * eeh_pe_reset - Issue PE reset according to specified type
 * @pe: EEH PE
 * @option: reset type
 *
 * The routine is called to reset the specified PE with the
 * indicated type, either fundamental reset or hot reset.
 * PE reset is the most important part of error recovery.
 */
int eeh_pe_reset(struct eeh_pe *pe, int option)
{
	int ret = 0;

	/* Invalid PE ? */
	if (!pe)
		return -ENODEV;

	if (!eeh_ops || !eeh_ops->set_option || !eeh_ops->reset)
		return -ENOENT;

	switch (option) {
	case EEH_RESET_DEACTIVATE:
		ret = eeh_ops->reset(pe, option);
		eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
		if (ret)
			break;

		ret = eeh_pe_reenable_devices(pe);
		break;
	case EEH_RESET_HOT:
	case EEH_RESET_FUNDAMENTAL:
		/*
		 * Proactively freeze the PE to drop all MMIO access
		 * during reset, which should be banned as it always
		 * causes a recursive EEH error.
		 */
		eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);

		eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
		ret = eeh_ops->reset(pe, option);
		break;
	default:
		pr_debug("%s: Unsupported option %d\n",
			 __func__, option);
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(eeh_pe_reset);
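
/*
 * Usage sketch (editorial, not part of the original source): a
 * pass-through owner recovering a frozen PE would typically issue a
 * reset and then deassert it, which also reenables the devices:
 *
 *	ret = eeh_pe_reset(pe, EEH_RESET_HOT);
 *	if (!ret)
 *		ret = eeh_pe_reset(pe, EEH_RESET_DEACTIVATE);
 */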

/**
 * eeh_pe_configure - Configure PCI bridges after PE reset
 * @pe: EEH PE
 *
 * The routine is called to restore the PCI config space for
 * those PCI devices, especially PCI bridges affected by PE
 * reset issued previously.
 */
int eeh_pe_configure(struct eeh_pe *pe)
{
	int ret = 0;

	/* Invalid PE ? */
	if (!pe)
		return -ENODEV;

	return ret;
}
EXPORT_SYMBOL_GPL(eeh_pe_configure);

static int proc_eeh_show(struct seq_file *m, void *v)
{
	if (!eeh_enabled()) {
		seq_printf(m, "EEH Subsystem is globally disabled\n");
		seq_printf(m, "eeh_total_mmio_ffs=%llu\n", eeh_stats.total_mmio_ffs);
	} else {
		seq_printf(m, "EEH Subsystem is enabled\n");
		seq_printf(m,
			   "no device=%llu\n"
			   "no device node=%llu\n"
			   "no config address=%llu\n"
			   "check not wanted=%llu\n"
			   "eeh_total_mmio_ffs=%llu\n"
			   "eeh_false_positives=%llu\n"
			   "eeh_slot_resets=%llu\n",
			   eeh_stats.no_device,
			   eeh_stats.no_dn,
			   eeh_stats.no_cfg_addr,
			   eeh_stats.ignored_check,
			   eeh_stats.total_mmio_ffs,
			   eeh_stats.false_positives,
			   eeh_stats.slot_resets);
	}

	return 0;
}

static int proc_eeh_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_eeh_show, NULL);
}

static const struct file_operations proc_eeh_operations = {
	.open    = proc_eeh_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};
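
/*
 * Example (editorial note): with the entry registered in
 * eeh_init_proc() below, the statistics above can be read from
 * userspace:
 *
 *	$ cat /proc/powerpc/eeh
 *	EEH Subsystem is enabled
 *	no device=0
 *	...
 */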

#ifdef CONFIG_DEBUG_FS
static int eeh_enable_dbgfs_set(void *data, u64 val)
{
	if (val)
		eeh_clear_flag(EEH_FORCE_DISABLED);
	else
		eeh_add_flag(EEH_FORCE_DISABLED);

	/* Notify the backend */
	if (eeh_ops->post_init)
		eeh_ops->post_init();

	return 0;
}

static int eeh_enable_dbgfs_get(void *data, u64 *val)
{
	if (eeh_enabled())
		*val = 0x1ul;
	else
		*val = 0x0ul;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(eeh_enable_dbgfs_ops, eeh_enable_dbgfs_get,
			eeh_enable_dbgfs_set, "0x%llx\n");
#endif
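
/*
 * Usage sketch (editorial, assuming debugfs is mounted at the usual
 * /sys/kernel/debug): EEH checking can be toggled at run time through
 * the file created below, e.g.:
 *
 *	# echo 1 > /sys/kernel/debug/powerpc/eeh_enable
 *	# cat /sys/kernel/debug/powerpc/eeh_enable
 *	0x1
 */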

static int __init eeh_init_proc(void)
{
	if (machine_is(pseries) || machine_is(powernv)) {
		proc_create("powerpc/eeh", 0, NULL, &proc_eeh_operations);
#ifdef CONFIG_DEBUG_FS
		debugfs_create_file("eeh_enable", 0600,
				    powerpc_debugfs_root, NULL,
				    &eeh_enable_dbgfs_ops);
#endif
	}

	return 0;
}
__initcall(eeh_init_proc);