/**
 * IBM Accelerator Family 'GenWQE'
 *
 * (C) Copyright IBM Corp. 2013
 *
 * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
 * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
 * Author: Michael Jung <mijung@de.ibm.com>
 * Author: Michael Ruettger <michael@ibmra.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Module initialization and PCIe setup. Card health monitoring and
 * recovery functionality. Character device creation and deletion are
 * controlled from here.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/err.h>
#include <linux/aer.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/notifier.h>
#include <linux/device.h>
#include <linux/log2.h>
#include <linux/genwqe/genwqe_card.h>

#include "card_base.h"
#include "card_ddcb.h"

MODULE_AUTHOR("Frank Haverkamp <haver@linux.vnet.ibm.com>");
MODULE_AUTHOR("Michael Ruettger <michael@ibmra.de>");
MODULE_AUTHOR("Joerg-Stephan Vogt <jsvogt@de.ibm.com>");
MODULE_AUTHOR("Michael Jung <mijung@de.ibm.com>");

MODULE_DESCRIPTION("GenWQE Card");
MODULE_VERSION(DRV_VERS_STRING);
MODULE_LICENSE("GPL");

static char genwqe_driver_name[] = GENWQE_DEVNAME;
static struct class *class_genwqe;
static struct dentry *debugfs_genwqe;
static struct genwqe_dev *genwqe_devices[GENWQE_CARD_NO_MAX];

/* PCI structure for identifying device by PCI vendor and device ID */
static DEFINE_PCI_DEVICE_TABLE(genwqe_device_table) = {
	{ .vendor = PCI_VENDOR_ID_IBM,
	  .device = PCI_DEVICE_GENWQE,
	  .subvendor = PCI_SUBVENDOR_ID_IBM,
	  .subdevice = PCI_SUBSYSTEM_ID_GENWQE5,
	  .class = (PCI_CLASSCODE_GENWQE5 << 8),
	  .class_mask = ~0,
	  .driver_data = 0 },

	/* Initial SR-IOV bring-up image */
	{ .vendor = PCI_VENDOR_ID_IBM,
	  .device = PCI_DEVICE_GENWQE,
	  .subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV,
	  .subdevice = PCI_SUBSYSTEM_ID_GENWQE5_SRIOV,
	  .class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8),
	  .class_mask = ~0,
	  .driver_data = 0 },

	{ .vendor = PCI_VENDOR_ID_IBM,	/* VF Vendor ID */
	  .device = 0x0000,		/* VF Device ID */
	  .subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV,
	  .subdevice = PCI_SUBSYSTEM_ID_GENWQE5_SRIOV,
	  .class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8),
	  .class_mask = ~0,
	  .driver_data = 0 },

	/* Fixed up image */
	{ .vendor = PCI_VENDOR_ID_IBM,
	  .device = PCI_DEVICE_GENWQE,
	  .subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV,
	  .subdevice = PCI_SUBSYSTEM_ID_GENWQE5,
	  .class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8),
	  .class_mask = ~0,
	  .driver_data = 0 },

	{ .vendor = PCI_VENDOR_ID_IBM,	/* VF Vendor ID */
	  .device = 0x0000,		/* VF Device ID */
	  .subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV,
	  .subdevice = PCI_SUBSYSTEM_ID_GENWQE5,
	  .class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8),
	  .class_mask = ~0,
	  .driver_data = 0 },

	/* Even one more ... */
	{ .vendor = PCI_VENDOR_ID_IBM,
	  .device = PCI_DEVICE_GENWQE,
	  .subvendor = PCI_SUBVENDOR_ID_IBM,
	  .subdevice = PCI_SUBSYSTEM_ID_GENWQE5_NEW,
	  .class = (PCI_CLASSCODE_GENWQE5 << 8),
	  .class_mask = ~0,
	  .driver_data = 0 },

	{ 0, }	/* 0 terminated list. */
};

MODULE_DEVICE_TABLE(pci, genwqe_device_table);

/**
 * genwqe_dev_alloc() - Create and prepare a new card descriptor
 *
 * Return: Pointer to card descriptor, or ERR_PTR(err) on error
 */
static struct genwqe_dev *genwqe_dev_alloc(void)
{
	unsigned int i = 0, j;
	struct genwqe_dev *cd;

	for (i = 0; i < GENWQE_CARD_NO_MAX; i++) {
		if (genwqe_devices[i] == NULL)
			break;
	}
	if (i >= GENWQE_CARD_NO_MAX)
		return ERR_PTR(-ENODEV);

	cd = kzalloc(sizeof(struct genwqe_dev), GFP_KERNEL);
	if (!cd)
		return ERR_PTR(-ENOMEM);

	cd->card_idx = i;
	cd->class_genwqe = class_genwqe;
	cd->debugfs_genwqe = debugfs_genwqe;

	/*
	 * This comes from the kernel config option and can be
	 * overridden via debugfs.
	 */
	cd->use_platform_recovery = CONFIG_GENWQE_PLATFORM_ERROR_RECOVERY;

	init_waitqueue_head(&cd->queue_waitq);

	spin_lock_init(&cd->file_lock);
	INIT_LIST_HEAD(&cd->file_list);

	cd->card_state = GENWQE_CARD_UNUSED;
	spin_lock_init(&cd->print_lock);

	cd->ddcb_software_timeout = genwqe_ddcb_software_timeout;
	cd->kill_timeout = genwqe_kill_timeout;

	for (j = 0; j < GENWQE_MAX_VFS; j++)
		cd->vf_jobtimeout_msec[j] = genwqe_vf_jobtimeout_msec;

	genwqe_devices[i] = cd;
	return cd;
}

static void genwqe_dev_free(struct genwqe_dev *cd)
{
	if (!cd)
		return;

	genwqe_devices[cd->card_idx] = NULL;
	kfree(cd);
}

/**
 * genwqe_bus_reset() - Card recovery
 *
 * pci_reset_function() will recover the device and ensure that the
 * registers are accessible again when it completes successfully. If
 * it fails, the card stays dead and the registers remain inaccessible.
 */
static int genwqe_bus_reset(struct genwqe_dev *cd)
{
	int bars, rc = 0;
	struct pci_dev *pci_dev = cd->pci_dev;
	void __iomem *mmio;

	if (cd->err_inject & GENWQE_INJECT_BUS_RESET_FAILURE)
		return -EIO;

	mmio = cd->mmio;
	cd->mmio = NULL;
	pci_iounmap(pci_dev, mmio);

	bars = pci_select_bars(pci_dev, IORESOURCE_MEM);
	pci_release_selected_regions(pci_dev, bars);

	/*
	 * Firmware/BIOS might change the memory mapping during bus reset.
	 * Settings such as enabled bus-mastering are backed up and
	 * restored by pci_reset_function().
	 */
	dev_dbg(&pci_dev->dev, "[%s] pci_reset function ...\n", __func__);
	rc = pci_reset_function(pci_dev);
	if (rc) {
		dev_err(&pci_dev->dev,
			"[%s] err: failed reset func (rc %d)\n", __func__, rc);
		return rc;
	}
	dev_dbg(&pci_dev->dev, "[%s] done with rc=%d\n", __func__, rc);

	/*
	 * Here is the right spot to clear the register read
	 * failure. pci_bus_reset() does this job in real systems.
	 */
	cd->err_inject &= ~(GENWQE_INJECT_HARDWARE_FAILURE |
			    GENWQE_INJECT_GFIR_FATAL |
			    GENWQE_INJECT_GFIR_INFO);

	rc = pci_request_selected_regions(pci_dev, bars, genwqe_driver_name);
	if (rc) {
		dev_err(&pci_dev->dev,
			"[%s] err: request bars failed (%d)\n", __func__, rc);
		return -EIO;
	}

	cd->mmio = pci_iomap(pci_dev, 0, 0);
	if (cd->mmio == NULL) {
		dev_err(&pci_dev->dev,
			"[%s] err: mapping BAR0 failed\n", __func__);
		return -ENOMEM;
	}
	return 0;
}

/*
 * Hardware circumvention section. Certain bitstreams in our test-lab
 * had different kinds of problems. Here is where we adjust those
 * bitstreams to function well with this version of our device driver.
 *
 * These circumventions are applied to the physical function only.
 * The magic numbers below identify development/manufacturing
 * versions of the bitstream used on the card.
 *
 * Turn off error reporting for old/manufacturing images.
 */

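/*
 * The 0xFFFF0 mask used here and in genwqe_recovery_on_fatal_gfir_required()
 * presumably extracts the bitstream build level from IO_SLU_UNITCFG;
 * 0x32170 is used as the threshold between old development/manufacturing
 * builds and newer ones (cf. the 2013-02-17 note further down).
 */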
bool genwqe_need_err_masking(struct genwqe_dev *cd)
{
	return (cd->slu_unitcfg & 0xFFFF0ull) < 0x32170ull;
}

static void genwqe_tweak_hardware(struct genwqe_dev *cd)
{
	struct pci_dev *pci_dev = cd->pci_dev;

	/* Mask FIRs for development images */
	if (((cd->slu_unitcfg & 0xFFFF0ull) >= 0x32000ull) &&
	    ((cd->slu_unitcfg & 0xFFFF0ull) <= 0x33250ull)) {
		dev_warn(&pci_dev->dev,
			 "FIRs masked due to bitstream %016llx.%016llx\n",
			 cd->slu_unitcfg, cd->app_unitcfg);

		__genwqe_writeq(cd, IO_APP_SEC_LEM_DEBUG_OVR,
				0xFFFFFFFFFFFFFFFFull);

		__genwqe_writeq(cd, IO_APP_ERR_ACT_MASK,
				0x0000000000000000ull);
	}
}

/**
 * genwqe_recovery_on_fatal_gfir_required() - Version dependent actions
 *
 * Bitstreams older than 2013-02-17 have a bug where fatal GFIRs must
 * be ignored. This is e.g. true for the bitstream we gave to the card
 * manufacturer, but also for some old bitstreams we released to our
 * test-lab.
 */
int genwqe_recovery_on_fatal_gfir_required(struct genwqe_dev *cd)
{
	return (cd->slu_unitcfg & 0xFFFF0ull) >= 0x32170ull;
}

int genwqe_flash_readback_fails(struct genwqe_dev *cd)
{
	return (cd->slu_unitcfg & 0xFFFF0ull) < 0x32170ull;
}

/**
 * genwqe_T_psec() - Calculate PF/VF timeout register content
 *
 * Note: From a design perspective it turned out to be a bad idea to
 * use codes here to specify the frequency/speed values. An old
 * driver cannot understand new codes and is therefore always a
 * problem. It is better to measure the value or to put the
 * speed/frequency directly into a register, which is always a valid
 * value for old as well as for new software.
 */
/* T = 1/f */
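/*
 * The table below gives the clock period in psec for each speed code
 * read from IO_SLU_UNITCFG: 0 -> 4000 ps (250 MHz), 1 -> 5000 ps
 * (200 MHz), 2 -> 6000 ps (166 MHz), 3 -> 5714 ps (175 MHz).
 */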
static int genwqe_T_psec(struct genwqe_dev *cd)
{
	u16 speed;	/* 1/f -> 250, 200, 166, 175 */
	static const int T[] = { 4000, 5000, 6000, 5714 };

	speed = (u16)((cd->slu_unitcfg >> 28) & 0x0full);
	if (speed >= ARRAY_SIZE(T))
		return -1;	/* illegal value */

	return T[speed];
}

/**
 * genwqe_setup_pf_jtimer() - Setup PF hardware timeouts for DDCB execution
 *
 * Do this _after_ card_reset() is called. Otherwise the values will
 * vanish. The settings need to be done when the queues are inactive.
 *
 * The max. timeout value is 2^(10+x) * T (6ns for 166MHz) * 15/16.
 * The min. timeout value is 2^(10+x) * T (6ns for 166MHz) * 14/16.
 */
static bool genwqe_setup_pf_jtimer(struct genwqe_dev *cd)
{
	u32 T = genwqe_T_psec(cd);
	u64 x;

	if (genwqe_pf_jobtimeout_msec == 0)
		return false;

	/* PF: large value needed, flash update 2sec per block */
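	/*
	 * Illustrative example: with a 166 MHz bitstream (T = 6000 psec)
	 * and a 250 msec PF job timeout, ilog2(250 * 16e9 / (6000 * 15))
	 * is 25, so the computed x below is 15. Because ilog2() floors,
	 * the programmed timeout only approximates the requested value.
	 */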
	x = ilog2(genwqe_pf_jobtimeout_msec *
		  16000000000uL/(T * 15)) - 10;

	genwqe_write_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT,
			  0xff00 | (x & 0xff), 0);
	return true;
}

/**
 * genwqe_setup_vf_jtimer() - Setup VF hardware timeouts for DDCB execution
 */
static bool genwqe_setup_vf_jtimer(struct genwqe_dev *cd)
{
	struct pci_dev *pci_dev = cd->pci_dev;
	unsigned int vf;
	u32 T = genwqe_T_psec(cd);
	u64 x;

	for (vf = 0; vf < pci_sriov_get_totalvfs(pci_dev); vf++) {

		if (cd->vf_jobtimeout_msec[vf] == 0)
			continue;

		x = ilog2(cd->vf_jobtimeout_msec[vf] *
			  16000000000uL/(T * 15)) - 10;

		genwqe_write_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT,
				  0xff00 | (x & 0xff), vf + 1);
	}
	return true;
}

static int genwqe_ffdc_buffs_alloc(struct genwqe_dev *cd)
{
	unsigned int type, e = 0;

	for (type = 0; type < GENWQE_DBG_UNITS; type++) {
		switch (type) {
		case GENWQE_DBG_UNIT0:
			e = genwqe_ffdc_buff_size(cd, 0);
			break;
		case GENWQE_DBG_UNIT1:
			e = genwqe_ffdc_buff_size(cd, 1);
			break;
		case GENWQE_DBG_UNIT2:
			e = genwqe_ffdc_buff_size(cd, 2);
			break;
		case GENWQE_DBG_REGS:
			e = GENWQE_FFDC_REGS;
			break;
		}

		/* currently support only the debug units mentioned here */
		cd->ffdc[type].entries = e;
		cd->ffdc[type].regs = kmalloc(e * sizeof(struct genwqe_reg),
					      GFP_KERNEL);
		/*
		 * regs == NULL is ok; the code using it treats this as
		 * "no regs available". Printing a warning in that case
		 * is acceptable.
		 */
	}
	return 0;
}

static void genwqe_ffdc_buffs_free(struct genwqe_dev *cd)
{
	unsigned int type;

	for (type = 0; type < GENWQE_DBG_UNITS; type++) {
		kfree(cd->ffdc[type].regs);
		cd->ffdc[type].regs = NULL;
	}
}

static int genwqe_read_ids(struct genwqe_dev *cd)
{
	int err = 0;
	int slu_id;
	struct pci_dev *pci_dev = cd->pci_dev;

	cd->slu_unitcfg = __genwqe_readq(cd, IO_SLU_UNITCFG);
	if (cd->slu_unitcfg == IO_ILLEGAL_VALUE) {
		dev_err(&pci_dev->dev,
			"err: SLUID=%016llx\n", cd->slu_unitcfg);
		err = -EIO;
		goto out_err;
	}

	slu_id = genwqe_get_slu_id(cd);
	if (slu_id < GENWQE_SLU_ARCH_REQ || slu_id == 0xff) {
		dev_err(&pci_dev->dev,
			"err: incompatible SLU Architecture %u\n", slu_id);
		err = -ENOENT;
		goto out_err;
	}

	cd->app_unitcfg = __genwqe_readq(cd, IO_APP_UNITCFG);
	if (cd->app_unitcfg == IO_ILLEGAL_VALUE) {
		dev_err(&pci_dev->dev,
			"err: APPID=%016llx\n", cd->app_unitcfg);
		err = -EIO;
		goto out_err;
	}
	genwqe_read_app_id(cd, cd->app_name, sizeof(cd->app_name));

	/*
	 * Is access to all registers possible? If we are a VF the
	 * answer is obvious. If we run fully virtualized, we need to
	 * check if we can access all registers. If we do not have
	 * full access we will cause an UR and some informational FIRs
	 * in the PF, but that should not do any harm.
	 */
	if (pci_dev->is_virtfn)
		cd->is_privileged = 0;
	else
		cd->is_privileged = (__genwqe_readq(cd, IO_SLU_BITSTREAM)
				     != IO_ILLEGAL_VALUE);

 out_err:
	return err;
}

static int genwqe_start(struct genwqe_dev *cd)
{
	int err;
	struct pci_dev *pci_dev = cd->pci_dev;

	err = genwqe_read_ids(cd);
	if (err)
		return err;

	if (genwqe_is_privileged(cd)) {
		/* do this after the tweaks. alloc fail is acceptable */
		genwqe_ffdc_buffs_alloc(cd);
		genwqe_stop_traps(cd);

		/* Collect registers e.g. FIRs, UNITIDs, traces ... */
		genwqe_read_ffdc_regs(cd, cd->ffdc[GENWQE_DBG_REGS].regs,
				      cd->ffdc[GENWQE_DBG_REGS].entries, 0);

		genwqe_ffdc_buff_read(cd, GENWQE_DBG_UNIT0,
				      cd->ffdc[GENWQE_DBG_UNIT0].regs,
				      cd->ffdc[GENWQE_DBG_UNIT0].entries);

		genwqe_ffdc_buff_read(cd, GENWQE_DBG_UNIT1,
				      cd->ffdc[GENWQE_DBG_UNIT1].regs,
				      cd->ffdc[GENWQE_DBG_UNIT1].entries);

		genwqe_ffdc_buff_read(cd, GENWQE_DBG_UNIT2,
				      cd->ffdc[GENWQE_DBG_UNIT2].regs,
				      cd->ffdc[GENWQE_DBG_UNIT2].entries);

		genwqe_start_traps(cd);

		if (cd->card_state == GENWQE_CARD_FATAL_ERROR) {
			dev_warn(&pci_dev->dev,
				 "[%s] chip reload/recovery!\n", __func__);

			/*
			 * Stealth Mode: Reload chip on either hot
			 * reset or PERST.
			 */
			cd->softreset = 0x7Cull;
			__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET,
					cd->softreset);

			err = genwqe_bus_reset(cd);
			if (err != 0) {
				dev_err(&pci_dev->dev,
					"[%s] err: bus reset failed!\n",
					__func__);
				goto out;
			}

			/*
			 * Re-read the IDs because
			 * it could happen that the bitstream load
			 * failed!
			 */
			err = genwqe_read_ids(cd);
			if (err)
				goto out;
		}
	}

	err = genwqe_setup_service_layer(cd);	/* does a reset to the card */
	if (err != 0) {
		dev_err(&pci_dev->dev,
			"[%s] err: could not setup servicelayer!\n", __func__);
		err = -ENODEV;
		goto out;
	}

	if (genwqe_is_privileged(cd)) {		/* code is running _after_ reset */
		genwqe_tweak_hardware(cd);

		genwqe_setup_pf_jtimer(cd);
		genwqe_setup_vf_jtimer(cd);
	}

	err = genwqe_device_create(cd);
	if (err < 0) {
		dev_err(&pci_dev->dev,
			"err: chdev init failed! (err=%d)\n", err);
		goto out_release_service_layer;
	}
	return 0;

 out_release_service_layer:
	genwqe_release_service_layer(cd);
 out:
	if (genwqe_is_privileged(cd))
		genwqe_ffdc_buffs_free(cd);
	return -EIO;
}

/**
 * genwqe_stop() - Stop card operation
 *
 * Recovery notes:
 *   As long as genwqe_thread runs we might access registers during
 *   error data capture. The same holds for the genwqe_health_thread.
 *   When genwqe_bus_reset() fails, this function might be called twice:
 *   first by the genwqe_health_thread() and later by genwqe_remove() to
 *   unbind the device. We must be able to survive that.
 *
 * This function must be robust enough to be called twice.
 */
static int genwqe_stop(struct genwqe_dev *cd)
{
	genwqe_finish_queue(cd);	    /* no register access */
	genwqe_device_remove(cd);	    /* device removed, procs killed */
	genwqe_release_service_layer(cd);   /* here genwqe_thread is stopped */

	if (genwqe_is_privileged(cd)) {
		pci_disable_sriov(cd->pci_dev);	/* access pci config space */
		genwqe_ffdc_buffs_free(cd);
	}

	return 0;
}

/**
 * genwqe_recover_card() - Try to recover the card if it is possible
 *
 * If fatal_err is set no register access is possible anymore. It is
 * likely that genwqe_start fails in that situation. Proper error
 * handling is required in this case.
 *
 * genwqe_bus_reset() will cause the pci code to call genwqe_remove()
 * and later genwqe_probe() for all virtual functions.
 */
static int genwqe_recover_card(struct genwqe_dev *cd, int fatal_err)
{
	int rc;
	struct pci_dev *pci_dev = cd->pci_dev;

	genwqe_stop(cd);

	/*
	 * Make sure chip is not reloaded to maintain FFDC. Write SLU
	 * Reset Register, CPLDReset field to 0.
	 */
	if (!fatal_err) {
		cd->softreset = 0x70ull;
		__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, cd->softreset);
	}

	rc = genwqe_bus_reset(cd);
	if (rc != 0) {
		dev_err(&pci_dev->dev,
			"[%s] err: card recovery impossible!\n", __func__);
		return rc;
	}

	rc = genwqe_start(cd);
	if (rc < 0) {
		dev_err(&pci_dev->dev,
			"[%s] err: failed to launch device!\n", __func__);
		return rc;
	}
	return 0;
}

static int genwqe_health_check_cond(struct genwqe_dev *cd, u64 *gfir)
{
	*gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
	return (*gfir & GFIR_ERR_TRIGGER) &&
		genwqe_recovery_on_fatal_gfir_required(cd);
}

/**
 * genwqe_fir_checking() - Check the fault isolation registers of the card
 *
 * If this code works ok, it can be tried out with the help of the
 * genwqe_poke tool:
 *   sudo ./tools/genwqe_poke 0x8 0xfefefefefef
 *
 * Now the relevant FIRs/sFIRs should be printed out and the driver should
 * invoke recovery (devices are removed and re-added).
 */
static u64 genwqe_fir_checking(struct genwqe_dev *cd)
{
	int j, iterations = 0;
	u64 mask, fir, fec, uid, gfir, gfir_masked, sfir, sfec;
	u32 fir_addr, fir_clr_addr, fec_addr, sfir_addr, sfec_addr;
	struct pci_dev *pci_dev = cd->pci_dev;

 healthMonitor:
	iterations++;
	if (iterations > 16) {
		dev_err(&pci_dev->dev, "* exit looping after %d times\n",
			iterations);
		goto fatal_error;
	}

	gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
	if (gfir != 0x0)
		dev_err(&pci_dev->dev, "* 0x%08x 0x%016llx\n",
			IO_SLC_CFGREG_GFIR, gfir);
	if (gfir == IO_ILLEGAL_VALUE)
		goto fatal_error;

	/*
	 * Returning early when no GFIR bit is set prevents a continuous
	 * printout, e.g. for the following bug: a FIR is set without a
	 * secondary FIR / the FIR cannot be cleared. Comment out the
	 * following 'if' to get the prints:
	 */
	if (gfir == 0)
		return 0;

	gfir_masked = gfir & GFIR_ERR_TRIGGER;	/* fatal errors */

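	/*
	 * Per-unit FIR register layout as used below (unit base is
	 * uid << 24): primary FIR at +0x08, FIR clear at +0x10,
	 * primary FEC at +0x18, secondary FIRs at +0x100 + 8 * j,
	 * secondary FECs at +0x300 + 8 * j.
	 */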
	for (uid = 0; uid < GENWQE_MAX_UNITS; uid++) { /* 0..2 in zEDC */

		/* read the primary FIR (pfir) */
		fir_addr = (uid << 24) + 0x08;
		fir = __genwqe_readq(cd, fir_addr);
		if (fir == 0x0)
			continue;  /* no error in this unit */

		dev_err(&pci_dev->dev, "* 0x%08x 0x%016llx\n", fir_addr, fir);
		if (fir == IO_ILLEGAL_VALUE)
			goto fatal_error;

		/* read primary FEC */
		fec_addr = (uid << 24) + 0x18;
		fec = __genwqe_readq(cd, fec_addr);

		dev_err(&pci_dev->dev, "* 0x%08x 0x%016llx\n", fec_addr, fec);
		if (fec == IO_ILLEGAL_VALUE)
			goto fatal_error;

		for (j = 0, mask = 1ULL; j < 64; j++, mask <<= 1) {

			/* secondary fir empty, skip it */
			if ((fir & mask) == 0x0)
				continue;

			sfir_addr = (uid << 24) + 0x100 + 0x08 * j;
			sfir = __genwqe_readq(cd, sfir_addr);

			if (sfir == IO_ILLEGAL_VALUE)
				goto fatal_error;
			dev_err(&pci_dev->dev,
				"* 0x%08x 0x%016llx\n", sfir_addr, sfir);

			sfec_addr = (uid << 24) + 0x300 + 0x08 * j;
			sfec = __genwqe_readq(cd, sfec_addr);

			if (sfec == IO_ILLEGAL_VALUE)
				goto fatal_error;
			dev_err(&pci_dev->dev,
				"* 0x%08x 0x%016llx\n", sfec_addr, sfec);

			gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
			if (gfir == IO_ILLEGAL_VALUE)
				goto fatal_error;

			/*
			 * gfir turned on during routine! get out and
			 * start over.
			 */
			if ((gfir_masked == 0x0) &&
			    (gfir & GFIR_ERR_TRIGGER)) {
				goto healthMonitor;
			}

			/* do not clear if we entered with a fatal gfir */
			if (gfir_masked == 0x0) {

				/* NEW clear by mask the logged bits */
				sfir_addr = (uid << 24) + 0x100 + 0x08 * j;
				__genwqe_writeq(cd, sfir_addr, sfir);

				dev_dbg(&pci_dev->dev,
					"[HM] Clearing 2ndary FIR 0x%08x "
					"with 0x%016llx\n", sfir_addr, sfir);

				/*
				 * note, these cannot be error-Firs
				 * since gfir_masked is 0 after sfir
				 * was read. Also, it is safe to do
				 * this write if sfir=0. Still need to
				 * clear the primary. This just means
				 * there is no secondary FIR.
				 */

				/* clear by mask the logged bit. */
				fir_clr_addr = (uid << 24) + 0x10;
				__genwqe_writeq(cd, fir_clr_addr, mask);

				dev_dbg(&pci_dev->dev,
					"[HM] Clearing primary FIR 0x%08x "
					"with 0x%016llx\n", fir_clr_addr,
					mask);
			}
		}
	}
	gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
	if (gfir == IO_ILLEGAL_VALUE)
		goto fatal_error;

	if ((gfir_masked == 0x0) && (gfir & GFIR_ERR_TRIGGER)) {
		/*
		 * Check once more that it didn't go on after all the
		 * FIRS were cleared.
		 */
		dev_dbg(&pci_dev->dev, "ACK! Another FIR! Recursing %d!\n",
			iterations);
		goto healthMonitor;
	}
	return gfir_masked;

 fatal_error:
	return IO_ILLEGAL_VALUE;
}

/**
 * genwqe_pci_fundamental_reset() - trigger a PCIe fundamental reset on the slot
 *
 * Note: pci_set_pcie_reset_state() is not implemented on all archs, so this
 * reset method will not work in all cases.
 *
 * Return: 0 on success or error code from pci_set_pcie_reset_state()
 */
static int genwqe_pci_fundamental_reset(struct pci_dev *pci_dev)
{
	int rc;

	/*
	 * lock pci config space access from userspace,
	 * save state and issue PCIe fundamental reset
	 */
	pci_cfg_access_lock(pci_dev);
	pci_save_state(pci_dev);
	rc = pci_set_pcie_reset_state(pci_dev, pcie_warm_reset);
	if (!rc) {
		/* keep PCIe reset asserted for 250ms */
		msleep(250);
		pci_set_pcie_reset_state(pci_dev, pcie_deassert_reset);
		/* Wait for 2s to reload flash and train the link */
		msleep(2000);
	}
	pci_restore_state(pci_dev);
	pci_cfg_access_unlock(pci_dev);
	return rc;
}

/*
 * genwqe_reload_bistream() - reload card bitstream
 *
 * Set the appropriate register and call fundamental reset to reload
 * the card bitstream.
 *
 * Return: 0 on success, error code otherwise
 */
static int genwqe_reload_bistream(struct genwqe_dev *cd)
{
	struct pci_dev *pci_dev = cd->pci_dev;
	int rc;

	dev_info(&pci_dev->dev,
		 "[%s] resetting card for bitstream reload\n",
		 __func__);

	genwqe_stop(cd);

	/*
	 * Cause a CPLD reprogram with the 'next_bitstream'
	 * partition on PCIe hot or fundamental reset
	 */
	__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET,
			(cd->softreset & 0xcull) | 0x70ull);

	rc = genwqe_pci_fundamental_reset(pci_dev);
	if (rc) {
		/*
		 * A fundamental reset failure can be caused
		 * by lack of support on the arch, so we just
		 * log the error and try to start the card
		 * again.
		 */
		dev_err(&pci_dev->dev,
			"[%s] err: failed to reset card for bitstream reload\n",
			__func__);
	}

	rc = genwqe_start(cd);
	if (rc) {
		dev_err(&pci_dev->dev,
			"[%s] err: cannot start card services! (err=%d)\n",
			__func__, rc);
		return rc;
	}
	dev_info(&pci_dev->dev,
		 "[%s] card reloaded\n", __func__);
	return 0;
}

/**
 * genwqe_health_thread() - Health checking thread
 *
 * This thread is only started for the PF of the card.
 *
 * This thread monitors the health of the card. A critical situation
 * is when we read registers which contain -1 (IO_ILLEGAL_VALUE). In
 * this case we need to be recovered from outside. Writing to
 * registers will very likely not work either.
 *
 * This thread must only exit if kthread_should_stop() becomes true.
 *
 * Conditions for the health-thread to trigger:
 *   a) when a kthread_stop() request comes in or
 *   b) a critical GFIR occurred
 *
 * Informational GFIRs are checked and potentially printed every
 * health_check_interval seconds.
 */
static int genwqe_health_thread(void *data)
{
	int rc, should_stop = 0;
	struct genwqe_dev *cd = data;
	struct pci_dev *pci_dev = cd->pci_dev;
	u64 gfir, gfir_masked, slu_unitcfg, app_unitcfg;

	while (!kthread_should_stop()) {
		rc = wait_event_interruptible_timeout(cd->health_waitq,
			 (genwqe_health_check_cond(cd, &gfir) ||
			  (should_stop = kthread_should_stop())),
			 genwqe_health_check_interval * HZ);

		if (should_stop)
			break;

		if (gfir == IO_ILLEGAL_VALUE) {
			dev_err(&pci_dev->dev,
				"[%s] GFIR=%016llx\n", __func__, gfir);
			goto fatal_error;
		}

		slu_unitcfg = __genwqe_readq(cd, IO_SLU_UNITCFG);
		if (slu_unitcfg == IO_ILLEGAL_VALUE) {
			dev_err(&pci_dev->dev,
				"[%s] SLU_UNITCFG=%016llx\n",
				__func__, slu_unitcfg);
			goto fatal_error;
		}

		app_unitcfg = __genwqe_readq(cd, IO_APP_UNITCFG);
		if (app_unitcfg == IO_ILLEGAL_VALUE) {
			dev_err(&pci_dev->dev,
				"[%s] APP_UNITCFG=%016llx\n",
				__func__, app_unitcfg);
			goto fatal_error;
		}

		gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
		if (gfir == IO_ILLEGAL_VALUE) {
			dev_err(&pci_dev->dev,
				"[%s] %s: GFIR=%016llx\n", __func__,
				(gfir & GFIR_ERR_TRIGGER) ? "err" : "info",
				gfir);
			goto fatal_error;
		}

		gfir_masked = genwqe_fir_checking(cd);
		if (gfir_masked == IO_ILLEGAL_VALUE)
			goto fatal_error;

		/*
		 * GFIR ErrorTrigger bits set => reset the card!
		 * Never do this for old/manufacturing images!
		 */
		if ((gfir_masked) && !cd->skip_recovery &&
		    genwqe_recovery_on_fatal_gfir_required(cd)) {

			cd->card_state = GENWQE_CARD_FATAL_ERROR;

			rc = genwqe_recover_card(cd, 0);
			if (rc < 0) {
				/* FIXME Card is unusable and needs unbind! */
				goto fatal_error;
			}
		}

		if (cd->card_state == GENWQE_CARD_RELOAD_BITSTREAM) {
			/* Userspace requested card bitstream reload */
			rc = genwqe_reload_bistream(cd);
			if (rc)
				goto fatal_error;
		}

		cd->last_gfir = gfir;
		cond_resched();
	}

	return 0;

 fatal_error:
	if (cd->use_platform_recovery) {
		/*
		 * Since we use raw accessors, EEH errors won't be
		 * detected by the platform until we do a non-raw MMIO
		 * or config space read.
		 */
		readq(cd->mmio + IO_SLC_CFGREG_GFIR);

		/* We do nothing if the card is undergoing PCI error recovery */
		if (pci_channel_offline(pci_dev))
			return -EIO;
	}

	dev_err(&pci_dev->dev,
		"[%s] card unusable. Please trigger unbind!\n", __func__);

	/* Bring down logical devices to inform user space via udev remove. */
	cd->card_state = GENWQE_CARD_FATAL_ERROR;
	genwqe_stop(cd);

	/* genwqe_bus_reset() failed. Now wait for genwqe_remove(). */
	while (!kthread_should_stop())
		cond_resched();

	return -EIO;
}

static int genwqe_health_check_start(struct genwqe_dev *cd)
{
	int rc;

	if (genwqe_health_check_interval <= 0)
		return 0;	/* valid for disabling the service */

	/* moved before request_irq() */
	/* init_waitqueue_head(&cd->health_waitq); */

	cd->health_thread = kthread_run(genwqe_health_thread, cd,
					GENWQE_DEVNAME "%d_health",
					cd->card_idx);
	if (IS_ERR(cd->health_thread)) {
		rc = PTR_ERR(cd->health_thread);
		cd->health_thread = NULL;
		return rc;
	}
	return 0;
}

static int genwqe_health_thread_running(struct genwqe_dev *cd)
{
	return cd->health_thread != NULL;
}

static int genwqe_health_check_stop(struct genwqe_dev *cd)
{
	int rc;

	if (!genwqe_health_thread_running(cd))
		return -EIO;

	rc = kthread_stop(cd->health_thread);
	cd->health_thread = NULL;
	return 0;
}

/**
 * genwqe_pci_setup() - Allocate PCIe related resources for our card
 */
static int genwqe_pci_setup(struct genwqe_dev *cd)
{
	int err, bars;
	struct pci_dev *pci_dev = cd->pci_dev;

	bars = pci_select_bars(pci_dev, IORESOURCE_MEM);
	err = pci_enable_device_mem(pci_dev);
	if (err) {
		dev_err(&pci_dev->dev,
			"err: failed to enable pci memory (err=%d)\n", err);
		goto err_out;
	}

	/* Reserve PCI I/O and memory resources */
	err = pci_request_selected_regions(pci_dev, bars, genwqe_driver_name);
	if (err) {
		dev_err(&pci_dev->dev,
			"[%s] err: request bars failed (%d)\n", __func__, err);
		err = -EIO;
		goto err_disable_device;
	}

	/* check for 64-bit DMA address supported (DAC) */
	if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) {
		err = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pci_dev->dev,
				"err: DMA64 consistent mask error\n");
			err = -EIO;
			goto out_release_resources;
		}
	/* check for 32-bit DMA address supported (SAC) */
	} else if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
		err = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pci_dev->dev,
				"err: DMA32 consistent mask error\n");
			err = -EIO;
			goto out_release_resources;
		}
	} else {
		dev_err(&pci_dev->dev,
			"err: neither DMA32 nor DMA64 supported\n");
		err = -EIO;
		goto out_release_resources;
	}

	pci_set_master(pci_dev);
	pci_enable_pcie_error_reporting(pci_dev);

	/* EEH recovery requires PCIe fundamental reset */
	pci_dev->needs_freset = 1;

	/* request complete BAR-0 space (length = 0) */
	cd->mmio_len = pci_resource_len(pci_dev, 0);
	cd->mmio = pci_iomap(pci_dev, 0, 0);
	if (cd->mmio == NULL) {
		dev_err(&pci_dev->dev,
			"[%s] err: mapping BAR0 failed\n", __func__);
		err = -ENOMEM;
		goto out_release_resources;
	}

	cd->num_vfs = pci_sriov_get_totalvfs(pci_dev);

	err = genwqe_read_ids(cd);
	if (err)
		goto out_iounmap;

	return 0;

 out_iounmap:
	pci_iounmap(pci_dev, cd->mmio);
 out_release_resources:
	pci_release_selected_regions(pci_dev, bars);
 err_disable_device:
	pci_disable_device(pci_dev);
 err_out:
	return err;
}

/**
 * genwqe_pci_remove() - Free PCIe related resources for our card
 */
static void genwqe_pci_remove(struct genwqe_dev *cd)
{
	int bars;
	struct pci_dev *pci_dev = cd->pci_dev;

	if (cd->mmio)
		pci_iounmap(pci_dev, cd->mmio);

	bars = pci_select_bars(pci_dev, IORESOURCE_MEM);
	pci_release_selected_regions(pci_dev, bars);
	pci_disable_device(pci_dev);
}

/**
 * genwqe_probe() - Device initialization
 * @pci_dev: PCI device information struct
 * @id:      matching entry from the PCI device ID table
 *
 * Callable for multiple cards. This function is called on bind.
 *
 * Return: 0 if succeeded, < 0 when failed
 */
static int genwqe_probe(struct pci_dev *pci_dev,
			const struct pci_device_id *id)
{
	int err;
	struct genwqe_dev *cd;

	genwqe_init_crc32();

	cd = genwqe_dev_alloc();
	if (IS_ERR(cd)) {
		dev_err(&pci_dev->dev, "err: could not alloc mem (err=%d)!\n",
			(int)PTR_ERR(cd));
		return PTR_ERR(cd);
	}

	dev_set_drvdata(&pci_dev->dev, cd);
	cd->pci_dev = pci_dev;

	err = genwqe_pci_setup(cd);
	if (err < 0) {
		dev_err(&pci_dev->dev,
			"err: problems with PCI setup (err=%d)\n", err);
		goto out_free_dev;
	}

	err = genwqe_start(cd);
	if (err < 0) {
		dev_err(&pci_dev->dev,
			"err: cannot start card services! (err=%d)\n", err);
		goto out_pci_remove;
	}

	if (genwqe_is_privileged(cd)) {
		err = genwqe_health_check_start(cd);
		if (err < 0) {
			dev_err(&pci_dev->dev,
				"err: cannot start health checking! "
				"(err=%d)\n", err);
			goto out_stop_services;
		}
	}
	return 0;

 out_stop_services:
	genwqe_stop(cd);
 out_pci_remove:
	genwqe_pci_remove(cd);
 out_free_dev:
	genwqe_dev_free(cd);
	return err;
}

/**
 * genwqe_remove() - Called when device is removed (hot-pluggable)
 *
 * Also called when the driver is unloaded or when the device is unbound.
 */
static void genwqe_remove(struct pci_dev *pci_dev)
{
	struct genwqe_dev *cd = dev_get_drvdata(&pci_dev->dev);

	genwqe_health_check_stop(cd);

	/*
	 * genwqe_stop() must survive if it is called twice
	 * sequentially. This happens when the health thread calls it
	 * and fails on genwqe_bus_reset().
	 */
	genwqe_stop(cd);
	genwqe_pci_remove(cd);
	genwqe_dev_free(cd);
}

/*
 * genwqe_err_error_detected() - Error detection callback
 *
 * This callback is called by the PCI subsystem whenever a PCI bus
 * error is detected.
 */
static pci_ers_result_t genwqe_err_error_detected(struct pci_dev *pci_dev,
						  enum pci_channel_state state)
{
	struct genwqe_dev *cd;

	dev_err(&pci_dev->dev, "[%s] state=%d\n", __func__, state);

	cd = dev_get_drvdata(&pci_dev->dev);
	if (cd == NULL)
		return PCI_ERS_RESULT_DISCONNECT;

	/* Stop the card */
	genwqe_health_check_stop(cd);
	genwqe_stop(cd);

	/*
	 * On permanent failure, the PCI code will call device remove
	 * after the return of this function.
	 * genwqe_stop() can be called twice.
	 */
	if (state == pci_channel_io_perm_failure) {
		return PCI_ERS_RESULT_DISCONNECT;
	} else {
		genwqe_pci_remove(cd);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}

static pci_ers_result_t genwqe_err_slot_reset(struct pci_dev *pci_dev)
{
	int rc;
	struct genwqe_dev *cd = dev_get_drvdata(&pci_dev->dev);

	rc = genwqe_pci_setup(cd);
	if (!rc) {
		return PCI_ERS_RESULT_RECOVERED;
	} else {
		dev_err(&pci_dev->dev,
			"err: problems with PCI setup (err=%d)\n", rc);
		return PCI_ERS_RESULT_DISCONNECT;
	}
}

static pci_ers_result_t genwqe_err_result_none(struct pci_dev *dev)
{
	return PCI_ERS_RESULT_NONE;
}

static void genwqe_err_resume(struct pci_dev *pci_dev)
{
	int rc;
	struct genwqe_dev *cd = dev_get_drvdata(&pci_dev->dev);

	rc = genwqe_start(cd);
	if (!rc) {
		rc = genwqe_health_check_start(cd);
		if (rc)
			dev_err(&pci_dev->dev,
				"err: cannot start health checking! (err=%d)\n",
				rc);
	} else {
		dev_err(&pci_dev->dev,
			"err: cannot start card services! (err=%d)\n", rc);
	}
}

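/*
 * Called by the PCI core when the number of virtual functions is
 * changed, typically when the admin writes to the sriov_numvfs sysfs
 * attribute: numvfs > 0 enables that many VFs, numvfs == 0 disables
 * SR-IOV again.
 */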
static int genwqe_sriov_configure(struct pci_dev *dev, int numvfs)
{
	struct genwqe_dev *cd = dev_get_drvdata(&dev->dev);

	if (numvfs > 0) {
		genwqe_setup_vf_jtimer(cd);
		pci_enable_sriov(dev, numvfs);
		return numvfs;
	}
	if (numvfs == 0) {
		pci_disable_sriov(dev);
		return 0;
	}
	return 0;
}

static struct pci_error_handlers genwqe_err_handler = {
	.error_detected = genwqe_err_error_detected,
	.mmio_enabled = genwqe_err_result_none,
	.link_reset = genwqe_err_result_none,
	.slot_reset = genwqe_err_slot_reset,
	.resume = genwqe_err_resume,
};

static struct pci_driver genwqe_driver = {
	.name = genwqe_driver_name,
	.id_table = genwqe_device_table,
	.probe = genwqe_probe,
	.remove = genwqe_remove,
	.sriov_configure = genwqe_sriov_configure,
	.err_handler = &genwqe_err_handler,
};

/**
 * genwqe_init_module() - Driver registration and initialization
 */
static int __init genwqe_init_module(void)
{
	int rc;

	class_genwqe = class_create(THIS_MODULE, GENWQE_DEVNAME);
	if (IS_ERR(class_genwqe)) {
		pr_err("[%s] create class failed\n", __func__);
		return -ENOMEM;
	}

	debugfs_genwqe = debugfs_create_dir(GENWQE_DEVNAME, NULL);
	if (!debugfs_genwqe) {
		rc = -ENOMEM;
		goto err_out;
	}

	rc = pci_register_driver(&genwqe_driver);
	if (rc != 0) {
		pr_err("[%s] pci_reg_driver (rc=%d)\n", __func__, rc);
		goto err_out0;
	}

	return rc;

 err_out0:
	debugfs_remove(debugfs_genwqe);
 err_out:
	class_destroy(class_genwqe);
	return rc;
}

/**
 * genwqe_exit_module() - Driver exit
 */
static void __exit genwqe_exit_module(void)
{
	pci_unregister_driver(&genwqe_driver);
	debugfs_remove(debugfs_genwqe);
	class_destroy(class_genwqe);
}

module_init(genwqe_init_module);
module_exit(genwqe_exit_module);