]>
Commit | Line | Data |
---|---|---|
c465def6 | 1 | /* |
45e829ea | 2 | * PCIe AER software error injection support. |
c465def6 | 3 | * |
45e829ea | 4 | * Debuging PCIe AER code is quite difficult because it is hard to |
c465def6 HY |
5 | * trigger various real hardware errors. Software based error |
6 | * injection can fake almost all kinds of errors with the help of a | |
7 | * user space helper tool aer-inject, which can be gotten from: | |
8 | * http://www.kernel.org/pub/linux/utils/pci/aer-inject/ | |
9 | * | |
10 | * Copyright 2009 Intel Corporation. | |
11 | * Huang Ying <ying.huang@intel.com> | |
12 | * | |
13 | * This program is free software; you can redistribute it and/or | |
14 | * modify it under the terms of the GNU General Public License | |
15 | * as published by the Free Software Foundation; version 2 | |
16 | * of the License. | |
17 | * | |
18 | */ | |
19 | ||
20 | #include <linux/module.h> | |
21 | #include <linux/init.h> | |
22 | #include <linux/miscdevice.h> | |
23 | #include <linux/pci.h> | |
5a0e3ad6 | 24 | #include <linux/slab.h> |
c465def6 | 25 | #include <linux/fs.h> |
c9a91883 | 26 | #include <linux/uaccess.h> |
cc5d153a | 27 | #include <linux/stddef.h> |
c465def6 HY |
28 | #include "aerdrv.h" |
29 | ||
/* Override the existing corrected and uncorrected error masks */
static bool aer_mask_override;
module_param(aer_mask_override, bool, 0);
/*
 * Error-injection request as written by the user-space aer-inject tool.
 * Older tools may omit the trailing @domain field: aer_inject_write()
 * zero-fills the structure before copying, so a short write implies
 * domain 0.
 */
struct aer_error_inj {
	u8 bus;
	u8 dev;
	u8 fn;
	u32 uncor_status;	/* PCI_ERR_UNCOR_STATUS bits to inject */
	u32 cor_status;		/* PCI_ERR_COR_STATUS bits to inject */
	u32 header_log0;
	u32 header_log1;
	u32 header_log2;
	u32 header_log3;
	u32 domain;		/* PCI domain (segment) of the device */
};
46 | ||
/*
 * Software-emulated AER register state for one device, looked up by
 * domain/bus/devfn.  Entries live on the "einjected" list and are
 * protected by inject_lock.
 */
struct aer_error {
	struct list_head list;
	u32 domain;
	unsigned int bus;
	unsigned int devfn;
	int pos_cap_err;	/* config offset of the AER capability; -1 if none */

	u32 uncor_status;
	u32 cor_status;
	u32 header_log0;
	u32 header_log1;
	u32 header_log2;
	u32 header_log3;
	u32 root_status;
	u32 source_id;
};
63 | ||
/*
 * Remembers the original pci_ops of a bus whose accessors were replaced
 * by aer_inj_pci_ops, so they can be restored on module exit.
 */
struct pci_bus_ops {
	struct list_head list;
	struct pci_bus *bus;
	struct pci_ops *ops;	/* original ops of @bus */
};
69 | ||
/* All software-injected errors (struct aer_error entries) */
static LIST_HEAD(einjected);

/* Buses whose config accessors were overridden (struct pci_bus_ops entries) */
static LIST_HEAD(pci_bus_ops_list);

/* Protect einjected and pci_bus_ops_list */
static DEFINE_SPINLOCK(inject_lock);
76 | ||
28ef241f | 77 | static void aer_error_init(struct aer_error *err, u32 domain, |
cc5d153a AP |
78 | unsigned int bus, unsigned int devfn, |
79 | int pos_cap_err) | |
c465def6 HY |
80 | { |
81 | INIT_LIST_HEAD(&err->list); | |
cc5d153a | 82 | err->domain = domain; |
c465def6 HY |
83 | err->bus = bus; |
84 | err->devfn = devfn; | |
85 | err->pos_cap_err = pos_cap_err; | |
86 | } | |
87 | ||
88 | /* inject_lock must be held before calling */ | |
28ef241f | 89 | static struct aer_error *__find_aer_error(u32 domain, unsigned int bus, |
cc5d153a | 90 | unsigned int devfn) |
c465def6 HY |
91 | { |
92 | struct aer_error *err; | |
93 | ||
94 | list_for_each_entry(err, &einjected, list) { | |
cc5d153a AP |
95 | if (domain == err->domain && |
96 | bus == err->bus && | |
97 | devfn == err->devfn) | |
c465def6 HY |
98 | return err; |
99 | } | |
100 | return NULL; | |
101 | } | |
102 | ||
103 | /* inject_lock must be held before calling */ | |
104 | static struct aer_error *__find_aer_error_by_dev(struct pci_dev *dev) | |
105 | { | |
cc5d153a AP |
106 | int domain = pci_domain_nr(dev->bus); |
107 | if (domain < 0) | |
108 | return NULL; | |
28ef241f | 109 | return __find_aer_error(domain, dev->bus->number, dev->devfn); |
c465def6 HY |
110 | } |
111 | ||
112 | /* inject_lock must be held before calling */ | |
113 | static struct pci_ops *__find_pci_bus_ops(struct pci_bus *bus) | |
114 | { | |
115 | struct pci_bus_ops *bus_ops; | |
116 | ||
117 | list_for_each_entry(bus_ops, &pci_bus_ops_list, list) { | |
118 | if (bus_ops->bus == bus) | |
119 | return bus_ops->ops; | |
120 | } | |
121 | return NULL; | |
122 | } | |
123 | ||
124 | static struct pci_bus_ops *pci_bus_ops_pop(void) | |
125 | { | |
126 | unsigned long flags; | |
127 | struct pci_bus_ops *bus_ops = NULL; | |
128 | ||
129 | spin_lock_irqsave(&inject_lock, flags); | |
130 | if (list_empty(&pci_bus_ops_list)) | |
131 | bus_ops = NULL; | |
132 | else { | |
133 | struct list_head *lh = pci_bus_ops_list.next; | |
134 | list_del(lh); | |
135 | bus_ops = list_entry(lh, struct pci_bus_ops, list); | |
136 | } | |
137 | spin_unlock_irqrestore(&inject_lock, flags); | |
138 | return bus_ops; | |
139 | } | |
140 | ||
141 | static u32 *find_pci_config_dword(struct aer_error *err, int where, | |
142 | int *prw1cs) | |
143 | { | |
144 | int rw1cs = 0; | |
145 | u32 *target = NULL; | |
146 | ||
147 | if (err->pos_cap_err == -1) | |
148 | return NULL; | |
149 | ||
150 | switch (where - err->pos_cap_err) { | |
151 | case PCI_ERR_UNCOR_STATUS: | |
152 | target = &err->uncor_status; | |
153 | rw1cs = 1; | |
154 | break; | |
155 | case PCI_ERR_COR_STATUS: | |
156 | target = &err->cor_status; | |
157 | rw1cs = 1; | |
158 | break; | |
159 | case PCI_ERR_HEADER_LOG: | |
160 | target = &err->header_log0; | |
161 | break; | |
162 | case PCI_ERR_HEADER_LOG+4: | |
163 | target = &err->header_log1; | |
164 | break; | |
165 | case PCI_ERR_HEADER_LOG+8: | |
c9a91883 | 166 | target = &err->header_log2; |
c465def6 HY |
167 | break; |
168 | case PCI_ERR_HEADER_LOG+12: | |
169 | target = &err->header_log3; | |
170 | break; | |
171 | case PCI_ERR_ROOT_STATUS: | |
172 | target = &err->root_status; | |
173 | rw1cs = 1; | |
174 | break; | |
f647a44f | 175 | case PCI_ERR_ROOT_ERR_SRC: |
c465def6 HY |
176 | target = &err->source_id; |
177 | break; | |
178 | } | |
179 | if (prw1cs) | |
180 | *prw1cs = rw1cs; | |
181 | return target; | |
182 | } | |
183 | ||
3b0a6d1a BH |
184 | static int aer_inj_read_config(struct pci_bus *bus, unsigned int devfn, |
185 | int where, int size, u32 *val) | |
c465def6 HY |
186 | { |
187 | u32 *sim; | |
188 | struct aer_error *err; | |
189 | unsigned long flags; | |
190 | struct pci_ops *ops; | |
7e8fbdc6 | 191 | struct pci_ops *my_ops; |
cc5d153a | 192 | int domain; |
7e8fbdc6 | 193 | int rv; |
c465def6 HY |
194 | |
195 | spin_lock_irqsave(&inject_lock, flags); | |
196 | if (size != sizeof(u32)) | |
197 | goto out; | |
cc5d153a AP |
198 | domain = pci_domain_nr(bus); |
199 | if (domain < 0) | |
200 | goto out; | |
28ef241f | 201 | err = __find_aer_error(domain, bus->number, devfn); |
c465def6 HY |
202 | if (!err) |
203 | goto out; | |
204 | ||
205 | sim = find_pci_config_dword(err, where, NULL); | |
206 | if (sim) { | |
207 | *val = *sim; | |
208 | spin_unlock_irqrestore(&inject_lock, flags); | |
209 | return 0; | |
210 | } | |
211 | out: | |
212 | ops = __find_pci_bus_ops(bus); | |
7e8fbdc6 DD |
213 | /* |
214 | * pci_lock must already be held, so we can directly | |
215 | * manipulate bus->ops. Many config access functions, | |
216 | * including pci_generic_config_read() require the original | |
217 | * bus->ops be installed to function, so temporarily put them | |
218 | * back. | |
219 | */ | |
220 | my_ops = bus->ops; | |
221 | bus->ops = ops; | |
222 | rv = ops->read(bus, devfn, where, size, val); | |
223 | bus->ops = my_ops; | |
c465def6 | 224 | spin_unlock_irqrestore(&inject_lock, flags); |
7e8fbdc6 | 225 | return rv; |
c465def6 HY |
226 | } |
227 | ||
3b0a6d1a BH |
228 | static int aer_inj_write_config(struct pci_bus *bus, unsigned int devfn, |
229 | int where, int size, u32 val) | |
c465def6 HY |
230 | { |
231 | u32 *sim; | |
232 | struct aer_error *err; | |
233 | unsigned long flags; | |
234 | int rw1cs; | |
235 | struct pci_ops *ops; | |
7e8fbdc6 | 236 | struct pci_ops *my_ops; |
cc5d153a | 237 | int domain; |
7e8fbdc6 | 238 | int rv; |
c465def6 HY |
239 | |
240 | spin_lock_irqsave(&inject_lock, flags); | |
241 | if (size != sizeof(u32)) | |
242 | goto out; | |
cc5d153a AP |
243 | domain = pci_domain_nr(bus); |
244 | if (domain < 0) | |
245 | goto out; | |
28ef241f | 246 | err = __find_aer_error(domain, bus->number, devfn); |
c465def6 HY |
247 | if (!err) |
248 | goto out; | |
249 | ||
250 | sim = find_pci_config_dword(err, where, &rw1cs); | |
251 | if (sim) { | |
252 | if (rw1cs) | |
253 | *sim ^= val; | |
254 | else | |
255 | *sim = val; | |
256 | spin_unlock_irqrestore(&inject_lock, flags); | |
257 | return 0; | |
258 | } | |
259 | out: | |
260 | ops = __find_pci_bus_ops(bus); | |
7e8fbdc6 DD |
261 | /* |
262 | * pci_lock must already be held, so we can directly | |
263 | * manipulate bus->ops. Many config access functions, | |
264 | * including pci_generic_config_write() require the original | |
265 | * bus->ops be installed to function, so temporarily put them | |
266 | * back. | |
267 | */ | |
268 | my_ops = bus->ops; | |
269 | bus->ops = ops; | |
270 | rv = ops->write(bus, devfn, where, size, val); | |
271 | bus->ops = my_ops; | |
c465def6 | 272 | spin_unlock_irqrestore(&inject_lock, flags); |
7e8fbdc6 | 273 | return rv; |
c465def6 HY |
274 | } |
275 | ||
/* Replacement config accessors that serve the emulated AER registers */
static struct pci_ops aer_inj_pci_ops = {
	.read = aer_inj_read_config,
	.write = aer_inj_write_config,
};
280 | ||
/* Record that @bus originally used @ops before being patched */
static void pci_bus_ops_init(struct pci_bus_ops *bus_ops,
			     struct pci_bus *bus,
			     struct pci_ops *ops)
{
	INIT_LIST_HEAD(&bus_ops->list);
	bus_ops->bus = bus;
	bus_ops->ops = ops;
}
289 | ||
290 | static int pci_bus_set_aer_ops(struct pci_bus *bus) | |
291 | { | |
292 | struct pci_ops *ops; | |
293 | struct pci_bus_ops *bus_ops; | |
294 | unsigned long flags; | |
295 | ||
296 | bus_ops = kmalloc(sizeof(*bus_ops), GFP_KERNEL); | |
297 | if (!bus_ops) | |
298 | return -ENOMEM; | |
3b0a6d1a | 299 | ops = pci_bus_set_ops(bus, &aer_inj_pci_ops); |
c465def6 | 300 | spin_lock_irqsave(&inject_lock, flags); |
3b0a6d1a | 301 | if (ops == &aer_inj_pci_ops) |
c465def6 HY |
302 | goto out; |
303 | pci_bus_ops_init(bus_ops, bus, ops); | |
304 | list_add(&bus_ops->list, &pci_bus_ops_list); | |
305 | bus_ops = NULL; | |
306 | out: | |
307 | spin_unlock_irqrestore(&inject_lock, flags); | |
c9a91883 | 308 | kfree(bus_ops); |
c465def6 HY |
309 | return 0; |
310 | } | |
311 | ||
312 | static struct pci_dev *pcie_find_root_port(struct pci_dev *dev) | |
313 | { | |
314 | while (1) { | |
b44d7db3 | 315 | if (!pci_is_pcie(dev)) |
c465def6 | 316 | break; |
62f87c0e | 317 | if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) |
c465def6 HY |
318 | return dev; |
319 | if (!dev->bus->self) | |
320 | break; | |
321 | dev = dev->bus->self; | |
322 | } | |
323 | return NULL; | |
324 | } | |
325 | ||
326 | static int find_aer_device_iter(struct device *device, void *data) | |
327 | { | |
328 | struct pcie_device **result = data; | |
329 | struct pcie_device *pcie_dev; | |
330 | ||
331 | if (device->bus == &pcie_port_bus_type) { | |
332 | pcie_dev = to_pcie_device(device); | |
333 | if (pcie_dev->service & PCIE_PORT_SERVICE_AER) { | |
334 | *result = pcie_dev; | |
335 | return 1; | |
336 | } | |
337 | } | |
338 | return 0; | |
339 | } | |
340 | ||
/* Find the AER port service device of @dev; nonzero iff found */
static int find_aer_device(struct pci_dev *dev, struct pcie_device **result)
{
	return device_for_each_child(&dev->dev, result, find_aer_device_iter);
}
345 | ||
346 | static int aer_inject(struct aer_error_inj *einj) | |
347 | { | |
348 | struct aer_error *err, *rperr; | |
349 | struct aer_error *err_alloc = NULL, *rperr_alloc = NULL; | |
350 | struct pci_dev *dev, *rpdev; | |
351 | struct pcie_device *edev; | |
352 | unsigned long flags; | |
353 | unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn); | |
354 | int pos_cap_err, rp_pos_cap_err; | |
40294d8f | 355 | u32 sever, cor_mask, uncor_mask, cor_mask_orig = 0, uncor_mask_orig = 0; |
c465def6 HY |
356 | int ret = 0; |
357 | ||
28ef241f | 358 | dev = pci_get_domain_bus_and_slot(einj->domain, einj->bus, devfn); |
c465def6 | 359 | if (!dev) |
1d024355 | 360 | return -ENODEV; |
c465def6 HY |
361 | rpdev = pcie_find_root_port(dev); |
362 | if (!rpdev) { | |
e82b14bd | 363 | ret = -ENODEV; |
c465def6 HY |
364 | goto out_put; |
365 | } | |
366 | ||
367 | pos_cap_err = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); | |
368 | if (!pos_cap_err) { | |
e82b14bd | 369 | ret = -EPERM; |
c465def6 HY |
370 | goto out_put; |
371 | } | |
372 | pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever); | |
bd1f46de AP |
373 | pci_read_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK, &cor_mask); |
374 | pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK, | |
375 | &uncor_mask); | |
c465def6 HY |
376 | |
377 | rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR); | |
378 | if (!rp_pos_cap_err) { | |
e82b14bd | 379 | ret = -EPERM; |
c465def6 HY |
380 | goto out_put; |
381 | } | |
382 | ||
383 | err_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL); | |
384 | if (!err_alloc) { | |
385 | ret = -ENOMEM; | |
386 | goto out_put; | |
387 | } | |
388 | rperr_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL); | |
389 | if (!rperr_alloc) { | |
390 | ret = -ENOMEM; | |
391 | goto out_put; | |
392 | } | |
393 | ||
457d9d08 PB |
394 | if (aer_mask_override) { |
395 | cor_mask_orig = cor_mask; | |
396 | cor_mask &= !(einj->cor_status); | |
397 | pci_write_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK, | |
398 | cor_mask); | |
399 | ||
400 | uncor_mask_orig = uncor_mask; | |
401 | uncor_mask &= !(einj->uncor_status); | |
402 | pci_write_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK, | |
403 | uncor_mask); | |
404 | } | |
405 | ||
c465def6 HY |
406 | spin_lock_irqsave(&inject_lock, flags); |
407 | ||
408 | err = __find_aer_error_by_dev(dev); | |
409 | if (!err) { | |
410 | err = err_alloc; | |
411 | err_alloc = NULL; | |
cc5d153a AP |
412 | aer_error_init(err, einj->domain, einj->bus, devfn, |
413 | pos_cap_err); | |
c465def6 HY |
414 | list_add(&err->list, &einjected); |
415 | } | |
416 | err->uncor_status |= einj->uncor_status; | |
417 | err->cor_status |= einj->cor_status; | |
418 | err->header_log0 = einj->header_log0; | |
419 | err->header_log1 = einj->header_log1; | |
420 | err->header_log2 = einj->header_log2; | |
421 | err->header_log3 = einj->header_log3; | |
422 | ||
457d9d08 PB |
423 | if (!aer_mask_override && einj->cor_status && |
424 | !(einj->cor_status & ~cor_mask)) { | |
b49bfd32 | 425 | ret = -EINVAL; |
227f0647 | 426 | printk(KERN_WARNING "The correctable error(s) is masked by device\n"); |
b49bfd32 YS |
427 | spin_unlock_irqrestore(&inject_lock, flags); |
428 | goto out_put; | |
429 | } | |
457d9d08 PB |
430 | if (!aer_mask_override && einj->uncor_status && |
431 | !(einj->uncor_status & ~uncor_mask)) { | |
b49bfd32 | 432 | ret = -EINVAL; |
227f0647 | 433 | printk(KERN_WARNING "The uncorrectable error(s) is masked by device\n"); |
b49bfd32 YS |
434 | spin_unlock_irqrestore(&inject_lock, flags); |
435 | goto out_put; | |
436 | } | |
437 | ||
c465def6 HY |
438 | rperr = __find_aer_error_by_dev(rpdev); |
439 | if (!rperr) { | |
440 | rperr = rperr_alloc; | |
441 | rperr_alloc = NULL; | |
cc5d153a AP |
442 | aer_error_init(rperr, pci_domain_nr(rpdev->bus), |
443 | rpdev->bus->number, rpdev->devfn, | |
c465def6 HY |
444 | rp_pos_cap_err); |
445 | list_add(&rperr->list, &einjected); | |
446 | } | |
447 | if (einj->cor_status) { | |
448 | if (rperr->root_status & PCI_ERR_ROOT_COR_RCV) | |
449 | rperr->root_status |= PCI_ERR_ROOT_MULTI_COR_RCV; | |
450 | else | |
451 | rperr->root_status |= PCI_ERR_ROOT_COR_RCV; | |
452 | rperr->source_id &= 0xffff0000; | |
453 | rperr->source_id |= (einj->bus << 8) | devfn; | |
454 | } | |
455 | if (einj->uncor_status) { | |
456 | if (rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV) | |
457 | rperr->root_status |= PCI_ERR_ROOT_MULTI_UNCOR_RCV; | |
458 | if (sever & einj->uncor_status) { | |
459 | rperr->root_status |= PCI_ERR_ROOT_FATAL_RCV; | |
460 | if (!(rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV)) | |
461 | rperr->root_status |= PCI_ERR_ROOT_FIRST_FATAL; | |
462 | } else | |
463 | rperr->root_status |= PCI_ERR_ROOT_NONFATAL_RCV; | |
464 | rperr->root_status |= PCI_ERR_ROOT_UNCOR_RCV; | |
465 | rperr->source_id &= 0x0000ffff; | |
466 | rperr->source_id |= ((einj->bus << 8) | devfn) << 16; | |
467 | } | |
468 | spin_unlock_irqrestore(&inject_lock, flags); | |
469 | ||
457d9d08 PB |
470 | if (aer_mask_override) { |
471 | pci_write_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK, | |
472 | cor_mask_orig); | |
473 | pci_write_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK, | |
474 | uncor_mask_orig); | |
475 | } | |
476 | ||
c465def6 HY |
477 | ret = pci_bus_set_aer_ops(dev->bus); |
478 | if (ret) | |
479 | goto out_put; | |
480 | ret = pci_bus_set_aer_ops(rpdev->bus); | |
481 | if (ret) | |
482 | goto out_put; | |
483 | ||
46256f83 YS |
484 | if (find_aer_device(rpdev, &edev)) { |
485 | if (!get_service_data(edev)) { | |
486 | printk(KERN_WARNING "AER service is not initialized\n"); | |
487 | ret = -EINVAL; | |
488 | goto out_put; | |
489 | } | |
c465def6 | 490 | aer_irq(-1, edev); |
3c78bc61 | 491 | } else |
c465def6 HY |
492 | ret = -EINVAL; |
493 | out_put: | |
c9a91883 HS |
494 | kfree(err_alloc); |
495 | kfree(rperr_alloc); | |
c465def6 HY |
496 | pci_dev_put(dev); |
497 | return ret; | |
498 | } | |
499 | ||
500 | static ssize_t aer_inject_write(struct file *filp, const char __user *ubuf, | |
501 | size_t usize, loff_t *off) | |
502 | { | |
503 | struct aer_error_inj einj; | |
504 | int ret; | |
505 | ||
506 | if (!capable(CAP_SYS_ADMIN)) | |
507 | return -EPERM; | |
cc5d153a AP |
508 | if (usize < offsetof(struct aer_error_inj, domain) || |
509 | usize > sizeof(einj)) | |
c465def6 HY |
510 | return -EINVAL; |
511 | ||
cc5d153a | 512 | memset(&einj, 0, sizeof(einj)); |
c465def6 HY |
513 | if (copy_from_user(&einj, ubuf, usize)) |
514 | return -EFAULT; | |
515 | ||
516 | ret = aer_inject(&einj); | |
517 | return ret ? ret : usize; | |
518 | } | |
519 | ||
/* File operations of the aer_inject misc device (write-only interface) */
static const struct file_operations aer_inject_fops = {
	.write = aer_inject_write,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};
525 | ||
/* Misc character device /dev/aer_inject */
static struct miscdevice aer_inject_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "aer_inject",
	.fops = &aer_inject_fops,
};
531 | ||
/* Module init: register /dev/aer_inject */
static int __init aer_inject_init(void)
{
	return misc_register(&aer_inject_device);
}
536 | ||
537 | static void __exit aer_inject_exit(void) | |
538 | { | |
539 | struct aer_error *err, *err_next; | |
540 | unsigned long flags; | |
541 | struct pci_bus_ops *bus_ops; | |
542 | ||
543 | misc_deregister(&aer_inject_device); | |
544 | ||
545 | while ((bus_ops = pci_bus_ops_pop())) { | |
546 | pci_bus_set_ops(bus_ops->bus, bus_ops->ops); | |
547 | kfree(bus_ops); | |
548 | } | |
549 | ||
550 | spin_lock_irqsave(&inject_lock, flags); | |
476f644e | 551 | list_for_each_entry_safe(err, err_next, &einjected, list) { |
c465def6 HY |
552 | list_del(&err->list); |
553 | kfree(err); | |
554 | } | |
555 | spin_unlock_irqrestore(&inject_lock, flags); | |
556 | } | |
557 | ||
558 | module_init(aer_inject_init); | |
559 | module_exit(aer_inject_exit); | |
560 | ||
7e8af37a | 561 | MODULE_DESCRIPTION("PCIe AER software error injector"); |
c465def6 | 562 | MODULE_LICENSE("GPL"); |