]>
Commit | Line | Data |
---|---|---|
c465def6 | 1 | /* |
45e829ea | 2 | * PCIe AER software error injection support. |
c465def6 | 3 | * |
45e829ea | 4 | * Debuging PCIe AER code is quite difficult because it is hard to |
c465def6 HY |
5 | * trigger various real hardware errors. Software based error |
6 | * injection can fake almost all kinds of errors with the help of a | |
7 | * user space helper tool aer-inject, which can be gotten from: | |
8 | * http://www.kernel.org/pub/linux/utils/pci/aer-inject/ | |
9 | * | |
10 | * Copyright 2009 Intel Corporation. | |
11 | * Huang Ying <ying.huang@intel.com> | |
12 | * | |
13 | * This program is free software; you can redistribute it and/or | |
14 | * modify it under the terms of the GNU General Public License | |
15 | * as published by the Free Software Foundation; version 2 | |
16 | * of the License. | |
17 | * | |
18 | */ | |
19 | ||
20 | #include <linux/module.h> | |
21 | #include <linux/init.h> | |
22 | #include <linux/miscdevice.h> | |
23 | #include <linux/pci.h> | |
5a0e3ad6 | 24 | #include <linux/slab.h> |
c465def6 | 25 | #include <linux/fs.h> |
c9a91883 | 26 | #include <linux/uaccess.h> |
cc5d153a | 27 | #include <linux/stddef.h> |
c465def6 HY |
28 | #include "aerdrv.h" |
29 | ||
/*
 * Override the existing corrected and uncorrected error masks:
 * when set, aer_inject() temporarily clears the mask bits matching
 * the injected errors so the device does not filter them out.
 * Load-time module parameter only (sysfs perm == 0).
 */
static bool aer_mask_override;
module_param(aer_mask_override, bool, 0);
/*
 * Error-injection request written by the user-space helper
 * (aer-inject) to the misc device.  Copied in with a single
 * copy_from_user(); "domain" was appended later, so shorter writes
 * that omit it are still accepted (see aer_inject_write()).
 */
struct aer_error_inj {
	u8 bus;			/* target device: bus number */
	u8 dev;			/* target device: device number */
	u8 fn;			/* target device: function number */
	u32 uncor_status;	/* uncorrectable error status bits to inject */
	u32 cor_status;		/* correctable error status bits to inject */
	u32 header_log0;	/* simulated TLP header log, dword 0 */
	u32 header_log1;	/* simulated TLP header log, dword 1 */
	u32 header_log2;	/* simulated TLP header log, dword 2 */
	u32 header_log3;	/* simulated TLP header log, dword 3 */
	u32 domain;		/* PCI domain (segment) of the target */
};
46 | ||
/*
 * Simulated AER register set for one device (or root port).  Linked
 * on the "einjected" list; list membership and contents are protected
 * by inject_lock.  Config accesses that hit the device's AER
 * capability are redirected to these fields.
 */
struct aer_error {
	struct list_head list;
	u32 domain;		/* PCI domain of the faked device */
	unsigned int bus;	/* bus number of the faked device */
	unsigned int devfn;	/* encoded device/function number */
	int pos_cap_err;	/* config offset of the AER capability; -1 if none */

	u32 uncor_status;	/* simulated PCI_ERR_UNCOR_STATUS */
	u32 cor_status;		/* simulated PCI_ERR_COR_STATUS */
	u32 header_log0;	/* simulated PCI_ERR_HEADER_LOG dwords */
	u32 header_log1;
	u32 header_log2;
	u32 header_log3;
	u32 root_status;	/* simulated PCI_ERR_ROOT_STATUS (root ports) */
	u32 source_id;		/* simulated PCI_ERR_ROOT_ERR_SRC */
};
63 | ||
/*
 * Remembers the original pci_ops of a bus whose ops were replaced by
 * aer_inj_pci_ops, so non-simulated accesses can be forwarded and the
 * ops restored at module exit.  Linked on pci_bus_ops_list under
 * inject_lock.
 */
struct pci_bus_ops {
	struct list_head list;
	struct pci_bus *bus;	/* bus whose config ops were overridden */
	struct pci_ops *ops;	/* that bus's original config ops */
};
69 | ||
/* All currently injected struct aer_error instances */
static LIST_HEAD(einjected);

/* All buses whose config ops have been replaced (struct pci_bus_ops) */
static LIST_HEAD(pci_bus_ops_list);

/* Protect einjected and pci_bus_ops_list */
static DEFINE_SPINLOCK(inject_lock);
28ef241f | 77 | static void aer_error_init(struct aer_error *err, u32 domain, |
cc5d153a AP |
78 | unsigned int bus, unsigned int devfn, |
79 | int pos_cap_err) | |
c465def6 HY |
80 | { |
81 | INIT_LIST_HEAD(&err->list); | |
cc5d153a | 82 | err->domain = domain; |
c465def6 HY |
83 | err->bus = bus; |
84 | err->devfn = devfn; | |
85 | err->pos_cap_err = pos_cap_err; | |
86 | } | |
87 | ||
88 | /* inject_lock must be held before calling */ | |
28ef241f | 89 | static struct aer_error *__find_aer_error(u32 domain, unsigned int bus, |
cc5d153a | 90 | unsigned int devfn) |
c465def6 HY |
91 | { |
92 | struct aer_error *err; | |
93 | ||
94 | list_for_each_entry(err, &einjected, list) { | |
cc5d153a AP |
95 | if (domain == err->domain && |
96 | bus == err->bus && | |
97 | devfn == err->devfn) | |
c465def6 HY |
98 | return err; |
99 | } | |
100 | return NULL; | |
101 | } | |
102 | ||
103 | /* inject_lock must be held before calling */ | |
104 | static struct aer_error *__find_aer_error_by_dev(struct pci_dev *dev) | |
105 | { | |
cc5d153a AP |
106 | int domain = pci_domain_nr(dev->bus); |
107 | if (domain < 0) | |
108 | return NULL; | |
28ef241f | 109 | return __find_aer_error(domain, dev->bus->number, dev->devfn); |
c465def6 HY |
110 | } |
111 | ||
112 | /* inject_lock must be held before calling */ | |
113 | static struct pci_ops *__find_pci_bus_ops(struct pci_bus *bus) | |
114 | { | |
115 | struct pci_bus_ops *bus_ops; | |
116 | ||
117 | list_for_each_entry(bus_ops, &pci_bus_ops_list, list) { | |
118 | if (bus_ops->bus == bus) | |
119 | return bus_ops->ops; | |
120 | } | |
121 | return NULL; | |
122 | } | |
123 | ||
124 | static struct pci_bus_ops *pci_bus_ops_pop(void) | |
125 | { | |
126 | unsigned long flags; | |
0e6053dc | 127 | struct pci_bus_ops *bus_ops; |
c465def6 HY |
128 | |
129 | spin_lock_irqsave(&inject_lock, flags); | |
0e6053dc GT |
130 | bus_ops = list_first_entry_or_null(&pci_bus_ops_list, |
131 | struct pci_bus_ops, list); | |
132 | if (bus_ops) | |
133 | list_del(&bus_ops->list); | |
c465def6 HY |
134 | spin_unlock_irqrestore(&inject_lock, flags); |
135 | return bus_ops; | |
136 | } | |
137 | ||
138 | static u32 *find_pci_config_dword(struct aer_error *err, int where, | |
139 | int *prw1cs) | |
140 | { | |
141 | int rw1cs = 0; | |
142 | u32 *target = NULL; | |
143 | ||
144 | if (err->pos_cap_err == -1) | |
145 | return NULL; | |
146 | ||
147 | switch (where - err->pos_cap_err) { | |
148 | case PCI_ERR_UNCOR_STATUS: | |
149 | target = &err->uncor_status; | |
150 | rw1cs = 1; | |
151 | break; | |
152 | case PCI_ERR_COR_STATUS: | |
153 | target = &err->cor_status; | |
154 | rw1cs = 1; | |
155 | break; | |
156 | case PCI_ERR_HEADER_LOG: | |
157 | target = &err->header_log0; | |
158 | break; | |
159 | case PCI_ERR_HEADER_LOG+4: | |
160 | target = &err->header_log1; | |
161 | break; | |
162 | case PCI_ERR_HEADER_LOG+8: | |
c9a91883 | 163 | target = &err->header_log2; |
c465def6 HY |
164 | break; |
165 | case PCI_ERR_HEADER_LOG+12: | |
166 | target = &err->header_log3; | |
167 | break; | |
168 | case PCI_ERR_ROOT_STATUS: | |
169 | target = &err->root_status; | |
170 | rw1cs = 1; | |
171 | break; | |
f647a44f | 172 | case PCI_ERR_ROOT_ERR_SRC: |
c465def6 HY |
173 | target = &err->source_id; |
174 | break; | |
175 | } | |
176 | if (prw1cs) | |
177 | *prw1cs = rw1cs; | |
178 | return target; | |
179 | } | |
180 | ||
/*
 * Config read op installed on buses with injected errors.  Dword
 * reads that land on a simulated AER register of an injected device
 * return the simulated value; all other accesses are forwarded to
 * the bus's original ops.
 */
static int aer_inj_read_config(struct pci_bus *bus, unsigned int devfn,
			       int where, int size, u32 *val)
{
	u32 *sim;
	struct aer_error *err;
	unsigned long flags;
	struct pci_ops *ops;
	struct pci_ops *my_ops;
	int domain;
	int rv;

	spin_lock_irqsave(&inject_lock, flags);
	/* Only full-dword accesses can hit a simulated register */
	if (size != sizeof(u32))
		goto out;
	domain = pci_domain_nr(bus);
	if (domain < 0)
		goto out;
	err = __find_aer_error(domain, bus->number, devfn);
	if (!err)
		goto out;

	sim = find_pci_config_dword(err, where, NULL);
	if (sim) {
		*val = *sim;
		spin_unlock_irqrestore(&inject_lock, flags);
		return 0;
	}
out:
	/* Not simulated: forward to the original ops saved for this bus */
	ops = __find_pci_bus_ops(bus);
	/*
	 * pci_lock must already be held, so we can directly
	 * manipulate bus->ops.  Many config access functions,
	 * including pci_generic_config_read() require the original
	 * bus->ops be installed to function, so temporarily put them
	 * back.
	 */
	my_ops = bus->ops;
	bus->ops = ops;
	rv = ops->read(bus, devfn, where, size, val);
	bus->ops = my_ops;
	spin_unlock_irqrestore(&inject_lock, flags);
	return rv;
}
224 | ||
/*
 * Config write op installed on buses with injected errors.  Dword
 * writes that land on a simulated AER register update the simulated
 * value; all other accesses are forwarded to the original ops.
 */
static int aer_inj_write_config(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 val)
{
	u32 *sim;
	struct aer_error *err;
	unsigned long flags;
	int rw1cs;
	struct pci_ops *ops;
	struct pci_ops *my_ops;
	int domain;
	int rv;

	spin_lock_irqsave(&inject_lock, flags);
	/* Only full-dword accesses can hit a simulated register */
	if (size != sizeof(u32))
		goto out;
	domain = pci_domain_nr(bus);
	if (domain < 0)
		goto out;
	err = __find_aer_error(domain, bus->number, devfn);
	if (!err)
		goto out;

	sim = find_pci_config_dword(err, where, &rw1cs);
	if (sim) {
		if (rw1cs)
			/*
			 * Emulate write-1-to-clear by XOR: bits that are
			 * set and written as 1 are cleared.  NOTE(review):
			 * unlike real RW1CS hardware this would also SET a
			 * clear bit written as 1 — presumably callers only
			 * write back bits they previously read as set.
			 */
			*sim ^= val;
		else
			*sim = val;
		spin_unlock_irqrestore(&inject_lock, flags);
		return 0;
	}
out:
	/* Not simulated: forward to the original ops saved for this bus */
	ops = __find_pci_bus_ops(bus);
	/*
	 * pci_lock must already be held, so we can directly
	 * manipulate bus->ops.  Many config access functions,
	 * including pci_generic_config_write() require the original
	 * bus->ops be installed to function, so temporarily put them
	 * back.
	 */
	my_ops = bus->ops;
	bus->ops = ops;
	rv = ops->write(bus, devfn, where, size, val);
	bus->ops = my_ops;
	spin_unlock_irqrestore(&inject_lock, flags);
	return rv;
}
272 | ||
/* Replacement config ops installed by pci_bus_set_aer_ops() */
static struct pci_ops aer_inj_pci_ops = {
	.read = aer_inj_read_config,
	.write = aer_inj_write_config,
};
277 | ||
278 | static void pci_bus_ops_init(struct pci_bus_ops *bus_ops, | |
279 | struct pci_bus *bus, | |
280 | struct pci_ops *ops) | |
281 | { | |
282 | INIT_LIST_HEAD(&bus_ops->list); | |
283 | bus_ops->bus = bus; | |
284 | bus_ops->ops = ops; | |
285 | } | |
286 | ||
/*
 * Install aer_inj_pci_ops on @bus (if not already installed) and
 * remember the original ops on pci_bus_ops_list so they can be
 * forwarded to and restored at module exit.  Returns 0 or -ENOMEM.
 */
static int pci_bus_set_aer_ops(struct pci_bus *bus)
{
	struct pci_ops *ops;
	struct pci_bus_ops *bus_ops;
	unsigned long flags;

	/* Allocate up front: GFP_KERNEL may sleep, inject_lock may not */
	bus_ops = kmalloc(sizeof(*bus_ops), GFP_KERNEL);
	if (!bus_ops)
		return -ENOMEM;
	ops = pci_bus_set_ops(bus, &aer_inj_pci_ops);
	spin_lock_irqsave(&inject_lock, flags);
	/* Ops were already ours: nothing to record, free the spare entry */
	if (ops == &aer_inj_pci_ops)
		goto out;
	pci_bus_ops_init(bus_ops, bus, ops);
	list_add(&bus_ops->list, &pci_bus_ops_list);
	bus_ops = NULL;		/* ownership transferred to the list */
out:
	spin_unlock_irqrestore(&inject_lock, flags);
	kfree(bus_ops);		/* no-op when the entry was enlisted */
	return 0;
}
308 | ||
309 | static struct pci_dev *pcie_find_root_port(struct pci_dev *dev) | |
310 | { | |
311 | while (1) { | |
b44d7db3 | 312 | if (!pci_is_pcie(dev)) |
c465def6 | 313 | break; |
62f87c0e | 314 | if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) |
c465def6 HY |
315 | return dev; |
316 | if (!dev->bus->self) | |
317 | break; | |
318 | dev = dev->bus->self; | |
319 | } | |
320 | return NULL; | |
321 | } | |
322 | ||
323 | static int find_aer_device_iter(struct device *device, void *data) | |
324 | { | |
325 | struct pcie_device **result = data; | |
326 | struct pcie_device *pcie_dev; | |
327 | ||
328 | if (device->bus == &pcie_port_bus_type) { | |
329 | pcie_dev = to_pcie_device(device); | |
330 | if (pcie_dev->service & PCIE_PORT_SERVICE_AER) { | |
331 | *result = pcie_dev; | |
332 | return 1; | |
333 | } | |
334 | } | |
335 | return 0; | |
336 | } | |
337 | ||
/*
 * Find the AER port service device attached to @dev (a root port).
 * Returns nonzero and stores the device via @result on success.
 */
static int find_aer_device(struct pci_dev *dev, struct pcie_device **result)
{
	return device_for_each_child(&dev->dev, result, find_aer_device_iter);
}
342 | ||
343 | static int aer_inject(struct aer_error_inj *einj) | |
344 | { | |
345 | struct aer_error *err, *rperr; | |
346 | struct aer_error *err_alloc = NULL, *rperr_alloc = NULL; | |
347 | struct pci_dev *dev, *rpdev; | |
348 | struct pcie_device *edev; | |
349 | unsigned long flags; | |
350 | unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn); | |
351 | int pos_cap_err, rp_pos_cap_err; | |
40294d8f | 352 | u32 sever, cor_mask, uncor_mask, cor_mask_orig = 0, uncor_mask_orig = 0; |
c465def6 HY |
353 | int ret = 0; |
354 | ||
28ef241f | 355 | dev = pci_get_domain_bus_and_slot(einj->domain, einj->bus, devfn); |
c465def6 | 356 | if (!dev) |
1d024355 | 357 | return -ENODEV; |
c465def6 HY |
358 | rpdev = pcie_find_root_port(dev); |
359 | if (!rpdev) { | |
e82b14bd | 360 | ret = -ENODEV; |
c465def6 HY |
361 | goto out_put; |
362 | } | |
363 | ||
364 | pos_cap_err = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); | |
365 | if (!pos_cap_err) { | |
20ac75e5 | 366 | ret = -EPROTONOSUPPORT; |
c465def6 HY |
367 | goto out_put; |
368 | } | |
369 | pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever); | |
bd1f46de AP |
370 | pci_read_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK, &cor_mask); |
371 | pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK, | |
372 | &uncor_mask); | |
c465def6 HY |
373 | |
374 | rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR); | |
375 | if (!rp_pos_cap_err) { | |
20ac75e5 | 376 | ret = -EPROTONOSUPPORT; |
c465def6 HY |
377 | goto out_put; |
378 | } | |
379 | ||
380 | err_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL); | |
381 | if (!err_alloc) { | |
382 | ret = -ENOMEM; | |
383 | goto out_put; | |
384 | } | |
385 | rperr_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL); | |
386 | if (!rperr_alloc) { | |
387 | ret = -ENOMEM; | |
388 | goto out_put; | |
389 | } | |
390 | ||
457d9d08 PB |
391 | if (aer_mask_override) { |
392 | cor_mask_orig = cor_mask; | |
393 | cor_mask &= !(einj->cor_status); | |
394 | pci_write_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK, | |
395 | cor_mask); | |
396 | ||
397 | uncor_mask_orig = uncor_mask; | |
398 | uncor_mask &= !(einj->uncor_status); | |
399 | pci_write_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK, | |
400 | uncor_mask); | |
401 | } | |
402 | ||
c465def6 HY |
403 | spin_lock_irqsave(&inject_lock, flags); |
404 | ||
405 | err = __find_aer_error_by_dev(dev); | |
406 | if (!err) { | |
407 | err = err_alloc; | |
408 | err_alloc = NULL; | |
cc5d153a AP |
409 | aer_error_init(err, einj->domain, einj->bus, devfn, |
410 | pos_cap_err); | |
c465def6 HY |
411 | list_add(&err->list, &einjected); |
412 | } | |
413 | err->uncor_status |= einj->uncor_status; | |
414 | err->cor_status |= einj->cor_status; | |
415 | err->header_log0 = einj->header_log0; | |
416 | err->header_log1 = einj->header_log1; | |
417 | err->header_log2 = einj->header_log2; | |
418 | err->header_log3 = einj->header_log3; | |
419 | ||
457d9d08 PB |
420 | if (!aer_mask_override && einj->cor_status && |
421 | !(einj->cor_status & ~cor_mask)) { | |
b49bfd32 | 422 | ret = -EINVAL; |
227f0647 | 423 | printk(KERN_WARNING "The correctable error(s) is masked by device\n"); |
b49bfd32 YS |
424 | spin_unlock_irqrestore(&inject_lock, flags); |
425 | goto out_put; | |
426 | } | |
457d9d08 PB |
427 | if (!aer_mask_override && einj->uncor_status && |
428 | !(einj->uncor_status & ~uncor_mask)) { | |
b49bfd32 | 429 | ret = -EINVAL; |
227f0647 | 430 | printk(KERN_WARNING "The uncorrectable error(s) is masked by device\n"); |
b49bfd32 YS |
431 | spin_unlock_irqrestore(&inject_lock, flags); |
432 | goto out_put; | |
433 | } | |
434 | ||
c465def6 HY |
435 | rperr = __find_aer_error_by_dev(rpdev); |
436 | if (!rperr) { | |
437 | rperr = rperr_alloc; | |
438 | rperr_alloc = NULL; | |
cc5d153a AP |
439 | aer_error_init(rperr, pci_domain_nr(rpdev->bus), |
440 | rpdev->bus->number, rpdev->devfn, | |
c465def6 HY |
441 | rp_pos_cap_err); |
442 | list_add(&rperr->list, &einjected); | |
443 | } | |
444 | if (einj->cor_status) { | |
445 | if (rperr->root_status & PCI_ERR_ROOT_COR_RCV) | |
446 | rperr->root_status |= PCI_ERR_ROOT_MULTI_COR_RCV; | |
447 | else | |
448 | rperr->root_status |= PCI_ERR_ROOT_COR_RCV; | |
449 | rperr->source_id &= 0xffff0000; | |
450 | rperr->source_id |= (einj->bus << 8) | devfn; | |
451 | } | |
452 | if (einj->uncor_status) { | |
453 | if (rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV) | |
454 | rperr->root_status |= PCI_ERR_ROOT_MULTI_UNCOR_RCV; | |
455 | if (sever & einj->uncor_status) { | |
456 | rperr->root_status |= PCI_ERR_ROOT_FATAL_RCV; | |
457 | if (!(rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV)) | |
458 | rperr->root_status |= PCI_ERR_ROOT_FIRST_FATAL; | |
459 | } else | |
460 | rperr->root_status |= PCI_ERR_ROOT_NONFATAL_RCV; | |
461 | rperr->root_status |= PCI_ERR_ROOT_UNCOR_RCV; | |
462 | rperr->source_id &= 0x0000ffff; | |
463 | rperr->source_id |= ((einj->bus << 8) | devfn) << 16; | |
464 | } | |
465 | spin_unlock_irqrestore(&inject_lock, flags); | |
466 | ||
457d9d08 PB |
467 | if (aer_mask_override) { |
468 | pci_write_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK, | |
469 | cor_mask_orig); | |
470 | pci_write_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK, | |
471 | uncor_mask_orig); | |
472 | } | |
473 | ||
c465def6 HY |
474 | ret = pci_bus_set_aer_ops(dev->bus); |
475 | if (ret) | |
476 | goto out_put; | |
477 | ret = pci_bus_set_aer_ops(rpdev->bus); | |
478 | if (ret) | |
479 | goto out_put; | |
480 | ||
46256f83 YS |
481 | if (find_aer_device(rpdev, &edev)) { |
482 | if (!get_service_data(edev)) { | |
483 | printk(KERN_WARNING "AER service is not initialized\n"); | |
20ac75e5 | 484 | ret = -EPROTONOSUPPORT; |
46256f83 YS |
485 | goto out_put; |
486 | } | |
c465def6 | 487 | aer_irq(-1, edev); |
3c78bc61 | 488 | } else |
20ac75e5 | 489 | ret = -ENODEV; |
c465def6 | 490 | out_put: |
c9a91883 HS |
491 | kfree(err_alloc); |
492 | kfree(rperr_alloc); | |
c465def6 HY |
493 | pci_dev_put(dev); |
494 | return ret; | |
495 | } | |
496 | ||
497 | static ssize_t aer_inject_write(struct file *filp, const char __user *ubuf, | |
498 | size_t usize, loff_t *off) | |
499 | { | |
500 | struct aer_error_inj einj; | |
501 | int ret; | |
502 | ||
503 | if (!capable(CAP_SYS_ADMIN)) | |
504 | return -EPERM; | |
cc5d153a AP |
505 | if (usize < offsetof(struct aer_error_inj, domain) || |
506 | usize > sizeof(einj)) | |
c465def6 HY |
507 | return -EINVAL; |
508 | ||
cc5d153a | 509 | memset(&einj, 0, sizeof(einj)); |
c465def6 HY |
510 | if (copy_from_user(&einj, ubuf, usize)) |
511 | return -EFAULT; | |
512 | ||
513 | ret = aer_inject(&einj); | |
514 | return ret ? ret : usize; | |
515 | } | |
516 | ||
/* File operations of the injection misc device: write-only, no seek */
static const struct file_operations aer_inject_fops = {
	.write = aer_inject_write,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

/* /dev/aer_inject, minor number assigned dynamically */
static struct miscdevice aer_inject_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "aer_inject",
	.fops = &aer_inject_fops,
};
528 | ||
/* Module load: register the "aer_inject" misc character device */
static int __init aer_inject_init(void)
{
	return misc_register(&aer_inject_device);
}
533 | ||
/*
 * Module unload: restore the original config ops of every hooked bus,
 * then free all simulated error records.
 */
static void __exit aer_inject_exit(void)
{
	struct aer_error *err, *err_next;
	unsigned long flags;
	struct pci_bus_ops *bus_ops;

	misc_deregister(&aer_inject_device);

	/* Put back each bus's original pci_ops and drop the bookkeeping */
	while ((bus_ops = pci_bus_ops_pop())) {
		pci_bus_set_ops(bus_ops->bus, bus_ops->ops);
		kfree(bus_ops);
	}

	/* Free every injected error record */
	spin_lock_irqsave(&inject_lock, flags);
	list_for_each_entry_safe(err, err_next, &einjected, list) {
		list_del(&err->list);
		kfree(err);
	}
	spin_unlock_irqrestore(&inject_lock, flags);
}
554 | ||
/* Module entry/exit points and metadata */
module_init(aer_inject_init);
module_exit(aer_inject_exit);

MODULE_DESCRIPTION("PCIe AER software error injector");
MODULE_LICENSE("GPL");