/*
 * File:	msi.c
 * Purpose:	PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 */

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/smp_lock.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/msi.h>

#include <asm/errno.h>
#include <asm/io.h>
#include <asm/smp.h>

#include "pci.h"
#include "msi.h"

static struct kmem_cache* msi_cachep;

static int pci_msi_enable = 1;

static int msi_cache_init(void)
{
	msi_cachep = kmem_cache_create("msi_cache", sizeof(struct msi_desc),
				       0, SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!msi_cachep)
		return -ENOMEM;

	return 0;
}

static void msi_set_enable(struct pci_dev *dev, int enable)
{
	int pos;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
		control &= ~PCI_MSI_FLAGS_ENABLE;
		if (enable)
			control |= PCI_MSI_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	}
}

static void msix_set_enable(struct pci_dev *dev, int enable)
{
	int pos;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
		control &= ~PCI_MSIX_FLAGS_ENABLE;
		if (enable)
			control |= PCI_MSIX_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
	}
}

static void msix_flush_writes(unsigned int irq)
{
	struct msi_desc *entry;

	entry = get_irq_msi(irq);
	BUG_ON(!entry || !entry->dev);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
		/* nothing to do */
		break;
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		readl(entry->mask_base + offset);
		break;
	}
	default:
		BUG();
		break;
	}
}

static void msi_set_mask_bit(unsigned int irq, int flag)
{
	struct msi_desc *entry;

	entry = get_irq_msi(irq);
	BUG_ON(!entry || !entry->dev);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
		if (entry->msi_attrib.maskbit) {
			int pos;
			u32 mask_bits;

			/* for MSI, mask_base holds the config space offset
			 * of the mask bits register */
			pos = (long)entry->mask_base;
			pci_read_config_dword(entry->dev, pos, &mask_bits);
			mask_bits &= ~(1);
			mask_bits |= flag;
			pci_write_config_dword(entry->dev, pos, mask_bits);
		} else {
			msi_set_enable(entry->dev, !flag);
		}
		break;
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		writel(flag, entry->mask_base + offset);
		readl(entry->mask_base + offset);
		break;
	}
	default:
		BUG();
		break;
	}
	entry->msi_attrib.masked = !!flag;
}

void read_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_msi(irq);
	switch(entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;
		u16 data;

		pci_read_config_dword(dev, msi_lower_address_reg(pos),
					&msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_read_config_dword(dev, msi_upper_address_reg(pos),
						&msg->address_hi);
			pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
		} else {
			msg->address_hi = 0;
			pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
		}
		msg->data = data;
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
}

void write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_msi(irq);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;

		pci_write_config_dword(dev, msi_lower_address_reg(pos),
					msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_write_config_dword(dev, msi_upper_address_reg(pos),
						msg->address_hi);
			pci_write_config_word(dev, msi_data_reg(pos, 1),
						msg->data);
		} else {
			pci_write_config_word(dev, msi_data_reg(pos, 0),
						msg->data);
		}
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		writel(msg->address_lo,
			base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(msg->address_hi,
			base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
	entry->msg = *msg;
}
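
/*
 * mask_msi_irq() and unmask_msi_irq() below are typically used by the
 * architecture's MSI irq_chip as its mask/unmask hooks.  The read-back
 * in msix_flush_writes() flushes the posted MMIO write to the MSI-X
 * vector control word, so a mask change has reached the device before
 * these return.
 */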

void mask_msi_irq(unsigned int irq)
{
	msi_set_mask_bit(irq, 1);
	msix_flush_writes(irq);
}

void unmask_msi_irq(unsigned int irq)
{
	msi_set_mask_bit(irq, 0);
	msix_flush_writes(irq);
}

static int msi_free_irq(struct pci_dev* dev, int irq);

static int msi_init(void)
{
	static int status = -ENOMEM;

	if (!status)
		return status;

	status = msi_cache_init();
	if (status < 0) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: MSI cache init failed\n");
		return status;
	}

	return status;
}

static struct msi_desc* alloc_msi_entry(void)
{
	struct msi_desc *entry;

	entry = kmem_cache_zalloc(msi_cachep, GFP_KERNEL);
	if (!entry)
		return NULL;

	entry->link.tail = entry->link.head = 0;	/* single message */
	entry->dev = NULL;

	return entry;
}

#ifdef CONFIG_PM
static void __pci_restore_msi_state(struct pci_dev *dev)
{
	int pos;
	u16 control;
	struct msi_desc *entry;

	if (!dev->msi_enabled)
		return;

	entry = get_irq_msi(dev->irq);
	pos = entry->msi_attrib.pos;

	pci_intx(dev, 0);		/* disable intx */
	msi_set_enable(dev, 0);
	write_msi_msg(dev->irq, &entry->msg);
	if (entry->msi_attrib.maskbit)
		msi_set_mask_bit(dev->irq, entry->msi_attrib.masked);

	pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
	control &= ~(PCI_MSI_FLAGS_QSIZE | PCI_MSI_FLAGS_ENABLE);
	if (entry->msi_attrib.maskbit || !entry->msi_attrib.masked)
		control |= PCI_MSI_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
}

static void __pci_restore_msix_state(struct pci_dev *dev)
{
	int pos;
	int irq, head, tail = 0;
	struct msi_desc *entry;
	u16 control;

	if (!dev->msix_enabled)
		return;

	/* route the table */
	pci_intx(dev, 0);		/* disable intx */
	msix_set_enable(dev, 0);
	irq = head = dev->first_msi_irq;
	entry = get_irq_msi(irq);
	pos = entry->msi_attrib.pos;
	while (head != tail) {
		entry = get_irq_msi(irq);
		write_msi_msg(irq, &entry->msg);
		msi_set_mask_bit(irq, entry->msi_attrib.masked);

		tail = entry->link.tail;
		irq = tail;
	}

	pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
	control &= ~PCI_MSIX_FLAGS_MASKALL;
	control |= PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
}

void pci_restore_msi_state(struct pci_dev *dev)
{
	__pci_restore_msi_state(dev);
	__pci_restore_msix_state(dev);
}
#endif /* CONFIG_PM */

/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of the device function with a single
 * MSI irq, regardless of whether the device function is capable of
 * handling multiple messages. A return of zero indicates the successful
 * setup of entry zero with the new MSI irq; a non-zero return indicates
 * failure.
 **/
static int msi_capability_init(struct pci_dev *dev)
{
	struct msi_desc *entry;
	int pos, irq;
	u16 control;

	msi_set_enable(dev, 0);	/* Ensure msi is disabled as I set it up */

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	/* MSI Entry Initialization */
	entry = alloc_msi_entry();
	if (!entry)
		return -ENOMEM;

	entry->msi_attrib.type = PCI_CAP_ID_MSI;
	entry->msi_attrib.is_64 = is_64bit_address(control);
	entry->msi_attrib.entry_nr = 0;
	entry->msi_attrib.maskbit = is_mask_bit_support(control);
	entry->msi_attrib.masked = 1;
	entry->msi_attrib.default_irq = dev->irq;	/* Save IOAPIC IRQ */
	entry->msi_attrib.pos = pos;
	if (is_mask_bit_support(control)) {
		/* for MSI, mask_base stores the config space offset of the
		 * mask bits register rather than an MMIO address */
		entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
				is_64bit_address(control));
	}
	entry->dev = dev;
	if (entry->msi_attrib.maskbit) {
		unsigned int maskbits, temp;
		/* All MSIs are unmasked by default; mask them all */
		pci_read_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			&maskbits);
		temp = (1 << multi_msi_capable(control));
		temp = ((temp - 1) & ~temp);
		maskbits |= temp;
		pci_write_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			maskbits);
	}
	/* Configure MSI capability structure */
	irq = arch_setup_msi_irq(dev, entry);
	if (irq < 0) {
		kmem_cache_free(msi_cachep, entry);
		return irq;
	}
	entry->link.head = irq;
	entry->link.tail = irq;
	dev->first_msi_irq = irq;
	set_irq_msi(irq, entry);

	/* Set MSI enabled bits	*/
	pci_intx(dev, 0);		/* disable intx */
	msi_set_enable(dev, 1);
	dev->msi_enabled = 1;

	dev->irq = irq;
	return 0;
}
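
/*
 * For reference (per the PCI spec), the MSI capability registers being
 * programmed above, as offsets from "pos"; the 64-bit layout is shown,
 * with the 32-bit offsets in parentheses:
 *
 *	pos + 0x00: Capability ID, Next Pointer, Message Control
 *	pos + 0x04: Message Address (lower 32 bits)
 *	pos + 0x08: Message Address (upper 32 bits)
 *	pos + 0x0c: Message Data		(pos + 0x08 if 32-bit)
 *	pos + 0x10: Mask Bits, if supported	(pos + 0x0c if 32-bit)
 */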

/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Setup the MSI-X capability structure of the device function with the
 * requested number of MSI-X irqs. A return of zero indicates the
 * successful setup of the requested MSI-X entries with allocated irqs;
 * a non-zero return indicates failure.
 **/
static int msix_capability_init(struct pci_dev *dev,
				struct msix_entry *entries, int nvec)
{
	struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
	int irq, pos, i, j, nr_entries, temp = 0;
	unsigned long phys_addr;
	u32 table_offset;
	u16 control;
	u8 bir;
	void __iomem *base;

	msix_set_enable(dev, 0);	/* Ensure msix is disabled as I set it up */

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	/* Request & Map MSI-X table region */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);

	pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
	bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
	table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
	phys_addr = pci_resource_start(dev, bir) + table_offset;
	base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
	if (base == NULL)
		return -ENOMEM;

	/* MSI-X Table Initialization */
	for (i = 0; i < nvec; i++) {
		entry = alloc_msi_entry();
		if (!entry)
			break;

		j = entries[i].entry;
		entry->msi_attrib.type = PCI_CAP_ID_MSIX;
		entry->msi_attrib.is_64 = 1;
		entry->msi_attrib.entry_nr = j;
		entry->msi_attrib.maskbit = 1;
		entry->msi_attrib.masked = 1;
		entry->msi_attrib.default_irq = dev->irq;
		entry->msi_attrib.pos = pos;
		entry->dev = dev;
		entry->mask_base = base;

		/* Configure MSI-X capability structure */
		irq = arch_setup_msi_irq(dev, entry);
		if (irq < 0) {
			kmem_cache_free(msi_cachep, entry);
			break;
		}
		entries[i].vector = irq;
		if (!head) {
			entry->link.head = irq;
			entry->link.tail = irq;
			head = entry;
		} else {
			entry->link.head = temp;
			entry->link.tail = tail->link.tail;
			tail->link.tail = irq;
			head->link.head = irq;
		}
		temp = irq;
		tail = entry;

		set_irq_msi(irq, entry);
	}
	if (i != nvec) {
		int avail = i - 1;
		i--;
		for (; i >= 0; i--) {
			irq = (entries + i)->vector;
			msi_free_irq(dev, irq);
			(entries + i)->vector = 0;
		}
		/* If we had some success report the number of irqs
		 * we succeeded in setting up.
		 */
		if (avail <= 0)
			avail = -EBUSY;
		return avail;
	}
	dev->first_msi_irq = entries[0].vector;
	/* Set MSI-X enabled bits */
	pci_intx(dev, 0);		/* disable intx */
	msix_set_enable(dev, 1);
	dev->msix_enabled = 1;

	return 0;
}
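
/*
 * For reference (per the PCI spec), each MSI-X table entry mapped at
 * "base" above is PCI_MSIX_ENTRY_SIZE (16) bytes, laid out as:
 *
 *	+0x0: Message Address (lower 32 bits)
 *	+0x4: Message Address (upper 32 bits)
 *	+0x8: Message Data
 *	+0xc: Vector Control (bit 0 is the per-vector mask bit)
 */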

/**
 * pci_msi_supported - check whether MSI may be enabled on device
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Look at global flags, the device itself, and its parent buses to
 * determine whether MSI may be enabled. Returns 0 if MSI is supported
 * for the device, -EINVAL otherwise.
 **/
static
int pci_msi_supported(struct pci_dev * dev)
{
	struct pci_bus *bus;

	/* MSI must be globally enabled and supported by the device */
	if (!pci_msi_enable || !dev || dev->no_msi)
		return -EINVAL;

	/* Any bridge which does NOT route MSI transactions from its
	 * secondary bus to its primary bus must set the NO_MSI flag on
	 * the secondary pci_bus.
	 * We expect only arch-specific PCI host bus controller drivers
	 * or quirks for specific PCI bridges to set NO_MSI.
	 */
	for (bus = dev->bus; bus; bus = bus->parent)
		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			return -EINVAL;

	return 0;
}
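
/*
 * Illustrative sketch (not part of this file): a bridge quirk of the
 * kind mentioned above would mark its secondary bus roughly like this;
 * the quirk name, vendor and device IDs are hypothetical:
 *
 *	static void __devinit quirk_no_msi_bridge(struct pci_dev *dev)
 *	{
 *		if (dev->subordinate)
 *			dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
 *	}
 *	DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FOO, 0x1234,
 *				quirk_no_msi_bridge);
 */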

/**
 * pci_enable_msi - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of the device function with a
 * single MSI irq when the device driver requests MSI mode for its
 * hardware device function. A return of zero indicates the successful
 * setup of entry zero with the new MSI irq; a non-zero return indicates
 * failure.
 **/
int pci_enable_msi(struct pci_dev* dev)
{
	int pos, status;

	if (pci_msi_supported(dev) < 0)
		return -EINVAL;

	status = msi_init();
	if (status < 0)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!pos)
		return -EINVAL;

	WARN_ON(!!dev->msi_enabled);

	/* Check whether driver already requested for MSI-X irqs */
	if (dev->msix_enabled) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI. "
			"Device already has MSI-X enabled\n",
			pci_name(dev));
		return -EINVAL;
	}
	status = msi_capability_init(dev);
	return status;
}
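
/*
 * Illustrative sketch (not part of this file): a typical driver call
 * sequence for pci_enable_msi(); the handler and name strings are
 * hypothetical:
 *
 *	if (pci_enable_msi(pdev))
 *		dev_info(&pdev->dev, "MSI not available, using INTx\n");
 *	err = request_irq(pdev->irq, foo_interrupt, 0, "foo", foo);
 *
 * On success pdev->irq has been replaced with the new MSI irq; on
 * failure it still holds the pin-based (INTx) irq, so request_irq()
 * works either way.
 */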

void pci_disable_msi(struct pci_dev* dev)
{
	struct msi_desc *entry;
	int default_irq;

	if (!pci_msi_enable)
		return;
	if (!dev)
		return;

	if (!dev->msi_enabled)
		return;

	msi_set_enable(dev, 0);
	pci_intx(dev, 1);		/* enable intx */
	dev->msi_enabled = 0;

	entry = get_irq_msi(dev->first_msi_irq);
	if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
		return;
	}

	BUG_ON(irq_has_action(dev->first_msi_irq));

	default_irq = entry->msi_attrib.default_irq;
	msi_free_irq(dev, dev->first_msi_irq);

	/* Restore dev->irq to its default pin-assertion irq */
	dev->irq = default_irq;

	dev->first_msi_irq = 0;
}

static int msi_free_irq(struct pci_dev* dev, int irq)
{
	struct msi_desc *entry;
	int head, entry_nr, type;
	void __iomem *base;

	entry = get_irq_msi(irq);
	if (!entry || entry->dev != dev) {
		return -EINVAL;
	}
	type = entry->msi_attrib.type;
	entry_nr = entry->msi_attrib.entry_nr;
	head = entry->link.head;
	base = entry->mask_base;
	get_irq_msi(entry->link.head)->link.tail = entry->link.tail;
	get_irq_msi(entry->link.tail)->link.head = entry->link.head;

	arch_teardown_msi_irq(irq);
	kmem_cache_free(msi_cachep, entry);

	if (type == PCI_CAP_ID_MSIX) {
		/* mask the released vector; the table is unmapped when the
		 * last remaining entry on the device's list is freed */
		writel(1, base + entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);

		if (head == irq)
			iounmap(base);
	}

	return 0;
}
/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @nvec: number of MSI-X irqs requested for allocation by device driver
 *
 * Setup the MSI-X capability structure of the device function with the
 * number of requested irqs when the device driver requests MSI-X mode
 * for its hardware device function. A return of zero indicates that the
 * MSI-X capability structure was successfully configured with the newly
 * allocated MSI-X irqs. A return of < 0 indicates a failure. A return of
 * > 0 indicates that the request exceeded the number of irqs available;
 * the driver should then retry with at most the returned number of irqs.
 **/
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
{
	int status, pos, nr_entries;
	int i, j;
	u16 control;

	if (!entries || pci_msi_supported(dev) < 0)
		return -EINVAL;

	status = msi_init();
	if (status < 0)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (!pos)
		return -EINVAL;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);
	if (nvec > nr_entries)
		return -EINVAL;

	/* Check for any invalid entries */
	for (i = 0; i < nvec; i++) {
		if (entries[i].entry >= nr_entries)
			return -EINVAL;		/* invalid entry */
		for (j = i + 1; j < nvec; j++) {
			if (entries[i].entry == entries[j].entry)
				return -EINVAL;	/* duplicate entry */
		}
	}
	WARN_ON(!!dev->msix_enabled);

	/* Check whether driver already requested for MSI irq */
	if (dev->msi_enabled) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI-X. "
			"Device already has an MSI irq assigned\n",
			pci_name(dev));
		return -EINVAL;
	}
	status = msix_capability_init(dev, entries, nvec);
	return status;
}
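
/*
 * Illustrative sketch (not part of this file): the retry pattern the
 * kernel-doc above describes, for a hypothetical driver wanting up to
 * eight vectors:
 *
 *	struct msix_entry entries[8];
 *	int i, err, nvec = 8;
 *
 *	for (i = 0; i < nvec; i++)
 *		entries[i].entry = i;
 *	while ((err = pci_enable_msix(pdev, entries, nvec)) > 0)
 *		nvec = err;	// fewer irqs available; retry with that many
 *	if (err < 0)
 *		return err;	// fall back to pci_enable_msi() or INTx
 *	// entries[i].vector now holds the irq for table slot entries[i].entry
 */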

static void msix_free_all_irqs(struct pci_dev *dev)
{
	int irq, head, tail = 0;

	irq = head = dev->first_msi_irq;
	while (head != tail) {
		tail = get_irq_msi(irq)->link.tail;

		BUG_ON(irq_has_action(irq));

		if (irq != head)
			msi_free_irq(dev, irq);
		irq = tail;
	}
	msi_free_irq(dev, irq);
	dev->first_msi_irq = 0;
}

void pci_disable_msix(struct pci_dev* dev)
{
	if (!pci_msi_enable)
		return;
	if (!dev)
		return;

	if (!dev->msix_enabled)
		return;

	msix_set_enable(dev, 0);
	pci_intx(dev, 1);		/* enable intx */
	dev->msix_enabled = 0;

	msix_free_all_irqs(dev);
}

/**
 * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state
 * @dev: pointer to the pci_dev data structure of MSI(X) device function
 *
 * Called during hotplug removal of the device function. All MSI/MSI-X
 * irqs previously allocated for this device function are reclaimed to
 * an unused state, so they may be reused later on.
 **/
void msi_remove_pci_irq_vectors(struct pci_dev* dev)
{
	if (!pci_msi_enable || !dev)
		return;

	if (dev->msi_enabled) {
		BUG_ON(irq_has_action(dev->first_msi_irq));
		msi_free_irq(dev, dev->first_msi_irq);
	}

	if (dev->msix_enabled)
		msix_free_all_irqs(dev);
}

void pci_no_msi(void)
{
	pci_msi_enable = 0;
}

EXPORT_SYMBOL(pci_enable_msi);
EXPORT_SYMBOL(pci_disable_msi);
EXPORT_SYMBOL(pci_enable_msix);
EXPORT_SYMBOL(pci_disable_msix);