/*
 * File:        msi.c
 * Purpose:     PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 */

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/msi.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>

#include "pci.h"

static int pci_msi_enable = 1;
int pci_msi_ignore_mask;

#define msix_table_size(flags)  ((flags & PCI_MSIX_FLAGS_QSIZE) + 1)

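/*
 * The MSI-X Table Size field in the Message Control register is encoded
 * as N - 1, which is why msix_table_size() adds one: a raw QSIZE value
 * of 0 means a single table entry, and a value of 7 means eight.
 */
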
#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
static struct irq_domain *pci_msi_default_domain;
static DEFINE_MUTEX(pci_msi_domain_lock);

struct irq_domain * __weak arch_get_pci_msi_domain(struct pci_dev *dev)
{
        return pci_msi_default_domain;
}

static struct irq_domain *pci_msi_get_domain(struct pci_dev *dev)
{
        struct irq_domain *domain;

        domain = dev_get_msi_domain(&dev->dev);
        if (domain)
                return domain;

        return arch_get_pci_msi_domain(dev);
}

static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
        struct irq_domain *domain;

        domain = pci_msi_get_domain(dev);
        if (domain)
                return pci_msi_domain_alloc_irqs(domain, dev, nvec, type);

        return arch_setup_msi_irqs(dev, nvec, type);
}

static void pci_msi_teardown_msi_irqs(struct pci_dev *dev)
{
        struct irq_domain *domain;

        domain = pci_msi_get_domain(dev);
        if (domain)
                pci_msi_domain_free_irqs(domain, dev);
        else
                arch_teardown_msi_irqs(dev);
}
#else
#define pci_msi_setup_msi_irqs          arch_setup_msi_irqs
#define pci_msi_teardown_msi_irqs       arch_teardown_msi_irqs
#endif

/* Arch hooks */

int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
        struct msi_controller *chip = dev->bus->msi;
        int err;

        if (!chip || !chip->setup_irq)
                return -EINVAL;

        err = chip->setup_irq(chip, dev, desc);
        if (err < 0)
                return err;

        irq_set_chip_data(desc->irq, chip);

        return 0;
}

void __weak arch_teardown_msi_irq(unsigned int irq)
{
        struct msi_controller *chip = irq_get_chip_data(irq);

        if (!chip || !chip->teardown_irq)
                return;

        chip->teardown_irq(chip, irq);
}

int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
        struct msi_desc *entry;
        int ret;

        /*
         * If an architecture wants to support multiple MSI, it needs to
         * override arch_setup_msi_irqs().
         */
        if (type == PCI_CAP_ID_MSI && nvec > 1)
                return 1;

        for_each_pci_msi_entry(entry, dev) {
                ret = arch_setup_msi_irq(dev, entry);
                if (ret < 0)
                        return ret;
                if (ret > 0)
                        return -ENOSPC;
        }

        return 0;
}

/*
 * We have a default implementation available as a separate non-weak
 * function, as it is used by the Xen x86 PCI code.
 */
void default_teardown_msi_irqs(struct pci_dev *dev)
{
        int i;
        struct msi_desc *entry;

        for_each_pci_msi_entry(entry, dev)
                if (entry->irq)
                        for (i = 0; i < entry->nvec_used; i++)
                                arch_teardown_msi_irq(entry->irq + i);
}

void __weak arch_teardown_msi_irqs(struct pci_dev *dev)
{
        default_teardown_msi_irqs(dev);
}

static void default_restore_msi_irq(struct pci_dev *dev, int irq)
{
        struct msi_desc *entry;

        entry = NULL;
        if (dev->msix_enabled) {
                for_each_pci_msi_entry(entry, dev) {
                        if (irq == entry->irq)
                                break;
                }
        } else if (dev->msi_enabled) {
                entry = irq_get_msi_desc(irq);
        }

        if (entry)
                __pci_write_msi_msg(entry, &entry->msg);
}

void __weak arch_restore_msi_irqs(struct pci_dev *dev)
{
        default_restore_msi_irqs(dev);
}

static inline __attribute_const__ u32 msi_mask(unsigned x)
{
        /* Don't shift by >= width of type */
        if (x >= 5)
                return 0xffffffff;
        return (1 << (1 << x)) - 1;
}

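/*
 * Worked examples for msi_mask(): the argument is the log2 vector count
 * from the Multiple Message Capable field, so msi_mask(0) == 0x01
 * (1 vector), msi_mask(1) == 0x03 (2 vectors) and msi_mask(3) == 0xff
 * (8 vectors), saturating at 0xffffffff for out-of-range values.
 */
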
/*
 * PCI 2.3 does not specify mask bits for each MSI interrupt. Attempting to
 * mask all MSI interrupts by clearing the MSI enable bit does not work
 * reliably as devices without an INTx disable bit will then generate a
 * level IRQ which will never be cleared.
 */
u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
        u32 mask_bits = desc->masked;

        if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit)
                return 0;

        mask_bits &= ~mask;
        mask_bits |= flag;
        pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos,
                               mask_bits);

        return mask_bits;
}

static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
        desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag);
}

/*
 * This internal function does not flush PCI writes to the device.
 * All users must ensure that they read from the device before either
 * assuming that the device state is up to date, or returning out of this
 * file. This saves a few milliseconds when initialising devices with lots
 * of MSI-X interrupts.
 */
u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag)
{
        u32 mask_bits = desc->masked;
        unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
                          PCI_MSIX_ENTRY_VECTOR_CTRL;

        if (pci_msi_ignore_mask)
                return 0;

        mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
        if (flag)
                mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
        writel(mask_bits, desc->mask_base + offset);

        return mask_bits;
}

static void msix_mask_irq(struct msi_desc *desc, u32 flag)
{
        desc->masked = __pci_msix_desc_mask_irq(desc, flag);
}

static void msi_set_mask_bit(struct irq_data *data, u32 flag)
{
        struct msi_desc *desc = irq_data_get_msi_desc(data);

        if (desc->msi_attrib.is_msix) {
                msix_mask_irq(desc, flag);
                readl(desc->mask_base);         /* Flush write to device */
        } else {
                unsigned offset = data->irq - desc->irq;

                msi_mask_irq(desc, 1 << offset, flag << offset);
        }
}

/**
 * pci_msi_mask_irq - Generic irq chip callback to mask PCI/MSI interrupts
 * @data:       pointer to irqdata associated to that interrupt
 */
void pci_msi_mask_irq(struct irq_data *data)
{
        msi_set_mask_bit(data, 1);
}

/**
 * pci_msi_unmask_irq - Generic irq chip callback to unmask PCI/MSI interrupts
 * @data:       pointer to irqdata associated to that interrupt
 */
void pci_msi_unmask_irq(struct irq_data *data)
{
        msi_set_mask_bit(data, 0);
}

void default_restore_msi_irqs(struct pci_dev *dev)
{
        struct msi_desc *entry;

        for_each_pci_msi_entry(entry, dev)
                default_restore_msi_irq(dev, entry->irq);
}

void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
        struct pci_dev *dev = msi_desc_to_pci_dev(entry);

        BUG_ON(dev->current_state != PCI_D0);

        if (entry->msi_attrib.is_msix) {
                void __iomem *base = entry->mask_base +
                        entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

                msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
                msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
                msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
        } else {
                int pos = dev->msi_cap;
                u16 data;

                pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO,
                                      &msg->address_lo);
                if (entry->msi_attrib.is_64) {
                        pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI,
                                              &msg->address_hi);
                        pci_read_config_word(dev, pos + PCI_MSI_DATA_64, &data);
                } else {
                        msg->address_hi = 0;
                        pci_read_config_word(dev, pos + PCI_MSI_DATA_32, &data);
                }
                msg->data = data;
        }
}

void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
        struct pci_dev *dev = msi_desc_to_pci_dev(entry);

        if (dev->current_state != PCI_D0) {
                /* Don't touch the hardware now */
        } else if (entry->msi_attrib.is_msix) {
                void __iomem *base;

                base = entry->mask_base +
                        entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

                writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
                writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
                writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
        } else {
                int pos = dev->msi_cap;
                u16 msgctl;

                pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
                msgctl &= ~PCI_MSI_FLAGS_QSIZE;
                msgctl |= entry->msi_attrib.multiple << 4;
                pci_write_config_word(dev, pos + PCI_MSI_FLAGS, msgctl);

                pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO,
                                       msg->address_lo);
                if (entry->msi_attrib.is_64) {
                        pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI,
                                               msg->address_hi);
                        pci_write_config_word(dev, pos + PCI_MSI_DATA_64,
                                              msg->data);
                } else {
                        pci_write_config_word(dev, pos + PCI_MSI_DATA_32,
                                              msg->data);
                }
        }
        entry->msg = *msg;
}

void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
        struct msi_desc *entry = irq_get_msi_desc(irq);

        __pci_write_msi_msg(entry, msg);
}
EXPORT_SYMBOL_GPL(pci_write_msi_msg);

static void free_msi_irqs(struct pci_dev *dev)
{
        struct list_head *msi_list = dev_to_msi_list(&dev->dev);
        struct msi_desc *entry, *tmp;
        struct attribute **msi_attrs;
        struct device_attribute *dev_attr;
        int i, count = 0;

        for_each_pci_msi_entry(entry, dev)
                if (entry->irq)
                        for (i = 0; i < entry->nvec_used; i++)
                                BUG_ON(irq_has_action(entry->irq + i));

        pci_msi_teardown_msi_irqs(dev);

        list_for_each_entry_safe(entry, tmp, msi_list, list) {
                if (entry->msi_attrib.is_msix) {
                        if (list_is_last(&entry->list, msi_list))
                                iounmap(entry->mask_base);
                }

                list_del(&entry->list);
                kfree(entry);
        }

        if (dev->msi_irq_groups) {
                sysfs_remove_groups(&dev->dev.kobj, dev->msi_irq_groups);
                msi_attrs = dev->msi_irq_groups[0]->attrs;
                while (msi_attrs[count]) {
                        dev_attr = container_of(msi_attrs[count],
                                                struct device_attribute, attr);
                        kfree(dev_attr->attr.name);
                        kfree(dev_attr);
                        ++count;
                }
                kfree(msi_attrs);
                kfree(dev->msi_irq_groups[0]);
                kfree(dev->msi_irq_groups);
                dev->msi_irq_groups = NULL;
        }
}

static void pci_intx_for_msi(struct pci_dev *dev, int enable)
{
        if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
                pci_intx(dev, enable);
}

static void __pci_restore_msi_state(struct pci_dev *dev)
{
        u16 control;
        struct msi_desc *entry;

        if (!dev->msi_enabled)
                return;

        entry = irq_get_msi_desc(dev->irq);

        pci_intx_for_msi(dev, 0);
        pci_msi_set_enable(dev, 0);
        arch_restore_msi_irqs(dev);

        pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
        msi_mask_irq(entry, msi_mask(entry->msi_attrib.multi_cap),
                     entry->masked);
        control &= ~PCI_MSI_FLAGS_QSIZE;
        control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE;
        pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}

static void __pci_restore_msix_state(struct pci_dev *dev)
{
        struct msi_desc *entry;

        if (!dev->msix_enabled)
                return;
        BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));

        /* route the table */
        pci_intx_for_msi(dev, 0);
        pci_msix_clear_and_set_ctrl(dev, 0,
                                PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);

        arch_restore_msi_irqs(dev);
        for_each_pci_msi_entry(entry, dev)
                msix_mask_irq(entry, entry->masked);

        pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
}

void pci_restore_msi_state(struct pci_dev *dev)
{
        __pci_restore_msi_state(dev);
        __pci_restore_msix_state(dev);
}
EXPORT_SYMBOL_GPL(pci_restore_msi_state);

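/*
 * Note: the PCI core, rather than individual drivers, is the usual caller
 * of pci_restore_msi_state() (typically on resume, as part of restoring
 * the device's saved state), so most drivers never invoke it directly.
 */
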
static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct msi_desc *entry;
        unsigned long irq;
        int retval;

        retval = kstrtoul(attr->attr.name, 10, &irq);
        if (retval)
                return retval;

        entry = irq_get_msi_desc(irq);
        if (entry)
                return sprintf(buf, "%s\n",
                               entry->msi_attrib.is_msix ? "msix" : "msi");

        return -ENODEV;
}

static int populate_msi_sysfs(struct pci_dev *pdev)
{
        struct attribute **msi_attrs;
        struct attribute *msi_attr;
        struct device_attribute *msi_dev_attr;
        struct attribute_group *msi_irq_group;
        const struct attribute_group **msi_irq_groups;
        struct msi_desc *entry;
        int ret = -ENOMEM;
        int num_msi = 0;
        int count = 0;

        /* Determine how many msi entries we have */
        for_each_pci_msi_entry(entry, pdev)
                ++num_msi;
        if (!num_msi)
                return 0;

        /* Dynamically create the MSI attributes for the PCI device */
        msi_attrs = kzalloc(sizeof(void *) * (num_msi + 1), GFP_KERNEL);
        if (!msi_attrs)
                return -ENOMEM;
        for_each_pci_msi_entry(entry, pdev) {
                msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
                if (!msi_dev_attr)
                        goto error_attrs;
                msi_attrs[count] = &msi_dev_attr->attr;

                sysfs_attr_init(&msi_dev_attr->attr);
                msi_dev_attr->attr.name = kasprintf(GFP_KERNEL, "%d",
                                                    entry->irq);
                if (!msi_dev_attr->attr.name)
                        goto error_attrs;
                msi_dev_attr->attr.mode = S_IRUGO;
                msi_dev_attr->show = msi_mode_show;
                ++count;
        }

        msi_irq_group = kzalloc(sizeof(*msi_irq_group), GFP_KERNEL);
        if (!msi_irq_group)
                goto error_attrs;
        msi_irq_group->name = "msi_irqs";
        msi_irq_group->attrs = msi_attrs;

        msi_irq_groups = kzalloc(sizeof(void *) * 2, GFP_KERNEL);
        if (!msi_irq_groups)
                goto error_irq_group;
        msi_irq_groups[0] = msi_irq_group;

        ret = sysfs_create_groups(&pdev->dev.kobj, msi_irq_groups);
        if (ret)
                goto error_irq_groups;
        pdev->msi_irq_groups = msi_irq_groups;

        return 0;

error_irq_groups:
        kfree(msi_irq_groups);
error_irq_group:
        kfree(msi_irq_group);
error_attrs:
        count = 0;
        msi_attr = msi_attrs[count];
        while (msi_attr) {
                msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
                kfree(msi_attr->name);
                kfree(msi_dev_attr);
                ++count;
                msi_attr = msi_attrs[count];
        }
        kfree(msi_attrs);
        return ret;
}

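/*
 * The attribute group built above is visible in sysfs as
 * /sys/bus/pci/devices/<domain:bus:dev.fn>/msi_irqs/<irq>; reading one of
 * those files returns "msi" or "msix" for that Linux interrupt number.
 */
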
static struct msi_desc *msi_setup_entry(struct pci_dev *dev, int nvec)
{
        u16 control;
        struct msi_desc *entry;

        /* MSI Entry Initialization */
        entry = alloc_msi_entry(&dev->dev);
        if (!entry)
                return NULL;

        pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);

        entry->msi_attrib.is_msix       = 0;
        entry->msi_attrib.is_64         = !!(control & PCI_MSI_FLAGS_64BIT);
        entry->msi_attrib.entry_nr      = 0;
        entry->msi_attrib.maskbit       = !!(control & PCI_MSI_FLAGS_MASKBIT);
        entry->msi_attrib.default_irq   = dev->irq;     /* Save IOAPIC IRQ */
        entry->msi_attrib.multi_cap     = (control & PCI_MSI_FLAGS_QMASK) >> 1;
        entry->msi_attrib.multiple      = ilog2(__roundup_pow_of_two(nvec));
        entry->nvec_used                = nvec;

        if (control & PCI_MSI_FLAGS_64BIT)
                entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
        else
                entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_32;

        /* Save the initial mask status */
        if (entry->msi_attrib.maskbit)
                pci_read_config_dword(dev, entry->mask_pos, &entry->masked);

        return entry;
}

static int msi_verify_entries(struct pci_dev *dev)
{
        struct msi_desc *entry;

        for_each_pci_msi_entry(entry, dev) {
                if (!dev->no_64bit_msi || !entry->msg.address_hi)
                        continue;
                dev_err(&dev->dev, "Device has broken 64-bit MSI but arch tried to assign one above 4G\n");
                return -EIO;
        }
        return 0;
}

/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 * @nvec: number of interrupts to allocate
 *
 * Setup the MSI capability structure of the device with the requested
 * number of interrupts.  A return value of zero indicates the successful
 * setup of an entry with the new MSI irq.  A negative return value indicates
 * an error, and a positive return value indicates the number of interrupts
 * which could have been allocated.
 */
static int msi_capability_init(struct pci_dev *dev, int nvec)
{
        struct msi_desc *entry;
        int ret;
        unsigned mask;

        pci_msi_set_enable(dev, 0);     /* Disable MSI during set up */

        entry = msi_setup_entry(dev, nvec);
        if (!entry)
                return -ENOMEM;

        /* All MSIs are unmasked by default; mask them all */
        mask = msi_mask(entry->msi_attrib.multi_cap);
        msi_mask_irq(entry, mask, mask);

        list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));

        /* Configure MSI capability structure */
        ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
        if (ret) {
                msi_mask_irq(entry, mask, ~mask);
                free_msi_irqs(dev);
                return ret;
        }

        ret = msi_verify_entries(dev);
        if (ret) {
                msi_mask_irq(entry, mask, ~mask);
                free_msi_irqs(dev);
                return ret;
        }

        ret = populate_msi_sysfs(dev);
        if (ret) {
                msi_mask_irq(entry, mask, ~mask);
                free_msi_irqs(dev);
                return ret;
        }

        /* Set MSI enabled bits */
        pci_intx_for_msi(dev, 0);
        pci_msi_set_enable(dev, 1);
        dev->msi_enabled = 1;

        pcibios_free_irq(dev);
        dev->irq = entry->irq;
        return 0;
}

static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
{
        resource_size_t phys_addr;
        u32 table_offset;
        unsigned long flags;
        u8 bir;

        pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_TABLE,
                              &table_offset);
        bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
        flags = pci_resource_flags(dev, bir);
        if (!flags || (flags & IORESOURCE_UNSET))
                return NULL;

        table_offset &= PCI_MSIX_TABLE_OFFSET;
        phys_addr = pci_resource_start(dev, bir) + table_offset;

        return ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
}

static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
                              struct msix_entry *entries, int nvec)
{
        struct msi_desc *entry;
        int i;

        for (i = 0; i < nvec; i++) {
                entry = alloc_msi_entry(&dev->dev);
                if (!entry) {
                        if (!i)
                                iounmap(base);
                        else
                                free_msi_irqs(dev);
                        /* Not enough memory. Don't try again. */
                        return -ENOMEM;
                }

                entry->msi_attrib.is_msix       = 1;
                entry->msi_attrib.is_64         = 1;
                entry->msi_attrib.entry_nr      = entries[i].entry;
                entry->msi_attrib.default_irq   = dev->irq;
                entry->mask_base                = base;
                entry->nvec_used                = 1;

                list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
        }

        return 0;
}

static void msix_program_entries(struct pci_dev *dev,
                                 struct msix_entry *entries)
{
        struct msi_desc *entry;
        int i = 0;

        for_each_pci_msi_entry(entry, dev) {
                int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE +
                             PCI_MSIX_ENTRY_VECTOR_CTRL;

                entries[i].vector = entry->irq;
                entry->masked = readl(entry->mask_base + offset);
                msix_mask_irq(entry, 1);
                i++;
        }
}

/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Setup the MSI-X capability structure of the device function with the
 * requested number of MSI-X irqs.  A return of zero indicates the successful
 * setup of the requested MSI-X entries with allocated irqs; a non-zero
 * return indicates a failure.
 **/
static int msix_capability_init(struct pci_dev *dev,
                                struct msix_entry *entries, int nvec)
{
        int ret;
        u16 control;
        void __iomem *base;

        /* Ensure MSI-X is disabled while it is set up */
        pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);

        pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
        /* Request & Map MSI-X table region */
        base = msix_map_region(dev, msix_table_size(control));
        if (!base)
                return -ENOMEM;

        ret = msix_setup_entries(dev, base, entries, nvec);
        if (ret)
                return ret;

        ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
        if (ret)
                goto out_avail;

        /* Check if all MSI entries honor device restrictions */
        ret = msi_verify_entries(dev);
        if (ret)
                goto out_free;

        /*
         * Some devices require MSI-X to be enabled before we can touch the
         * MSI-X registers. We need to mask all the vectors to prevent
         * interrupts coming in before they're fully set up.
         */
        pci_msix_clear_and_set_ctrl(dev, 0,
                                PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE);

        msix_program_entries(dev, entries);

        ret = populate_msi_sysfs(dev);
        if (ret)
                goto out_free;

        /* Set MSI-X enabled bits and unmask the function */
        pci_intx_for_msi(dev, 0);
        dev->msix_enabled = 1;
        pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);

        pcibios_free_irq(dev);
        return 0;

out_avail:
        if (ret < 0) {
                /*
                 * If we had some success, report the number of irqs
                 * we succeeded in setting up.
                 */
                struct msi_desc *entry;
                int avail = 0;

                for_each_pci_msi_entry(entry, dev) {
                        if (entry->irq != 0)
                                avail++;
                }
                if (avail != 0)
                        ret = avail;
        }

out_free:
        free_msi_irqs(dev);

        return ret;
}

/**
 * pci_msi_supported - check whether MSI may be enabled on a device
 * @dev: pointer to the pci_dev data structure of MSI device function
 * @nvec: how many MSIs have been requested?
 *
 * Look at global flags, the device itself, and its parent buses
 * to determine if MSI/-X are supported for the device. If MSI/-X is
 * supported return 1, else return 0.
 **/
static int pci_msi_supported(struct pci_dev *dev, int nvec)
{
        struct pci_bus *bus;

        /* MSI must be globally enabled and supported by the device */
        if (!pci_msi_enable)
                return 0;

        if (!dev || dev->no_msi || dev->current_state != PCI_D0)
                return 0;

        /*
         * You can't ask to have zero or fewer MSIs configured:
         *  a) it's stupid ...
         *  b) the list manipulation code assumes nvec >= 1.
         */
        if (nvec < 1)
                return 0;

        /*
         * Any bridge which does NOT route MSI transactions from its
         * secondary bus to its primary bus must set NO_MSI flag on
         * the secondary pci_bus.
         * We expect only arch-specific PCI host bus controller driver
         * or quirks for specific PCI bridges to be setting NO_MSI.
         */
        for (bus = dev->bus; bus; bus = bus->parent)
                if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
                        return 0;

        return 1;
}

/**
 * pci_msi_vec_count - Return the number of MSI vectors a device can send
 * @dev: device to report about
 *
 * This function returns the number of MSI vectors a device requested via
 * the Multiple Message Capable register.  It returns a negative errno if
 * the device is not capable of sending MSI interrupts.  Otherwise, the call
 * succeeds and returns a power of two, up to a maximum of 2^5 (32),
 * according to the MSI specification.
 **/
int pci_msi_vec_count(struct pci_dev *dev)
{
        int ret;
        u16 msgctl;

        if (!dev->msi_cap)
                return -EINVAL;

        pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl);
        ret = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);

        return ret;
}
EXPORT_SYMBOL(pci_msi_vec_count);

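/*
 * Example: for a function whose Multiple Message Capable field reads 2
 * (i.e. 1 << 2 vectors), pci_msi_vec_count() returns 4.  Note this is the
 * count the device advertises, not what the OS has actually granted.
 */
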
void pci_msi_shutdown(struct pci_dev *dev)
{
        struct msi_desc *desc;
        u32 mask;

        if (!pci_msi_enable || !dev || !dev->msi_enabled)
                return;

        BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));
        desc = first_pci_msi_entry(dev);

        pci_msi_set_enable(dev, 0);
        pci_intx_for_msi(dev, 1);
        dev->msi_enabled = 0;

        /* Return the device with MSI unmasked as its initial state */
        mask = msi_mask(desc->msi_attrib.multi_cap);
        /* Keep cached state to be restored */
        __pci_msi_desc_mask_irq(desc, mask, ~mask);

        /* Restore dev->irq to its default pin-assertion irq */
        dev->irq = desc->msi_attrib.default_irq;
        pcibios_alloc_irq(dev);
}

void pci_disable_msi(struct pci_dev *dev)
{
        if (!pci_msi_enable || !dev || !dev->msi_enabled)
                return;

        pci_msi_shutdown(dev);
        free_msi_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msi);

/**
 * pci_msix_vec_count - return the number of device's MSI-X table entries
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 *
 * This function returns the number of device's MSI-X table entries and
 * therefore the number of MSI-X vectors the device is capable of sending.
 * It returns a negative errno if the device is not capable of sending MSI-X
 * interrupts.
 **/
int pci_msix_vec_count(struct pci_dev *dev)
{
        u16 control;

        if (!dev->msix_cap)
                return -EINVAL;

        pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
        return msix_table_size(control);
}
EXPORT_SYMBOL(pci_msix_vec_count);

/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @nvec: number of MSI-X irqs requested for allocation by device driver
 *
 * Setup the MSI-X capability structure of the device function with the
 * number of irqs requested by its driver.  A return of zero indicates
 * that the MSI-X capability was successfully configured and all requested
 * MSI-X irqs were allocated.  A return of < 0 indicates a failure.  A
 * return of > 0 indicates that the request exceeds the number of irqs or
 * MSI-X vectors available; the driver should retry with the returned value.
 **/
int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
{
        int nr_entries;
        int i, j;

        if (!pci_msi_supported(dev, nvec))
                return -EINVAL;

        if (!entries)
                return -EINVAL;

        nr_entries = pci_msix_vec_count(dev);
        if (nr_entries < 0)
                return nr_entries;
        if (nvec > nr_entries)
                return nr_entries;

        /* Check for any invalid entries */
        for (i = 0; i < nvec; i++) {
                if (entries[i].entry >= nr_entries)
                        return -EINVAL;         /* invalid entry */
                for (j = i + 1; j < nvec; j++) {
                        if (entries[i].entry == entries[j].entry)
                                return -EINVAL; /* duplicate entry */
                }
        }
        WARN_ON(!!dev->msix_enabled);

        /* Check whether driver already requested for MSI irq */
        if (dev->msi_enabled) {
                dev_info(&dev->dev, "can't enable MSI-X (MSI IRQ already assigned)\n");
                return -EINVAL;
        }
        return msix_capability_init(dev, entries, nvec);
}
EXPORT_SYMBOL(pci_enable_msix);

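/*
 * Illustrative call sequence (hypothetical driver code, not part of this
 * file): the caller fills in the .entry fields and, on success, reads the
 * assigned Linux irq numbers back from the .vector fields:
 *
 *      struct msix_entry entries[4];
 *      int i, rc;
 *
 *      for (i = 0; i < 4; i++)
 *              entries[i].entry = i;
 *      rc = pci_enable_msix(pdev, entries, 4);
 *      if (rc == 0)
 *              first_irq = entries[0].vector;
 */
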
void pci_msix_shutdown(struct pci_dev *dev)
{
        struct msi_desc *entry;

        if (!pci_msi_enable || !dev || !dev->msix_enabled)
                return;

        /* Return the device with MSI-X masked as its initial state */
        for_each_pci_msi_entry(entry, dev) {
                /* Keep cached states to be restored */
                __pci_msix_desc_mask_irq(entry, 1);
        }

        pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
        pci_intx_for_msi(dev, 1);
        dev->msix_enabled = 0;
        pcibios_alloc_irq(dev);
}

void pci_disable_msix(struct pci_dev *dev)
{
        if (!pci_msi_enable || !dev || !dev->msix_enabled)
                return;

        pci_msix_shutdown(dev);
        free_msi_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msix);

void pci_no_msi(void)
{
        pci_msi_enable = 0;
}

/**
 * pci_msi_enabled - is MSI enabled?
 *
 * Returns true if MSI has not been disabled by the command-line option
 * pci=nomsi.
 **/
int pci_msi_enabled(void)
{
        return pci_msi_enable;
}
EXPORT_SYMBOL(pci_msi_enabled);

void pci_msi_init_pci_dev(struct pci_dev *dev)
{
}

/**
 * pci_enable_msi_range - configure device's MSI capability structure
 * @dev: device to configure
 * @minvec: minimal number of interrupts to configure
 * @maxvec: maximum number of interrupts to configure
 *
 * This function tries to allocate a maximum possible number of interrupts in
 * a range between @minvec and @maxvec.  It returns a negative errno if an
 * error occurs.  If it succeeds, it returns the actual number of interrupts
 * allocated and updates the @dev's irq member to the lowest new interrupt
 * number; the other interrupt numbers allocated to this device are
 * consecutive.
 **/
int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
{
        int nvec;
        int rc;

        if (!pci_msi_supported(dev, minvec))
                return -EINVAL;

        WARN_ON(!!dev->msi_enabled);

        /* Check whether driver already requested MSI-X irqs */
        if (dev->msix_enabled) {
                dev_info(&dev->dev,
                         "can't enable MSI (MSI-X already enabled)\n");
                return -EINVAL;
        }

        if (maxvec < minvec)
                return -ERANGE;

        nvec = pci_msi_vec_count(dev);
        if (nvec < 0)
                return nvec;
        else if (nvec < minvec)
                return -EINVAL;
        else if (nvec > maxvec)
                nvec = maxvec;

        do {
                rc = msi_capability_init(dev, nvec);
                if (rc < 0) {
                        return rc;
                } else if (rc > 0) {
                        if (rc < minvec)
                                return -ENOSPC;
                        nvec = rc;
                }
        } while (rc);

        return nvec;
}
EXPORT_SYMBOL(pci_enable_msi_range);

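/*
 * Illustrative usage (hypothetical driver code): ask for between one and
 * eight MSI vectors.  On success the return value is the number of vectors
 * granted, and pdev->irq .. pdev->irq + nvec - 1 are the allocated Linux
 * interrupt numbers; a negative return value is an errno:
 *
 *      int nvec = pci_enable_msi_range(pdev, 1, 8);
 *      if (nvec < 0)
 *              return nvec;
 */
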
/**
 * pci_enable_msix_range - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @minvec: minimum number of MSI-X irqs requested
 * @maxvec: maximum number of MSI-X irqs requested
 *
 * Setup the MSI-X capability structure of the device function with a maximum
 * possible number of interrupts in the range between @minvec and @maxvec.
 * It returns a negative errno if an error occurs.  If it succeeds, it
 * returns the actual number of interrupts allocated, with the MSI-X
 * capability configured for the newly allocated MSI-X interrupts.
 **/
int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
                          int minvec, int maxvec)
{
        int nvec = maxvec;
        int rc;

        if (maxvec < minvec)
                return -ERANGE;

        do {
                rc = pci_enable_msix(dev, entries, nvec);
                if (rc < 0) {
                        return rc;
                } else if (rc > 0) {
                        if (rc < minvec)
                                return -ENOSPC;
                        nvec = rc;
                }
        } while (rc);

        return nvec;
}
EXPORT_SYMBOL(pci_enable_msix_range);

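/*
 * Sketch of a typical caller (hypothetical driver code): accept anywhere
 * from 2 to 8 MSI-X vectors.  A negative return value means not even
 * @minvec vectors could be allocated:
 *
 *      struct msix_entry entries[8];
 *      int i, nvec;
 *
 *      for (i = 0; i < 8; i++)
 *              entries[i].entry = i;
 *      nvec = pci_enable_msix_range(pdev, entries, 2, 8);
 *      if (nvec < 0)
 *              return nvec;
 */
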
struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
{
        return to_pci_dev(desc->dev);
}

void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
{
        struct pci_dev *dev = msi_desc_to_pci_dev(desc);

        return dev->bus->sysdata;
}
EXPORT_SYMBOL_GPL(msi_desc_to_pci_sysdata);

#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
/**
 * pci_msi_domain_write_msg - Helper to write MSI message to PCI config space
 * @irq_data:   Pointer to interrupt data of the MSI interrupt
 * @msg:        Pointer to the message
 */
void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg)
{
        struct msi_desc *desc = irq_data_get_msi_desc(irq_data);

        /*
         * For MSI-X desc->irq is always equal to irq_data->irq. For
         * MSI only the first interrupt of MULTI MSI passes the test.
         */
        if (desc->irq == irq_data->irq)
                __pci_write_msi_msg(desc, msg);
}

/**
 * pci_msi_domain_calc_hwirq - Generate a unique ID for an MSI source
 * @dev:        Pointer to the PCI device
 * @desc:       Pointer to the msi descriptor
 *
 * The ID number is only used within the irqdomain.
 */
irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
                                          struct msi_desc *desc)
{
        return (irq_hw_number_t)desc->msi_attrib.entry_nr |
                PCI_DEVID(dev->bus->number, dev->devfn) << 11 |
                (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27;
}

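/*
 * The hwirq computed above packs, from least to most significant bits:
 * the MSI entry number (bits 0-10), the bus number and devfn requester
 * ID (bits 11-26) and the PCI domain number (bit 27 upwards), so MSIs
 * from different functions map to distinct hwirqs within one irqdomain.
 */
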
static inline bool pci_msi_desc_is_multi_msi(struct msi_desc *desc)
{
        return !desc->msi_attrib.is_msix && desc->nvec_used > 1;
}

/**
 * pci_msi_domain_check_cap - Verify that @domain supports the capabilities for @dev
 * @domain:     The interrupt domain to check
 * @info:       The domain info for verification
 * @dev:        The device to check
 *
 * Returns:
 *  0 if the functionality is supported
 *  1 if Multi MSI is requested, but the domain does not support it
 *  -ENOTSUPP otherwise
 */
int pci_msi_domain_check_cap(struct irq_domain *domain,
                             struct msi_domain_info *info, struct device *dev)
{
        struct msi_desc *desc = first_pci_msi_entry(to_pci_dev(dev));

        /* Special handling to support pci_enable_msi_range() */
        if (pci_msi_desc_is_multi_msi(desc) &&
            !(info->flags & MSI_FLAG_MULTI_PCI_MSI))
                return 1;
        else if (desc->msi_attrib.is_msix && !(info->flags & MSI_FLAG_PCI_MSIX))
                return -ENOTSUPP;

        return 0;
}

static int pci_msi_domain_handle_error(struct irq_domain *domain,
                                       struct msi_desc *desc, int error)
{
        /* Special handling to support pci_enable_msi_range() */
        if (pci_msi_desc_is_multi_msi(desc) && error == -ENOSPC)
                return 1;

        return error;
}

#ifdef GENERIC_MSI_DOMAIN_OPS
static void pci_msi_domain_set_desc(msi_alloc_info_t *arg,
                                    struct msi_desc *desc)
{
        arg->desc = desc;
        arg->hwirq = pci_msi_domain_calc_hwirq(msi_desc_to_pci_dev(desc),
                                               desc);
}
#else
#define pci_msi_domain_set_desc         NULL
#endif

static struct msi_domain_ops pci_msi_domain_ops_default = {
        .set_desc       = pci_msi_domain_set_desc,
        .msi_check      = pci_msi_domain_check_cap,
        .handle_error   = pci_msi_domain_handle_error,
};

static void pci_msi_domain_update_dom_ops(struct msi_domain_info *info)
{
        struct msi_domain_ops *ops = info->ops;

        if (ops == NULL) {
                info->ops = &pci_msi_domain_ops_default;
        } else {
                if (ops->set_desc == NULL)
                        ops->set_desc = pci_msi_domain_set_desc;
                if (ops->msi_check == NULL)
                        ops->msi_check = pci_msi_domain_check_cap;
                if (ops->handle_error == NULL)
                        ops->handle_error = pci_msi_domain_handle_error;
        }
}

static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info)
{
        struct irq_chip *chip = info->chip;

        BUG_ON(!chip);
        if (!chip->irq_write_msi_msg)
                chip->irq_write_msi_msg = pci_msi_domain_write_msg;
}

/**
 * pci_msi_create_irq_domain - Create a MSI interrupt domain
 * @fwnode:     Optional fwnode of the interrupt controller
 * @info:       MSI domain info
 * @parent:     Parent irq domain
 *
 * Updates the domain and chip ops and creates a MSI interrupt domain.
 *
 * Returns:
 * A domain pointer or NULL in case of failure.
 */
struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
                                             struct msi_domain_info *info,
                                             struct irq_domain *parent)
{
        struct irq_domain *domain;

        if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
                pci_msi_domain_update_dom_ops(info);
        if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
                pci_msi_domain_update_chip_ops(info);

        domain = msi_create_irq_domain(fwnode, info, parent);
        if (!domain)
                return NULL;

        domain->bus_token = DOMAIN_BUS_PCI_MSI;
        return domain;
}

/**
 * pci_msi_domain_alloc_irqs - Allocate interrupts for @dev in @domain
 * @domain:     The interrupt domain to allocate from
 * @dev:        The device for which to allocate
 * @nvec:       The number of interrupts to allocate
 * @type:       Unused to allow simpler migration from the arch_XXX interfaces
 *
 * Returns:
 * Zero on success, or an error code in case of failure.
 */
int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev,
                              int nvec, int type)
{
        return msi_domain_alloc_irqs(domain, &dev->dev, nvec);
}

/**
 * pci_msi_domain_free_irqs - Free interrupts for @dev in @domain
 * @domain:     The interrupt domain
 * @dev:        The device for which to free interrupts
 */
void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev)
{
        msi_domain_free_irqs(domain, &dev->dev);
}

/**
 * pci_msi_create_default_irq_domain - Create a default MSI interrupt domain
 * @fwnode:     Optional fwnode of the interrupt controller
 * @info:       MSI domain info
 * @parent:     Parent irq domain
 *
 * Returns: A domain pointer or NULL in case of failure. If successful
 * the default PCI/MSI irqdomain pointer is updated.
 */
struct irq_domain *pci_msi_create_default_irq_domain(struct fwnode_handle *fwnode,
                struct msi_domain_info *info, struct irq_domain *parent)
{
        struct irq_domain *domain;

        mutex_lock(&pci_msi_domain_lock);
        if (pci_msi_default_domain) {
                pr_err("PCI: default irq domain for PCI MSI has already been created.\n");
                domain = NULL;
        } else {
                domain = pci_msi_create_irq_domain(fwnode, info, parent);
                pci_msi_default_domain = domain;
        }
        mutex_unlock(&pci_msi_domain_lock);

        return domain;
}

static int get_msi_id_cb(struct pci_dev *pdev, u16 alias, void *data)
{
        u32 *pa = data;

        *pa = alias;
        return 0;
}

/**
 * pci_msi_domain_get_msi_rid - Get the MSI requester id (RID)
 * @domain:     The interrupt domain
 * @pdev:       The PCI device.
 *
 * The RID for a device is formed from the alias, with a firmware
 * supplied mapping applied.
 *
 * Returns: The RID.
 */
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
{
        struct device_node *of_node;
        u32 rid = 0;

        pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);

        of_node = irq_domain_get_of_node(domain);
        if (of_node)
                rid = of_msi_map_rid(&pdev->dev, of_node, rid);

        return rid;
}
#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */