#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/intel-iommu.h>
#include <linux/acpi.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/irq_remapping.h>
#include <asm/pci-direct.h>
#include <asm/msidef.h>

#include "irq_remapping.h"

struct ioapic_scope {
	struct intel_iommu *iommu;
	unsigned int id;
	unsigned int bus;	/* PCI bus number */
	unsigned int devfn;	/* PCI devfn number */
};

struct hpet_scope {
	struct intel_iommu *iommu;
	u8 id;
	unsigned int bus;
	unsigned int devfn;
};

#define IR_X2APIC_MODE(mode)	(mode ? (1 << 11) : 0)
#define IRTE_DEST(dest)		((x2apic_mode) ? dest : dest << 8)

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
static int ir_ioapic_num, ir_hpet_num;

/*
 * Lock ordering:
 * ->dmar_global_lock
 *	->irq_2_ir_lock
 *		->qi->q_lock
 *	->iommu->register_lock
 * Note:
 * intel_irq_remap_ops.{supported,prepare,enable,disable,reenable} are called
 * in a single-threaded environment with interrupts disabled, so there is no
 * need to take the dmar_global_lock.
 */
static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);

static int __init parse_ioapics_under_ir(void);

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	struct irq_cfg *cfg = irq_get_chip_data(irq);

	return cfg ? &cfg->irq_2_iommu : NULL;
}

static int get_irte(int irq, struct irte *entry)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int index;

	if (!entry || !irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	if (unlikely(!irq_iommu->iommu)) {
		raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return 0;
}

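/*
 * Allocate a block of 'count' consecutive IRTEs for @irq in @iommu's
 * remap table.  Multi-IRTE requests (e.g. multi-vector MSI) are rounded
 * up to a power of two so sub-handles can be OR'ed into the base index;
 * returns the base index, or a negative value on failure.
 */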
static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct irq_cfg *cfg = irq_get_chip_data(irq);
	unsigned int mask = 0;
	unsigned long flags;
	int index;

	if (!count || !irq_iommu)
		return -1;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	index = bitmap_find_free_region(table->bitmap,
					INTR_REMAP_TABLE_ENTRIES, mask);
	if (index < 0) {
		pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
	} else {
		cfg->remapped = 1;
		irq_iommu->iommu = iommu;
		irq_iommu->irte_index = index;
		irq_iommu->sub_handle = 0;
		irq_iommu->irte_mask = mask;
	}
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}

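/*
 * Queue an Interrupt Entry Cache invalidation for the IRTE(s) at
 * 'index' and wait synchronously for the hardware to complete it.
 */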
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}

static int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return index;
}

static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct irq_cfg *cfg = irq_get_chip_data(irq);
	unsigned long flags;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	cfg->remapped = 1;
	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}

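/*
 * Update an IRTE in place.  set_64bit() keeps each half of the entry
 * atomic so the hardware never sees a torn entry; the interrupt entry
 * cache is flushed afterwards.
 */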
static int modify_irte(int irq, struct irte *irte_modified)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct intel_iommu *iommu;
	unsigned long flags;
	struct irte *irte;
	int rc, index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit(&irte->low, irte_modified->low);
	set_64bit(&irte->high, irte_modified->high);
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].id == hpet_id)
			return ir_hpet[i].iommu;
	return NULL;
}

static struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic)
			return ir_ioapic[i].iommu;
	return NULL;
}

static struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}

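/*
 * Zero the whole IRTE block owned by the base handle (sub-handles share
 * their base entry's allocation, so they are skipped), release the
 * bitmap region and flush the interrupt entry cache.
 */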
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct irte *start, *entry, *end;
	struct intel_iommu *iommu;
	int index;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	start = iommu->ir_table->base + index;
	end = start + (1 << irq_iommu->irte_mask);

	for (entry = start; entry < end; entry++) {
		set_64bit(&entry->low, 0);
		set_64bit(&entry->high, 0);
	}
	bitmap_release_region(iommu->ir_table->bitmap, index,
			      irq_iommu->irte_mask);

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

static int free_irte(int irq)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int rc;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	rc = clear_entries(irq_iommu);

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

/*
 * source validation type
 */
#define SVT_NO_VERIFY		0x0	/* no verification is required */
#define SVT_VERIFY_SID_SQ	0x1	/* verify using SID and SQ fields */
#define SVT_VERIFY_BUS		0x2	/* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16	0x0	/* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1	0x1	/* verify most significant 13 bits, ignore
				 * the third least significant bit
				 */
#define SQ_13_IGNORE_2	0x2	/* verify most significant 13 bits, ignore
				 * the second and third least significant bits
				 */
#define SQ_13_IGNORE_3	0x3	/* verify most significant 13 bits, ignore
				 * the three least significant bits
				 */

/*
 * set SVT, SQ and SID fields of irte to verify
 * source ids of interrupt requests
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
			 unsigned int sq, unsigned int sid)
{
	if (disable_sourceid_checking)
		svt = SVT_NO_VERIFY;
	irte->svt = svt;
	irte->sq = sq;
	irte->sid = sid;
}

static int set_ioapic_sid(struct irte *irte, int apic)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	down_read(&dmar_global_lock);
	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].id == apic) {
			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
			break;
		}
	}
	up_read(&dmar_global_lock);

	if (sid == 0) {
		pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
		return -1;
	}

	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

	return 0;
}

static int set_hpet_sid(struct irte *irte, u8 id)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	down_read(&dmar_global_lock);
	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].id == id) {
			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
			break;
		}
	}
	up_read(&dmar_global_lock);

	if (sid == 0) {
		pr_warning("Failed to set source-id of HPET block (%d)\n", id);
		return -1;
	}

	/*
	 * Should really use SQ_ALL_16.  Some platforms are broken.
	 * While we figure out the right quirks for these broken platforms, use
	 * SQ_13_IGNORE_3 for now.
	 */
	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

	return 0;
}

struct set_msi_sid_data {
	struct pci_dev *pdev;
	u16 alias;
};

static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct set_msi_sid_data *data = opaque;

	data->pdev = pdev;
	data->alias = alias;

	return 0;
}

static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct set_msi_sid_data data;

	if (!irte || !dev)
		return -1;

	pci_for_each_dma_alias(dev, set_msi_sid_cb, &data);

	/*
	 * The DMA alias walk provides us with a PCI device and alias.  The
	 * only case where it will return an alias on a different bus than
	 * the device is the case of a PCIe-to-PCI bridge, where the alias
	 * is for the subordinate bus.  In this case we can only verify the
	 * bus.
	 *
	 * If the alias device is on a different bus than our source device
	 * then we have a topology based alias, use it.
	 *
	 * Otherwise, the alias is for a device DMA quirk and we cannot
	 * assume that MSI uses the same requester ID.  Therefore use the
	 * original device.
	 */
	if (PCI_BUS_NUM(data.alias) != data.pdev->bus->number)
		set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
			     PCI_DEVID(PCI_BUS_NUM(data.alias),
				       dev->bus->number));
	else if (data.pdev->bus->number != dev->bus->number)
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, data.alias);
	else
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     PCI_DEVID(dev->bus->number, dev->devfn));

	return 0;
}

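/*
 * Program the remap table into the hardware and switch remapping on:
 * write the table address to IRTA, latch it with SIRTP, globally
 * invalidate the interrupt entry cache, and only then set IRE.
 * Compatibility-format interrupts (CFI) are blocked at the same time.
 */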
static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * Global invalidation of interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	iommu->gcmd |= DMA_GCMD_IRE;
	iommu->gcmd &= ~DMA_GCMD_CFI;	/* Block compatibility-format MSIs */
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	/*
	 * With CFI clear in the Global Command register, we should be
	 * protected from dangerous (i.e. compatibility) interrupts
	 * regardless of x2apic status.  Check just to be sure.
	 */
	if (sts & DMA_GSTS_CFIS)
		WARN(1, KERN_WARNING
			"Compatibility-format IRQs enabled despite intr remapping;\n"
			"you are vulnerable to IRQ injection.\n");

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
{
	struct ir_table *ir_table;
	struct page *pages;
	unsigned long *bitmap;

	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
					     GFP_ATOMIC);
	if (!iommu->ir_table)
		return -ENOMEM;

	pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);
	if (!pages) {
		pr_err("IR%d: failed to allocate pages of order %d\n",
		       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table);
		return -ENOMEM;
	}

	bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
			 sizeof(long), GFP_ATOMIC);
	if (bitmap == NULL) {
		pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
		__free_pages(pages, INTR_REMAP_PAGE_ORDER);
		kfree(ir_table);
		return -ENOMEM;
	}

	ir_table->base = page_address(pages);
	ir_table->bitmap = bitmap;

	iommu_set_irq_remapping(iommu, mode);
	return 0;
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * Global invalidation of interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int __init dmar_x2apic_optout(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar || no_x2apic_optout)
		return 0;
	return dmar->flags & DMAR_X2APIC_OPT_OUT;
}

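/*
 * Interrupt remapping is usable only if it isn't disabled on the
 * command line, the chipset isn't known-broken, the DMAR table
 * advertises it and every IOMMU has IR support in its extended
 * capabilities.
 */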
static int __init intel_irq_remapping_supported(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	if (disable_irq_remap)
		return 0;
	if (irq_remap_broken) {
		printk(KERN_WARNING
			"This system BIOS has enabled interrupt remapping\n"
			"on a chipset that contains an erratum making that\n"
			"feature unstable.  To maintain system stability\n"
			"interrupt remapping is being disabled.  Please\n"
			"contact your BIOS vendor for an update\n");
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
		disable_irq_remap = 1;
		return 0;
	}

	if (!dmar_ir_support())
		return 0;

	for_each_iommu(iommu, drhd)
		if (!ecap_ir_support(iommu->ecap))
			return 0;

	return 1;
}

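/*
 * Bring up remapping on every DRHD: undo any IR/QI state left over from
 * the BIOS or a previous kernel, enable queued invalidation, check EIM
 * support when x2apic is wanted, then install a remap table per IOMMU.
 * Returns the remapping mode (x2apic or xapic) on success.
 */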
static int __init intel_enable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool x2apic_present;
	int setup = 0;
	int eim = 0;

	x2apic_present = x2apic_supported();

	if (parse_ioapics_under_ir() != 1) {
		printk(KERN_INFO "Cannot enable interrupt remapping\n");
		goto error;
	}

	if (x2apic_present) {
		pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");

		eim = !dmar_x2apic_optout();
		if (!eim)
			printk(KERN_WARNING
				"Your BIOS is broken and requested that x2apic be disabled.\n"
				"This will slightly decrease performance.\n"
				"Use 'intremap=no_x2apic_optout' to override BIOS request.\n");
	}

	for_each_iommu(iommu, drhd) {
		/*
		 * If queued invalidation is already initialized,
		 * we shouldn't disable it.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);

		/*
		 * Disable intr remapping and queued invalidation, if already
		 * enabled prior to OS handover.
		 */
		iommu_disable_irq_remapping(iommu);

		dmar_disable_qi(iommu);
	}

	/*
	 * Check for Interrupt-remapping support.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
			       "ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
			goto error;
		}
	}

	/*
	 * Enable queued invalidation for all the DRHD's.
	 */
	for_each_iommu(iommu, drhd) {
		int ret = dmar_enable_qi(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued "
			       "invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			goto error;
		}
	}

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (intel_setup_irq_remapping(iommu, eim))
			goto error;

		setup = 1;
	}

	if (!setup)
		goto error;

	irq_remapping_enabled = 1;

	/*
	 * VT-d has a different layout for IO-APIC entries when
	 * interrupt remapping is enabled.  So it needs a special routine
	 * to print IO-APIC entries for debugging purposes too.
	 */
	x86_io_apic_ops.print_entries = intel_ir_io_apic_print_entries;

	pr_info("Enabled IRQ remapping in %s mode\n", eim ? "x2apic" : "xapic");

	return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;

error:
	/*
	 * Handle the error condition gracefully here!
	 */

	if (x2apic_present)
		pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");

	return -1;
}

static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
				    struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->device, path->function,
					   PCI_SECONDARY_BUS);
		path++;
	}
	ir_hpet[ir_hpet_num].bus = bus;
	ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->device, path->function);
	ir_hpet[ir_hpet_num].iommu = iommu;
	ir_hpet[ir_hpet_num].id = scope->enumeration_id;
	ir_hpet_num++;
}

static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->device, path->function,
					   PCI_SECONDARY_BUS);
		path++;
	}

	ir_ioapic[ir_ioapic_num].bus = bus;
	ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->device, path->function);
	ir_ioapic[ir_ioapic_num].iommu = iommu;
	ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
	ir_ioapic_num++;
}

static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;

	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			if (ir_ioapic_num == MAX_IO_APICS) {
				printk(KERN_WARNING "Exceeded Max IO APICS\n");
				return -1;
			}

			printk(KERN_INFO "IOAPIC id %d under DRHD base "
			       "0x%Lx IOMMU %d\n", scope->enumeration_id,
			       drhd->address, iommu->seq_id);

			ir_parse_one_ioapic_scope(scope, iommu);
		} else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
			if (ir_hpet_num == MAX_HPET_TBS) {
				printk(KERN_WARNING "Exceeded Max HPET blocks\n");
				return -1;
			}

			printk(KERN_INFO "HPET id %d under DRHD base "
			       "0x%Lx\n", scope->enumeration_id,
			       drhd->address);

			ir_parse_one_hpet_scope(scope, iommu);
		}
		start += scope->length;
	}

	return 0;
}

/*
 * Finds the association between IOAPICs and their Interrupt-remapping
 * hardware units.
 */
static int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int ir_supported = 0;
	int ioapic_idx;

	for_each_iommu(iommu, drhd)
		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = 1;
		}

	if (!ir_supported)
		return 0;

	for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
		int ioapic_id = mpc_ioapic_id(ioapic_idx);

		if (!map_ioapic_to_ir(ioapic_id)) {
			pr_err(FW_BUG "ioapic %d has no mapping iommu, "
			       "interrupt remapping will be disabled\n",
			       ioapic_id);
			return -1;
		}
	}

	return 1;
}

static int __init ir_dev_scope_init(void)
{
	int ret;

	if (!irq_remapping_enabled)
		return 0;

	down_write(&dmar_global_lock);
	ret = dmar_dev_scope_init();
	up_write(&dmar_global_lock);

	return ret;
}
rootfs_initcall(ir_dev_scope_init);

static void disable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/*
	 * Disable Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_irq_remapping(iommu);
	}
}

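/*
 * Re-program remapping with the previously negotiated eim setting,
 * e.g. when the hardware state was lost across a suspend/resume cycle.
 */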
static int reenable_irq_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;
	struct intel_iommu *iommu = NULL;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		/* Set up interrupt remapping for iommu. */
		iommu_set_irq_remapping(iommu, eim);
		setup = 1;
	}

	if (!setup)
		goto error;

	return 0;

error:
	/*
	 * Handle the error condition gracefully here!
	 */
	return -1;
}

static void prepare_irte(struct irte *irte, int vector,
			 unsigned int dest)
{
	memset(irte, 0, sizeof(*irte));

	irte->present = 1;
	irte->dst_mode = apic->irq_dest_mode;
	/*
	 * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
	 * actual level or edge trigger will be set up in the IO-APIC
	 * RTE.  This will help simplify level triggered irq migration.
	 * For more details, see the comments (in io_apic.c) explaining IO-APIC
	 * irq migration in the presence of interrupt-remapping.
	 */
	irte->trigger_mode = 0;
	irte->dlvry_mode = apic->irq_delivery_mode;
	irte->vector = vector;
	irte->dest_id = IRTE_DEST(dest);
	irte->redir_hint = 1;
}

static int intel_setup_ioapic_entry(int irq,
				    struct IO_APIC_route_entry *route_entry,
				    unsigned int destination, int vector,
				    struct io_apic_irq_attr *attr)
{
	int ioapic_id = mpc_ioapic_id(attr->ioapic);
	struct intel_iommu *iommu;
	struct IR_IO_APIC_route_entry *entry;
	struct irte irte;
	int index;

	down_read(&dmar_global_lock);
	iommu = map_ioapic_to_ir(ioapic_id);
	if (!iommu) {
		pr_warn("No mapping iommu for ioapic %d\n", ioapic_id);
		index = -ENODEV;
	} else {
		index = alloc_irte(iommu, irq, 1);
		if (index < 0) {
			pr_warn("Failed to allocate IRTE for ioapic %d\n",
				ioapic_id);
			index = -ENOMEM;
		}
	}
	up_read(&dmar_global_lock);
	if (index < 0)
		return index;

	prepare_irte(&irte, vector, destination);

	/* Set source-id of interrupt request */
	set_ioapic_sid(&irte, ioapic_id);

	modify_irte(irq, &irte);

	apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: "
		"Set IRTE entry (P:%d FPD:%d Dst_Mode:%d "
		"Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X "
		"Avail:%X Vector:%02X Dest:%08X "
		"SID:%04X SQ:%X SVT:%X)\n",
		attr->ioapic, irte.present, irte.fpd, irte.dst_mode,
		irte.redir_hint, irte.trigger_mode, irte.dlvry_mode,
		irte.avail, irte.vector, irte.dest_id,
		irte.sid, irte.sq, irte.svt);

	entry = (struct IR_IO_APIC_route_entry *)route_entry;
	memset(entry, 0, sizeof(*entry));

	entry->index2 = (index >> 15) & 0x1;
	entry->zero = 0;
	entry->format = 1;
	entry->index = (index & 0x7fff);
	/*
	 * The IO-APIC RTE is configured with the virtual vector; the
	 * irq handler will do the explicit EOI to the io-apic.
	 */
	entry->vector = attr->ioapic_pin;
	entry->mask = 0;		/* enable IRQ */
	entry->trigger = attr->trigger;
	entry->polarity = attr->polarity;

	/*
	 * Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (attr->trigger)
		entry->mask = 1;

	return 0;
}

/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For both level and edge triggered, irq migration is a simple atomic
 * update (of vector and cpu destination) of the IRTE plus a flush of the
 * hardware cache.
 *
 * For level triggered, we eliminate the io-apic RTE modification (with the
 * updated vector information), by using a virtual vector (io-apic pin number).
 * The real vector that is used for interrupting the cpu will come from
 * the interrupt-remapping table entry.
 *
 * As the migration is a simple atomic update of the IRTE, the same mechanism
 * is used to migrate MSI irq's in the presence of interrupt-remapping.
 */
static int
intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
			  bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest, irq = data->irq;
	struct irte irte;
	int err;

	if (!config_enabled(CONFIG_SMP))
		return -EINVAL;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return -EINVAL;

	if (get_irte(irq, &irte))
		return -EBUSY;

	err = assign_irq_vector(irq, cfg, mask);
	if (err)
		return err;

	err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
	if (err) {
		if (assign_irq_vector(irq, cfg, data->affinity))
			pr_err("Failed to recover vector for irq %d\n", irq);
		return err;
	}

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * Atomically updates the IRTE with the new destination, vector
	 * and flushes the interrupt entry cache.
	 */
	modify_irte(irq, &irte);

	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination.  So, time to clean up the previous
	 * vector allocation.
	 */
	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	cpumask_copy(data->affinity, mask);
	return 0;
}

static void intel_compose_msi_msg(struct pci_dev *pdev,
				  unsigned int irq, unsigned int dest,
				  struct msi_msg *msg, u8 hpet_id)
{
	struct irq_cfg *cfg;
	struct irte irte;
	u16 sub_handle = 0;
	int ir_index;

	cfg = irq_get_chip_data(irq);

	ir_index = map_irq_to_irte_handle(irq, &sub_handle);
	BUG_ON(ir_index == -1);

	prepare_irte(&irte, cfg->vector, dest);

	/* Set source-id of interrupt request */
	if (pdev)
		set_msi_sid(&irte, pdev);
	else
		set_hpet_sid(&irte, hpet_id);

	modify_irte(irq, &irte);

	msg->address_hi = MSI_ADDR_BASE_HI;
	msg->data = sub_handle;
	msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
			  MSI_ADDR_IR_SHV |
			  MSI_ADDR_IR_INDEX1(ir_index) |
			  MSI_ADDR_IR_INDEX2(ir_index);
}

/*
 * Map the PCI dev to the corresponding remapping hardware unit
 * and allocate 'nvec' consecutive interrupt-remapping table entries
 * in it.
 */
static int intel_msi_alloc_irq(struct pci_dev *dev, int irq, int nvec)
{
	struct intel_iommu *iommu;
	int index;

	down_read(&dmar_global_lock);
	iommu = map_dev_to_ir(dev);
	if (!iommu) {
		printk(KERN_ERR
		       "Unable to map PCI %s to iommu\n", pci_name(dev));
		index = -ENOENT;
	} else {
		index = alloc_irte(iommu, irq, nvec);
		if (index < 0) {
			printk(KERN_ERR
			       "Unable to allocate %d IRTEs for PCI %s\n",
			       nvec, pci_name(dev));
			index = -ENOSPC;
		}
	}
	up_read(&dmar_global_lock);

	return index;
}

static int intel_msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
			       int index, int sub_handle)
{
	struct intel_iommu *iommu;
	int ret = -ENOENT;

	down_read(&dmar_global_lock);
	iommu = map_dev_to_ir(pdev);
	if (iommu) {
		/*
		 * Set up the mapping between the irq and the IRTE
		 * base index, with the sub_handle pointing to the
		 * appropriate interrupt remap table entry.
		 */
		set_irte_irq(irq, iommu, index, sub_handle);
		ret = 0;
	}
	up_read(&dmar_global_lock);

	return ret;
}

static int intel_alloc_hpet_msi(unsigned int irq, unsigned int id)
{
	int ret = -1;
	struct intel_iommu *iommu;
	int index;

	down_read(&dmar_global_lock);
	iommu = map_hpet_to_ir(id);
	if (iommu) {
		index = alloc_irte(iommu, irq, 1);
		if (index >= 0)
			ret = 0;
	}
	up_read(&dmar_global_lock);

	return ret;
}

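/*
 * VT-d's callbacks for the arch-neutral irq_remapping layer; x86 code
 * reaches this driver through the irq_remap_ops indirection rather
 * than calling into VT-d directly.
 */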
struct irq_remap_ops intel_irq_remap_ops = {
	.supported		= intel_irq_remapping_supported,
	.prepare		= dmar_table_init,
	.enable			= intel_enable_irq_remapping,
	.disable		= disable_irq_remapping,
	.reenable		= reenable_irq_remapping,
	.enable_faulting	= enable_drhd_fault_handling,
	.setup_ioapic_entry	= intel_setup_ioapic_entry,
	.set_affinity		= intel_ioapic_set_affinity,
	.free_irq		= free_irte,
	.compose_msi_msg	= intel_compose_msi_msg,
	.msi_alloc_irq		= intel_msi_alloc_irq,
	.msi_setup_irq		= intel_msi_setup_irq,
	.alloc_hpet_msi		= intel_alloc_hpet_msi,
};

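/*
 * DRHD hotplug is not supported while interrupt remapping is active,
 * so reject insertion/removal of a remapping unit in that case.
 */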
int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
	return irq_remapping_enabled ? -ENOSYS : 0;
}