#define pr_fmt(fmt)	"DMAR-IR: " fmt

#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/intel-iommu.h>
#include <linux/acpi.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/irq_remapping.h>
#include <asm/pci-direct.h>
#include <asm/msidef.h>

#include "irq_remapping.h"

struct ioapic_scope {
	struct intel_iommu *iommu;
	unsigned int id;
	unsigned int bus;	/* PCI bus number */
	unsigned int devfn;	/* PCI devfn number */
};

struct hpet_scope {
	struct intel_iommu *iommu;
	u8 id;
	unsigned int bus;
	unsigned int devfn;
};

#define IR_X2APIC_MODE(mode)	(mode ? (1 << 11) : 0)
#define IRTE_DEST(dest)		((eim_mode) ? dest : dest << 8)

static int __read_mostly eim_mode;
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];

/*
 * Lock ordering:
 * ->dmar_global_lock
 *	->irq_2_ir_lock
 *		->qi->q_lock
 *	->iommu->register_lock
 * Note:
 * intel_irq_remap_ops.{supported,prepare,enable,disable,reenable} are called
 * in a single-threaded environment with interrupts disabled, so there is no
 * need to take the dmar_global_lock.
 */
static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);

static int __init parse_ioapics_under_ir(void);

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	struct irq_cfg *cfg = irq_cfg(irq);
	return cfg ? &cfg->irq_2_iommu : NULL;
}
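
/*
 * Fetch a copy of the IRTE referenced by @irq into @entry.  Fails with
 * -1 if the irq has no remapping state or no IOMMU attached to it yet.
 */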
static int get_irte(int irq, struct irte *entry)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int index;

	if (!entry || !irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	if (unlikely(!irq_iommu->iommu)) {
		raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return 0;
}
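
/*
 * Allocate @count consecutive IRTEs for @irq in @iommu's remapping table.
 * Requests for more than one entry are rounded up to a power of two, and
 * the resulting sub-handle mask is remembered in irte_mask so the whole
 * region can be released later.  Returns the index of the first entry,
 * or -1 on failure.
 */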
static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct irq_cfg *cfg = irq_cfg(irq);
	unsigned int mask = 0;
	unsigned long flags;
	int index;

	if (!count || !irq_iommu)
		return -1;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		pr_err("Requested mask %x exceeds the max invalidation handle mask value %Lx\n",
		       mask, ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	index = bitmap_find_free_region(table->bitmap,
					INTR_REMAP_TABLE_ENTRIES, mask);
	if (index < 0) {
		pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
	} else {
		cfg->remapped = 1;
		irq_iommu->iommu = iommu;
		irq_iommu->irte_index = index;
		irq_iommu->sub_handle = 0;
		irq_iommu->irte_mask = mask;
	}
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}
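
/*
 * Issue a selective Interrupt Entry Cache invalidation through the
 * queued invalidation interface, covering 2^@mask entries starting
 * at @index.
 */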
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}
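
/*
 * Look up the IRTE base index for @irq and report its sub-handle
 * through @sub_handle.
 */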
static int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return index;
}
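
/*
 * Bind @irq to an already-allocated IRTE region: record the IOMMU,
 * base @index and @subhandle in the irq's irq_2_iommu data.
 */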
static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct irq_cfg *cfg = irq_cfg(irq);
	unsigned long flags;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	cfg->remapped = 1;
	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}
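
/*
 * Atomically rewrite the IRTE referenced by @irq with @irte_modified,
 * flush the cache line and invalidate the hardware's interrupt entry
 * cache for that index.
 */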
static int modify_irte(int irq, struct irte *irte_modified)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct intel_iommu *iommu;
	unsigned long flags;
	struct irte *irte;
	int rc, index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit(&irte->low, irte_modified->low);
	set_64bit(&irte->high, irte_modified->high);
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu)
			return ir_hpet[i].iommu;
	return NULL;
}

static struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu)
			return ir_ioapic[i].iommu;
	return NULL;
}

static struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}
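
/*
 * Zero the IRTE region owned by @irq_iommu and give it back to the
 * allocation bitmap.  Sub-handle users share entries owned by
 * sub-handle 0, so they are skipped here.
 */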
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct irte *start, *entry, *end;
	struct intel_iommu *iommu;
	int index;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	start = iommu->ir_table->base + index;
	end = start + (1 << irq_iommu->irte_mask);

	for (entry = start; entry < end; entry++) {
		set_64bit(&entry->low, 0);
		set_64bit(&entry->high, 0);
	}
	bitmap_release_region(iommu->ir_table->bitmap, index,
			      irq_iommu->irte_mask);

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}
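
/*
 * Release all remapping state attached to @irq: clear its IRTEs and
 * reset the irq_2_iommu bookkeeping.
 */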
static int free_irte(int irq)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int rc;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	rc = clear_entries(irq_iommu);

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

/*
 * source validation type
 */
#define SVT_NO_VERIFY		0x0	/* no verification is required */
#define SVT_VERIFY_SID_SQ	0x1	/* verify using SID and SQ fields */
#define SVT_VERIFY_BUS		0x2	/* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16	0x0	/* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1	0x1	/* verify most significant 13 bits, ignore
				 * the third least significant bit
				 */
#define SQ_13_IGNORE_2	0x2	/* verify most significant 13 bits, ignore
				 * the second and third least significant bits
				 */
#define SQ_13_IGNORE_3	0x3	/* verify most significant 13 bits, ignore
				 * the three least significant bits
				 */

/*
 * set SVT, SQ and SID fields of irte to verify
 * source ids of interrupt requests
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
			 unsigned int sq, unsigned int sid)
{
	if (disable_sourceid_checking)
		svt = SVT_NO_VERIFY;
	irte->svt = svt;
	irte->sq = sq;
	irte->sid = sid;
}

static int set_ioapic_sid(struct irte *irte, int apic)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	down_read(&dmar_global_lock);
	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) {
			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
			break;
		}
	}
	up_read(&dmar_global_lock);

	if (sid == 0) {
		pr_warn("Failed to set source-id of IOAPIC (%d)\n", apic);
		return -1;
	}

	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

	return 0;
}

static int set_hpet_sid(struct irte *irte, u8 id)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	down_read(&dmar_global_lock);
	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].iommu && ir_hpet[i].id == id) {
			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
			break;
		}
	}
	up_read(&dmar_global_lock);

	if (sid == 0) {
		pr_warn("Failed to set source-id of HPET block (%d)\n", id);
		return -1;
	}

	/*
	 * Should really use SQ_ALL_16. Some platforms are broken.
	 * While we figure out the right quirks for these broken platforms, use
	 * SQ_13_IGNORE_3 for now.
	 */
	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

	return 0;
}

struct set_msi_sid_data {
	struct pci_dev *pdev;
	u16 alias;
};

static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct set_msi_sid_data *data = opaque;

	data->pdev = pdev;
	data->alias = alias;

	return 0;
}

static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct set_msi_sid_data data;

	if (!irte || !dev)
		return -1;

	pci_for_each_dma_alias(dev, set_msi_sid_cb, &data);

	/*
	 * The DMA alias walk provides us with a PCI device and alias.  The
	 * only case where it will return an alias on a different bus than
	 * the device is the case of a PCIe-to-PCI bridge, where the alias
	 * is for the subordinate bus.  In this case we can only verify
	 * the bus.
	 *
	 * If the alias device is on a different bus than our source device
	 * then we have a topology based alias, use it.
	 *
	 * Otherwise, the alias is for a device DMA quirk and we cannot
	 * assume that MSI uses the same requester ID.  Therefore use the
	 * original device.
	 */
	if (PCI_BUS_NUM(data.alias) != data.pdev->bus->number)
		set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
			     PCI_DEVID(PCI_BUS_NUM(data.alias),
				       dev->bus->number));
	else if (data.pdev->bus->number != dev->bus->number)
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, data.alias);
	else
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     PCI_DEVID(dev->bus->number, dev->devfn));

	return 0;
}
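
/*
 * Program the hardware with the remapping table and turn remapping on:
 * write the table address and mode to DMAR_IRTA_REG, invalidate the
 * interrupt entry cache globally, then set IRE (and clear CFI to block
 * compatibility-format MSIs) in the global command register.
 */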
static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * global invalidation of interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	iommu->gcmd |= DMA_GCMD_IRE;
	iommu->gcmd &= ~DMA_GCMD_CFI;	/* Block compatibility-format MSIs */
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	/*
	 * With CFI clear in the Global Command register, we should be
	 * protected from dangerous (i.e. compatibility) interrupts
	 * regardless of x2apic status.  Check just to be sure.
	 */
	if (sts & DMA_GSTS_CFIS)
		WARN(1, KERN_WARNING
			"Compatibility-format IRQs enabled despite intr remapping;\n"
			"you are vulnerable to IRQ injection.\n");

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
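
/*
 * Allocate the per-IOMMU remapping resources: the page block holding
 * the interrupt remapping table and the bitmap used to track free
 * entries in it.
 */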
static int intel_setup_irq_remapping(struct intel_iommu *iommu)
{
	struct ir_table *ir_table;
	struct page *pages;
	unsigned long *bitmap;

	if (iommu->ir_table)
		return 0;

	ir_table = kzalloc(sizeof(struct ir_table), GFP_KERNEL);
	if (!ir_table)
		return -ENOMEM;

	pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);

	if (!pages) {
		pr_err("IR%d: failed to allocate pages of order %d\n",
		       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
		goto out_free_table;
	}

	bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
			 sizeof(long), GFP_ATOMIC);
	if (bitmap == NULL) {
		pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
		goto out_free_pages;
	}

	ir_table->base = page_address(pages);
	ir_table->bitmap = bitmap;
	iommu->ir_table = ir_table;
	return 0;

out_free_pages:
	__free_pages(pages, INTR_REMAP_PAGE_ORDER);
out_free_table:
	kfree(ir_table);
	return -ENOMEM;
}

static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
{
	if (iommu && iommu->ir_table) {
		free_pages((unsigned long)iommu->ir_table->base,
			   INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table->bitmap);
		kfree(iommu->ir_table);
		iommu->ir_table = NULL;
	}
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * global invalidation of interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int __init dmar_x2apic_optout(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar || no_x2apic_optout)
		return 0;
	return dmar->flags & DMAR_X2APIC_OPT_OUT;
}

static void __init intel_cleanup_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_iommu(iommu, drhd) {
		if (ecap_ir_support(iommu->ecap)) {
			iommu_disable_irq_remapping(iommu);
			intel_teardown_irq_remapping(iommu);
		}
	}

	if (x2apic_supported())
		pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
}
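
/*
 * Early feasibility check: bail out on chipsets with broken interrupt
 * remapping, verify that every IOMMU supports IR and that all IOAPICs
 * are covered by one, and do the table allocations up front.
 */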
static int __init intel_prepare_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	if (irq_remap_broken) {
		pr_warn("This system BIOS has enabled interrupt remapping\n"
			"on a chipset that contains an erratum making that\n"
			"feature unstable. To maintain system stability\n"
			"interrupt remapping is being disabled. Please\n"
			"contact your BIOS vendor for an update\n");
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
		return -ENODEV;
	}

	if (dmar_table_init() < 0)
		return -ENODEV;

	if (!dmar_ir_support())
		return -ENODEV;

	if (parse_ioapics_under_ir() != 1) {
		pr_info("Not enabling interrupt remapping\n");
		goto error;
	}

	/* First make sure all IOMMUs support IRQ remapping */
	for_each_iommu(iommu, drhd)
		if (!ecap_ir_support(iommu->ecap))
			goto error;

	/* Do the allocations early */
	for_each_iommu(iommu, drhd)
		if (intel_setup_irq_remapping(iommu))
			goto error;

	return 0;

error:
	intel_cleanup_irq_remapping();
	return -ENODEV;
}
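
/*
 * Switch all DRHDs over to remapped mode: quiesce whatever the BIOS
 * left enabled, decide between xapic and x2apic (EIM) operation, then
 * enable queued invalidation and interrupt remapping on each IOMMU.
 */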
static int __init intel_enable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool setup = false;
	int eim = 0;

	if (x2apic_supported()) {
		eim = !dmar_x2apic_optout();
		if (!eim)
			pr_info("x2apic is disabled because BIOS sets x2apic opt out bit. You can use 'intremap=no_x2apic_optout' to override the BIOS setting.\n");
	}

	for_each_iommu(iommu, drhd) {
		/*
		 * If the queued invalidation is already initialized,
		 * shouldn't disable it.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);

		/*
		 * Disable intr remapping and queued invalidation, if already
		 * enabled prior to OS handover.
		 */
		iommu_disable_irq_remapping(iommu);

		dmar_disable_qi(iommu);
	}

	/*
	 * check for the Interrupt-remapping support
	 */
	for_each_iommu(iommu, drhd)
		if (eim && !ecap_eim_support(iommu->ecap)) {
			pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
				drhd->reg_base_addr, iommu->ecap);
			eim = 0;
		}
	eim_mode = eim;
	if (eim)
		pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");

	/*
	 * Enable queued invalidation for all the DRHD's.
	 */
	for_each_iommu(iommu, drhd) {
		int ret = dmar_enable_qi(iommu);

		if (ret) {
			pr_err("DRHD %Lx: failed to enable queued invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			goto error;
		}
	}

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		iommu_set_irq_remapping(iommu, eim);
		setup = true;
	}

	if (!setup)
		goto error;

	irq_remapping_enabled = 1;

	/*
	 * VT-d has a different layout for IO-APIC entries when
	 * interrupt remapping is enabled. So it needs a special routine
	 * to print IO-APIC entries for debugging purposes too.
	 */
	x86_io_apic_ops.print_entries = intel_ir_io_apic_print_entries;

	pr_info("Enabled IRQ remapping in %s mode\n", eim ? "x2apic" : "xapic");

	return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;

error:
	intel_cleanup_irq_remapping();
	return -1;
}

static int ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
				   struct intel_iommu *iommu,
				   struct acpi_dmar_hardware_unit *drhd)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count, free = -1;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->device, path->function,
					   PCI_SECONDARY_BUS);
		path++;
	}

	for (count = 0; count < MAX_HPET_TBS; count++) {
		if (ir_hpet[count].iommu == iommu &&
		    ir_hpet[count].id == scope->enumeration_id)
			return 0;
		else if (ir_hpet[count].iommu == NULL && free == -1)
			free = count;
	}
	if (free == -1) {
		pr_warn("Exceeded Max HPET blocks\n");
		return -ENOSPC;
	}

	ir_hpet[free].iommu = iommu;
	ir_hpet[free].id    = scope->enumeration_id;
	ir_hpet[free].bus   = bus;
	ir_hpet[free].devfn = PCI_DEVFN(path->device, path->function);
	pr_info("HPET id %d under DRHD base 0x%Lx\n",
		scope->enumeration_id, drhd->address);

	return 0;
}

static int ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				     struct intel_iommu *iommu,
				     struct acpi_dmar_hardware_unit *drhd)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count, free = -1;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->device, path->function,
					   PCI_SECONDARY_BUS);
		path++;
	}

	for (count = 0; count < MAX_IO_APICS; count++) {
		if (ir_ioapic[count].iommu == iommu &&
		    ir_ioapic[count].id == scope->enumeration_id)
			return 0;
		else if (ir_ioapic[count].iommu == NULL && free == -1)
			free = count;
	}
	if (free == -1) {
		pr_warn("Exceeded Max IO APICS\n");
		return -ENOSPC;
	}

	ir_ioapic[free].bus   = bus;
	ir_ioapic[free].devfn = PCI_DEVFN(path->device, path->function);
	ir_ioapic[free].iommu = iommu;
	ir_ioapic[free].id    = scope->enumeration_id;
	pr_info("IOAPIC id %d under DRHD base 0x%Lx IOMMU %d\n",
		scope->enumeration_id, drhd->address, iommu->seq_id);

	return 0;
}

static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
				      struct intel_iommu *iommu)
{
	int ret = 0;
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end && ret == 0) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC)
			ret = ir_parse_one_ioapic_scope(scope, iommu, drhd);
		else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET)
			ret = ir_parse_one_hpet_scope(scope, iommu, drhd);
		start += scope->length;
	}

	return ret;
}

static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].iommu == iommu)
			ir_hpet[i].iommu = NULL;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].iommu == iommu)
			ir_ioapic[i].iommu = NULL;
}

/*
 * Finds the association between IOAPICs and their Interrupt-remapping
 * hardware units.
 */
static int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool ir_supported = false;
	int ioapic_idx;

	for_each_iommu(iommu, drhd)
		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = true;
		}

	if (!ir_supported)
		return 0;

	for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
		int ioapic_id = mpc_ioapic_id(ioapic_idx);

		if (!map_ioapic_to_ir(ioapic_id)) {
			pr_err(FW_BUG "ioapic %d has no mapping iommu, "
			       "interrupt remapping will be disabled\n",
			       ioapic_id);
			return -1;
		}
	}

	return 1;
}

static int __init ir_dev_scope_init(void)
{
	int ret;

	if (!irq_remapping_enabled)
		return 0;

	down_write(&dmar_global_lock);
	ret = dmar_dev_scope_init();
	up_write(&dmar_global_lock);

	return ret;
}
rootfs_initcall(ir_dev_scope_init);

static void disable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/*
	 * Disable Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_irq_remapping(iommu);
	}
}

static int reenable_irq_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	bool setup = false;
	struct intel_iommu *iommu = NULL;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		/* Set up interrupt remapping for iommu.*/
		iommu_set_irq_remapping(iommu, eim);
		setup = true;
	}

	if (!setup)
		goto error;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}
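
/*
 * Initialize an IRTE for the given @vector and @dest APIC id, using
 * the destination and delivery modes of the current APIC driver.
 */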
static void prepare_irte(struct irte *irte, int vector,
			 unsigned int dest)
{
	memset(irte, 0, sizeof(*irte));

	irte->present = 1;
	irte->dst_mode = apic->irq_dest_mode;
	/*
	 * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
	 * actual level or edge trigger will be set up in the IO-APIC
	 * RTE. This will help simplify level triggered irq migration.
	 * For more details, see the comments (in io_apic.c) explaining IO-APIC
	 * irq migration in the presence of interrupt-remapping.
	 */
	irte->trigger_mode = 0;
	irte->dlvry_mode = apic->irq_delivery_mode;
	irte->vector = vector;
	irte->dest_id = IRTE_DEST(dest);
	irte->redir_hint = 1;
}

static int intel_setup_ioapic_entry(int irq,
				    struct IO_APIC_route_entry *route_entry,
				    unsigned int destination, int vector,
				    struct io_apic_irq_attr *attr)
{
	int ioapic_id = mpc_ioapic_id(attr->ioapic);
	struct intel_iommu *iommu;
	struct IR_IO_APIC_route_entry *entry;
	struct irte irte;
	int index;

	down_read(&dmar_global_lock);
	iommu = map_ioapic_to_ir(ioapic_id);
	if (!iommu) {
		pr_warn("No mapping iommu for ioapic %d\n", ioapic_id);
		index = -ENODEV;
	} else {
		index = alloc_irte(iommu, irq, 1);
		if (index < 0) {
			pr_warn("Failed to allocate IRTE for ioapic %d\n",
				ioapic_id);
			index = -ENOMEM;
		}
	}
	up_read(&dmar_global_lock);
	if (index < 0)
		return index;

	prepare_irte(&irte, vector, destination);

	/* Set source-id of interrupt request */
	set_ioapic_sid(&irte, ioapic_id);

	modify_irte(irq, &irte);

	apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: "
		"Set IRTE entry (P:%d FPD:%d Dst_Mode:%d "
		"Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X "
		"Avail:%X Vector:%02X Dest:%08X "
		"SID:%04X SQ:%X SVT:%X)\n",
		attr->ioapic, irte.present, irte.fpd, irte.dst_mode,
		irte.redir_hint, irte.trigger_mode, irte.dlvry_mode,
		irte.avail, irte.vector, irte.dest_id,
		irte.sid, irte.sq, irte.svt);

	entry = (struct IR_IO_APIC_route_entry *)route_entry;
	memset(entry, 0, sizeof(*entry));

	entry->index2	= (index >> 15) & 0x1;
	entry->zero	= 0;
	entry->format	= 1;
	entry->index	= (index & 0x7fff);
	/*
	 * IO-APIC RTE will be configured with virtual vector.
	 * irq handler will do the explicit EOI to the io-apic.
	 */
	entry->vector	= attr->ioapic_pin;
	entry->mask	= 0;			/* enable IRQ */
	entry->trigger	= attr->trigger;
	entry->polarity	= attr->polarity;

	/*
	 * Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (attr->trigger)
		entry->mask = 1;

	return 0;
}

/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For both level and edge triggered, irq migration is a simple atomic
 * update (of vector and cpu destination) of IRTE and flush the hardware cache.
 *
 * For level triggered, we eliminate the io-apic RTE modification (with the
 * updated vector information), by using a virtual vector (io-apic pin number).
 * Real vector that is used for interrupting cpu will be coming from
 * the interrupt-remapping table entry.
 *
 * As the migration is a simple atomic update of IRTE, the same mechanism
 * is used to migrate MSI irqs in the presence of interrupt-remapping.
 */
static int
intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
			  bool force)
{
	struct irq_cfg *cfg = irqd_cfg(data);
	unsigned int dest, irq = data->irq;
	struct irte irte;
	int err;

	if (!config_enabled(CONFIG_SMP))
		return -EINVAL;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return -EINVAL;

	if (get_irte(irq, &irte))
		return -EBUSY;

	err = assign_irq_vector(irq, cfg, mask);
	if (err)
		return err;

	err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
	if (err) {
		if (assign_irq_vector(irq, cfg, data->affinity))
			pr_err("Failed to recover vector for irq %d\n", irq);
		return err;
	}

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * Atomically updates the IRTE with the new destination, vector
	 * and flushes the interrupt entry cache.
	 */
	modify_irte(irq, &irte);

	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, time to cleanup the previous
	 * vector allocation.
	 */
	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	cpumask_copy(data->affinity, mask);
	return 0;
}
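
/*
 * Compose a remappable MSI message: in remapped format the address and
 * data registers carry the IRTE index and sub-handle instead of the
 * usual destination id and vector.
 */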
static void intel_compose_msi_msg(struct pci_dev *pdev,
				  unsigned int irq, unsigned int dest,
				  struct msi_msg *msg, u8 hpet_id)
{
	struct irq_cfg *cfg;
	struct irte irte;
	u16 sub_handle = 0;
	int ir_index;

	cfg = irq_cfg(irq);

	ir_index = map_irq_to_irte_handle(irq, &sub_handle);
	BUG_ON(ir_index == -1);

	prepare_irte(&irte, cfg->vector, dest);

	/* Set source-id of interrupt request */
	if (pdev)
		set_msi_sid(&irte, pdev);
	else
		set_hpet_sid(&irte, hpet_id);

	modify_irte(irq, &irte);

	msg->address_hi = MSI_ADDR_BASE_HI;
	msg->data = sub_handle;
	msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
			  MSI_ADDR_IR_SHV |
			  MSI_ADDR_IR_INDEX1(ir_index) |
			  MSI_ADDR_IR_INDEX2(ir_index);
}

/*
 * Map the PCI dev to the corresponding remapping hardware unit
 * and allocate 'nvec' consecutive interrupt-remapping table entries
 * in it.
 */
static int intel_msi_alloc_irq(struct pci_dev *dev, int irq, int nvec)
{
	struct intel_iommu *iommu;
	int index;

	down_read(&dmar_global_lock);
	iommu = map_dev_to_ir(dev);
	if (!iommu) {
		pr_err("Unable to map PCI %s to iommu\n", pci_name(dev));
		index = -ENOENT;
	} else {
		index = alloc_irte(iommu, irq, nvec);
		if (index < 0) {
			pr_err("Unable to allocate %d IRTE for PCI %s\n",
			       nvec, pci_name(dev));
			index = -ENOSPC;
		}
	}
	up_read(&dmar_global_lock);

	return index;
}

static int intel_msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
			       int index, int sub_handle)
{
	struct intel_iommu *iommu;
	int ret = -ENOENT;

	down_read(&dmar_global_lock);
	iommu = map_dev_to_ir(pdev);
	if (iommu) {
		/*
		 * Set up the mapping between the irq and the IRTE
		 * base index, with the sub_handle pointing to the
		 * appropriate interrupt remap table entry.
		 */
		set_irte_irq(irq, iommu, index, sub_handle);
		ret = 0;
	}
	up_read(&dmar_global_lock);

	return ret;
}

static int intel_alloc_hpet_msi(unsigned int irq, unsigned int id)
{
	int ret = -1;
	struct intel_iommu *iommu;
	int index;

	down_read(&dmar_global_lock);
	iommu = map_hpet_to_ir(id);
	if (iommu) {
		index = alloc_irte(iommu, irq, 1);
		if (index >= 0)
			ret = 0;
	}
	up_read(&dmar_global_lock);

	return ret;
}
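
/* Entry points called by the generic irq_remapping layer. */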
struct irq_remap_ops intel_irq_remap_ops = {
	.prepare		= intel_prepare_irq_remapping,
	.enable			= intel_enable_irq_remapping,
	.disable		= disable_irq_remapping,
	.reenable		= reenable_irq_remapping,
	.enable_faulting	= enable_drhd_fault_handling,
	.setup_ioapic_entry	= intel_setup_ioapic_entry,
	.set_affinity		= intel_ioapic_set_affinity,
	.free_irq		= free_irte,
	.compose_msi_msg	= intel_compose_msi_msg,
	.msi_alloc_irq		= intel_msi_alloc_irq,
	.msi_setup_irq		= intel_msi_setup_irq,
	.alloc_hpet_msi		= intel_alloc_hpet_msi,
};

/*
 * Support of Interrupt Remapping Unit Hotplug
 */
static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
{
	int ret;
	int eim = x2apic_enabled();

	if (eim && !ecap_eim_support(iommu->ecap)) {
		pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
			iommu->reg_phys, iommu->ecap);
		return -ENODEV;
	}

	if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) {
		pr_warn("DRHD %Lx: failed to parse managed IOAPIC/HPET\n",
			iommu->reg_phys);
		return -ENODEV;
	}

	/* TODO: check all IOAPICs are covered by IOMMU */

	/* Setup Interrupt-remapping now. */
	ret = intel_setup_irq_remapping(iommu);
	if (ret) {
		pr_err("DRHD %Lx: failed to allocate resource\n",
		       iommu->reg_phys);
		ir_remove_ioapic_hpet_scope(iommu);
		return ret;
	}

	if (!iommu->qi) {
		/* Clear previous faults. */
		dmar_fault(-1, iommu);
		iommu_disable_irq_remapping(iommu);
		dmar_disable_qi(iommu);
	}

	/* Enable queued invalidation */
	ret = dmar_enable_qi(iommu);
	if (!ret) {
		iommu_set_irq_remapping(iommu, eim);
	} else {
		pr_err("DRHD %Lx: failed to enable queued invalidation, ecap %Lx, ret %d\n",
		       iommu->reg_phys, iommu->ecap, ret);
		intel_teardown_irq_remapping(iommu);
		ir_remove_ioapic_hpet_scope(iommu);
	}

	return ret;
}

int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
	int ret = 0;
	struct intel_iommu *iommu = dmaru->iommu;

	if (!irq_remapping_enabled)
		return 0;
	if (iommu == NULL)
		return -EINVAL;
	if (!ecap_ir_support(iommu->ecap))
		return 0;

	if (insert) {
		if (!iommu->ir_table)
			ret = dmar_ir_add(dmaru, iommu);
	} else {
		if (iommu->ir_table) {
			if (!bitmap_empty(iommu->ir_table->bitmap,
					  INTR_REMAP_TABLE_ENTRIES)) {
				ret = -EBUSY;
			} else {
				iommu_disable_irq_remapping(iommu);
				intel_teardown_irq_remapping(iommu);
				ir_remove_ioapic_hpet_scope(iommu);
			}
		}
	}

	return ret;
}