#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <linux/intel-iommu.h>
#include <acpi/acpi.h>
#include <asm/irq_remapping.h>
#include <asm/pci-direct.h>
#include <asm/msidef.h>

#include "irq_remapping.h"

struct ioapic_scope {
	struct intel_iommu *iommu;
	unsigned int id;
	unsigned int bus;	/* PCI bus number */
	unsigned int devfn;	/* PCI devfn number */
};

struct hpet_scope {
	struct intel_iommu *iommu;
	u8 id;
	unsigned int bus;
	unsigned int devfn;
};

#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
#define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8)

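/*
 * Note: IR_X2APIC_MODE() sets the EIME bit (bit 11) of DMAR_IRTA_REG.
 * In xapic mode the 8-bit APIC destination-id is programmed into bits
 * 15:8 of the IRTE dest_id field, while in x2apic (EIM) mode the full
 * 32-bit id is used as-is; IRTE_DEST() hides that difference.
 */
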
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
static int ir_ioapic_num, ir_hpet_num;

static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	struct irq_cfg *cfg = irq_get_chip_data(irq);
	return cfg ? &cfg->irq_2_iommu : NULL;
}

int get_irte(int irq, struct irte *entry)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int index;

	if (!entry || !irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return 0;
}

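/*
 * Allocate a contiguous block of IRTEs for @irq. Multi-entry requests
 * (e.g. multi-MSI) are rounded up to a power of two and described by
 * the log2 "mask" stored in irq_2_iommu, which is later used to
 * invalidate the interrupt entry cache for the whole block at once.
 */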
static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	u16 index, start_index;
	unsigned int mask = 0;
	unsigned long flags;
	int i;

	if (!count || !irq_iommu)
		return -1;

	/*
	 * start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	do {
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* empty index found */
		if (i == index + count)
			break;

		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		if (index == start_index) {
			raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = mask;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}

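/*
 * Queue an Interrupt Entry Cache invalidation descriptor. With
 * @mask == 0 a single IRTE is invalidated; a non-zero mask covers the
 * 2^mask entries starting at @index.
 */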
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}

static int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return index;
}

static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}

static int modify_irte(int irq, struct irte *irte_modified)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct intel_iommu *iommu;
	unsigned long flags;
	struct irte *irte;
	int rc, index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit(&irte->low, irte_modified->low);
	set_64bit(&irte->high, irte_modified->high);
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].id == hpet_id)
			return ir_hpet[i].iommu;
	return NULL;
}

static struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic)
			return ir_ioapic[i].iommu;
	return NULL;
}

static struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}

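/*
 * Clear the IRTE block owned by this irq. Sub-handle users of a shared
 * block (sub_handle != 0) must not tear it down; only the owner with
 * sub_handle == 0 clears the entries and flushes the IEC.
 */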
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct irte *start, *entry, *end;
	struct intel_iommu *iommu;
	int index;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	start = iommu->ir_table->base + index;
	end = start + (1 << irq_iommu->irte_mask);

	for (entry = start; entry < end; entry++) {
		set_64bit(&entry->low, 0);
		set_64bit(&entry->high, 0);
	}

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

static int free_irte(int irq)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int rc;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	rc = clear_entries(irq_iommu);

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

/*
 * source validation type
 */
#define SVT_NO_VERIFY		0x0	/* no verification is required */
#define SVT_VERIFY_SID_SQ	0x1	/* verify using SID and SQ fields */
#define SVT_VERIFY_BUS		0x2	/* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16	0x0	/* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1	0x1	/* verify most significant 13 bits, ignore
				 * the third least significant bit
				 */
#define SQ_13_IGNORE_2	0x2	/* verify most significant 13 bits, ignore
				 * the second and third least significant bits
				 */
#define SQ_13_IGNORE_3	0x3	/* verify most significant 13 bits, ignore
				 * the three least significant bits
				 */

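/*
 * Worked example: with SVT_VERIFY_SID_SQ and SQ_13_IGNORE_3, a SID of
 * (0x10 << 8) | 0x08 (bus 0x10, device 1, function 0) matches
 * requester-ids 0x1008-0x100f, i.e. any function number of that
 * device. This helps when a device reports an inexact function
 * number, as with the broken HPET blocks handled below.
 */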
/*
 * set SVT, SQ and SID fields of irte to verify
 * source ids of interrupt requests
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
			 unsigned int sq, unsigned int sid)
{
	if (disable_sourceid_checking)
		svt = SVT_NO_VERIFY;
	irte->svt = svt;
	irte->sq = sq;
	irte->sid = sid;
}

static int set_ioapic_sid(struct irte *irte, int apic)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].id == apic) {
			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
			break;
		}
	}

	if (sid == 0) {
		pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
		return -1;
	}

	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

	return 0;
}

static int set_hpet_sid(struct irte *irte, u8 id)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].id == id) {
			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
			break;
		}
	}

	if (sid == 0) {
		pr_warning("Failed to set source-id of HPET block (%d)\n", id);
		return -1;
	}

	/*
	 * Should really use SQ_ALL_16, but some platforms are broken.
	 * While we figure out the right quirks for these broken platforms,
	 * use SQ_13_IGNORE_3 for now.
	 */
	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

	return 0;
}

static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct pci_dev *bridge;

	if (!irte || !dev)
		return -1;

	/* PCIe device or Root Complex integrated PCI device */
	if (pci_is_pcie(dev) || !dev->bus->parent) {
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     (dev->bus->number << 8) | dev->devfn);
		return 0;
	}

	bridge = pci_find_upstream_pcie_bridge(dev);
	if (bridge) {
		if (pci_is_pcie(bridge))/* this is a PCIe-to-PCI/PCIX bridge */
			set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
				     (bridge->bus->number << 8) | dev->bus->number);
		else /* this is a legacy PCI bridge */
			set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
				     (bridge->bus->number << 8) | bridge->devfn);
	}

	return 0;
}

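/*
 * Programming sequence per the VT-d spec: write the table address and
 * size into DMAR_IRTA_REG, latch it with GCMD.SIRTP, globally
 * invalidate the interrupt entry cache, and only then set GCMD.IRE to
 * start remapping interrupts.
 */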
static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	iommu->gcmd |= DMA_GCMD_SIRTP;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * global invalidation of interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
{
	struct ir_table *ir_table;
	struct page *pages;

	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
					     GFP_ATOMIC);

	if (!iommu->ir_table)
		return -ENOMEM;

	pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);

	if (!pages) {
		printk(KERN_ERR "failed to allocate pages of order %d\n",
		       INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table);
		return -ENOMEM;
	}

	ir_table->base = page_address(pages);

	iommu_set_irq_remapping(iommu, mode);
	return 0;
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * global invalidation of interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int __init dmar_x2apic_optout(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar || no_x2apic_optout)
		return 0;
	return dmar->flags & DMAR_X2APIC_OPT_OUT;
}

static int __init intel_irq_remapping_supported(void)
{
	struct dmar_drhd_unit *drhd;

	if (disable_irq_remap)
		return 0;

	if (!dmar_ir_support())
		return 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			return 0;
	}

	return 1;
}

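/*
 * Bring up remapping in phases: quiesce any state left over from a
 * previous kernel (e.g. kexec), check EIM support when x2apic wants
 * it, enable queued invalidation on every unit, then program and
 * enable each unit's interrupt-remapping table.
 */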
static int __init intel_enable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;
	int eim = 0;

	if (parse_ioapics_under_ir() != 1) {
		printk(KERN_INFO "Not enabling interrupt remapping\n");
		return -1;
	}

	if (x2apic_supported()) {
		eim = !dmar_x2apic_optout();
		WARN(!eim, KERN_WARNING
		     "Your BIOS is broken and requested that x2apic be disabled\n"
		     "This will leave your machine vulnerable to irq-injection attacks\n"
		     "Use 'intremap=no_x2apic_optout' to override the BIOS request\n");
	}

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		/*
		 * If queued invalidation is already initialized,
		 * we shouldn't disable it.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);

		/*
		 * Disable intr remapping and queued invalidation, if already
		 * enabled prior to OS handover.
		 */
		iommu_disable_irq_remapping(iommu);

		dmar_disable_qi(iommu);
	}

	/*
	 * check for the Interrupt-remapping support
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD,"
			       " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
			return -1;
		}
	}

	/*
	 * Enable queued invalidation for all the DRHD's.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_enable_qi(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued"
			       " invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			return -1;
		}
	}

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (intel_setup_irq_remapping(iommu, eim))
			goto error;

		setup = 1;
	}

	if (!setup)
		goto error;

	irq_remapping_enabled = 1;
	pr_info("Enabled IRQ remapping in %s mode\n", eim ? "x2apic" : "xapic");

	return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}

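/*
 * A DMAR device-scope entry names a device by a start bus plus a path
 * of (dev, fn) hops through PCI bridges. Walk the path with direct
 * config-space reads (the PCI core is not up yet) to find the bus the
 * device actually sits on.
 */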
static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
				    struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}
	ir_hpet[ir_hpet_num].bus = bus;
	ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_hpet[ir_hpet_num].iommu = iommu;
	ir_hpet[ir_hpet_num].id = scope->enumeration_id;
	ir_hpet_num++;
}

static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}

	ir_ioapic[ir_ioapic_num].bus = bus;
	ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_ioapic[ir_ioapic_num].iommu = iommu;
	ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
	ir_ioapic_num++;
}

static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;

	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			if (ir_ioapic_num == MAX_IO_APICS) {
				printk(KERN_WARNING "Exceeded Max IO APICS\n");
				return -1;
			}

			printk(KERN_INFO "IOAPIC id %d under DRHD base"
			       " 0x%Lx IOMMU %d\n", scope->enumeration_id,
			       drhd->address, iommu->seq_id);

			ir_parse_one_ioapic_scope(scope, iommu);
		} else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
			if (ir_hpet_num == MAX_HPET_TBS) {
				printk(KERN_WARNING "Exceeded Max HPET blocks\n");
				return -1;
			}

			printk(KERN_INFO "HPET id %d under DRHD base"
			       " 0x%Lx\n", scope->enumeration_id,
			       drhd->address);

			ir_parse_one_hpet_scope(scope, iommu);
		}
		start += scope->length;
	}

	return 0;
}

/*
 * Finds the association between IOAPICs and their Interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	int ir_supported = 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = 1;
		}
	}

	if (ir_supported && ir_ioapic_num != nr_ioapics) {
		printk(KERN_WARNING
		       "Not all IO-APICs listed under remapping hardware\n");
		return -1;
	}

	return ir_supported;
}

int __init ir_dev_scope_init(void)
{
	if (!irq_remapping_enabled)
		return 0;

	return dmar_dev_scope_init();
}
rootfs_initcall(ir_dev_scope_init);

static void disable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/*
	 * Disable Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_irq_remapping(iommu);
	}
}

static int reenable_irq_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;
	struct intel_iommu *iommu = NULL;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		/* Set up interrupt remapping for iommu.*/
		iommu_set_irq_remapping(iommu, eim);
		setup = 1;
	}

	if (!setup)
		goto error;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}

static void prepare_irte(struct irte *irte, int vector,
			 unsigned int dest)
{
	memset(irte, 0, sizeof(*irte));

	irte->present = 1;
	irte->dst_mode = apic->irq_dest_mode;
	/*
	 * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
	 * actual level or edge trigger will be set up in the IO-APIC
	 * RTE. This will help simplify level triggered irq migration.
	 * For more details, see the comments (in io_apic.c) explaining IO-APIC
	 * irq migration in the presence of interrupt-remapping.
	 */
	irte->trigger_mode = 0;
	irte->dlvry_mode = apic->irq_delivery_mode;
	irte->vector = vector;
	irte->dest_id = IRTE_DEST(dest);
	irte->redir_hint = 1;
}

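/*
 * In remapped format (format = 1) the IO-APIC RTE carries the 16-bit
 * IRTE index, split into a 15-bit "index" field plus the "index2" bit,
 * instead of a destination; the RTE vector field is reused for the
 * IO-APIC pin number, which serves as a virtual vector for EOI
 * handling.
 */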
static int intel_setup_ioapic_entry(int irq,
				    struct IO_APIC_route_entry *route_entry,
				    unsigned int destination, int vector,
				    struct io_apic_irq_attr *attr)
{
	int ioapic_id = mpc_ioapic_id(attr->ioapic);
	struct intel_iommu *iommu = map_ioapic_to_ir(ioapic_id);
	struct IR_IO_APIC_route_entry *entry;
	struct irte irte;
	int index;

	if (!iommu) {
		pr_warn("No mapping iommu for ioapic %d\n", ioapic_id);
		return -ENODEV;
	}

	entry = (struct IR_IO_APIC_route_entry *)route_entry;

	index = alloc_irte(iommu, irq, 1);
	if (index < 0) {
		pr_warn("Failed to allocate IRTE for ioapic %d\n", ioapic_id);
		return -ENOMEM;
	}

	prepare_irte(&irte, vector, destination);

	/* Set source-id of interrupt request */
	set_ioapic_sid(&irte, ioapic_id);

	modify_irte(irq, &irte);

	apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: "
		    "Set IRTE entry (P:%d FPD:%d Dst_Mode:%d "
		    "Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X "
		    "Avail:%X Vector:%02X Dest:%08X "
		    "SID:%04X SQ:%X SVT:%X)\n",
		    attr->ioapic, irte.present, irte.fpd, irte.dst_mode,
		    irte.redir_hint, irte.trigger_mode, irte.dlvry_mode,
		    irte.avail, irte.vector, irte.dest_id,
		    irte.sid, irte.sq, irte.svt);

	memset(entry, 0, sizeof(*entry));

	entry->index2 = (index >> 15) & 0x1;
	entry->zero = 0;
	entry->format = 1;
	entry->index = (index & 0x7fff);
	/*
	 * IO-APIC RTE will be configured with virtual vector.
	 * irq handler will do the explicit EOI to the io-apic.
	 */
	entry->vector = attr->ioapic_pin;
	entry->mask = 0;	/* enable IRQ */
	entry->trigger = attr->trigger;
	entry->polarity = attr->polarity;

	/* Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (attr->trigger)
		entry->mask = 1;

	return 0;
}

#ifdef CONFIG_SMP
/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For both level and edge triggered, irq migration is a simple atomic
 * update (of vector and cpu destination) of the IRTE and a flush of the
 * hardware cache.
 *
 * For level triggered, we eliminate the io-apic RTE modification (with the
 * updated vector information), by using a virtual vector (io-apic pin number).
 * The real vector used to interrupt the cpu comes from the
 * interrupt-remapping table entry.
 *
 * As the migration is a simple atomic update of IRTE, the same mechanism
 * is used to migrate MSI irq's in the presence of interrupt-remapping.
 */
static int
intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
			  bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest, irq = data->irq;
	struct irte irte;
	int err;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return -EINVAL;

	if (get_irte(irq, &irte))
		return -EBUSY;

	err = assign_irq_vector(irq, cfg, mask);
	if (err)
		return err;

	err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
	if (err) {
		if (assign_irq_vector(irq, cfg, data->affinity))
			pr_err("Failed to recover vector for irq %d\n", irq);
		return err;
	}

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * Atomically updates the IRTE with the new destination, vector
	 * and flushes the interrupt entry cache.
	 */
	modify_irte(irq, &irte);

	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, time to clean up the previous
	 * vector allocation.
	 */
	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	cpumask_copy(data->affinity, mask);
	return 0;
}
#endif

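/*
 * In remappable format the MSI address/data pair does not encode
 * destination and vector: the address carries the IRTE handle (SHV
 * plus the split index) and the data carries only the sub-handle,
 * which hardware adds to the handle to pick the final IRTE.
 */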
static void intel_compose_msi_msg(struct pci_dev *pdev,
				  unsigned int irq, unsigned int dest,
				  struct msi_msg *msg, u8 hpet_id)
{
	struct irq_cfg *cfg;
	struct irte irte;
	u16 sub_handle = 0;
	int ir_index;

	cfg = irq_get_chip_data(irq);

	ir_index = map_irq_to_irte_handle(irq, &sub_handle);
	BUG_ON(ir_index == -1);

	prepare_irte(&irte, cfg->vector, dest);

	/* Set source-id of interrupt request */
	if (pdev)
		set_msi_sid(&irte, pdev);
	else
		set_hpet_sid(&irte, hpet_id);

	modify_irte(irq, &irte);

	msg->address_hi = MSI_ADDR_BASE_HI;
	msg->data = sub_handle;
	msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
			  MSI_ADDR_IR_SHV |
			  MSI_ADDR_IR_INDEX1(ir_index) |
			  MSI_ADDR_IR_INDEX2(ir_index);
}

/*
 * Map the PCI dev to the corresponding remapping hardware unit
 * and allocate 'nvec' consecutive interrupt-remapping table entries
 * in it.
 */
static int intel_msi_alloc_irq(struct pci_dev *dev, int irq, int nvec)
{
	struct intel_iommu *iommu;
	int index;

	iommu = map_dev_to_ir(dev);
	if (!iommu) {
		printk(KERN_ERR
		       "Unable to map PCI %s to iommu\n", pci_name(dev));
		return -ENOENT;
	}

	index = alloc_irte(iommu, irq, nvec);
	if (index < 0) {
		printk(KERN_ERR
		       "Unable to allocate %d IRTEs for PCI %s\n", nvec,
		       pci_name(dev));
		return -ENOSPC;
	}
	return index;
}

static int intel_msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
			       int index, int sub_handle)
{
	struct intel_iommu *iommu;

	iommu = map_dev_to_ir(pdev);
	if (!iommu)
		return -ENOENT;
	/*
	 * Set up the mapping between the irq and the IRTE
	 * base index, with the sub_handle pointing to the
	 * appropriate interrupt remap table entry.
	 */
	set_irte_irq(irq, iommu, index, sub_handle);

	return 0;
}

static int intel_setup_hpet_msi(unsigned int irq, unsigned int id)
{
	struct intel_iommu *iommu = map_hpet_to_ir(id);
	int index;

	if (!iommu)
		return -1;

	index = alloc_irte(iommu, irq, 1);
	if (index < 0)
		return -1;

	return 0;
}

struct irq_remap_ops intel_irq_remap_ops = {
	.supported = intel_irq_remapping_supported,
	.prepare = dmar_table_init,
	.enable = intel_enable_irq_remapping,
	.disable = disable_irq_remapping,
	.reenable = reenable_irq_remapping,
	.enable_faulting = enable_drhd_fault_handling,
	.setup_ioapic_entry = intel_setup_ioapic_entry,
#ifdef CONFIG_SMP
	.set_affinity = intel_ioapic_set_affinity,
#endif
	.free_irq = free_irte,
	.compose_msi_msg = intel_compose_msi_msg,
	.msi_alloc_irq = intel_msi_alloc_irq,
	.msi_setup_irq = intel_msi_setup_irq,
	.setup_hpet_msi = intel_setup_hpet_msi,
};