/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */
#define pr_fmt(fmt) "DMAR: " fmt

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"
typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
struct dmar_res_callback {
        dmar_res_handler_t      cb[ACPI_DMAR_TYPE_RESERVED];
        void                    *arg[ACPI_DMAR_TYPE_RESERVED];
        bool                    ignore_unhandled;
        bool                    print_entry;
};
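
/*
 * Editorial note (not in the original source): dmar_res_callback is a small
 * dispatch table. dmar_walk_remapping_entries() looks up cb[entry->type] for
 * each DMAR sub-table entry and passes arg[entry->type] through to it. A
 * caller that only cares about DRHD entries might populate it like this:
 *
 *      struct dmar_res_callback cb = {
 *              .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
 *              .ignore_unhandled = true,
 *      };
 */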
/*
 * Assumptions:
 * 1) The hotplug framework guarantees that DMAR unit will be hot-added
 *    before IO devices managed by that unit.
 * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
 *    after IO devices managed by that unit.
 * 3) Hotplug events are rare.
 *
 * Locking rules for DMA and interrupt remapping related global data structures:
 * 1) Use dmar_global_lock in process context
 * 2) Use RCU in interrupt context
 */
DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static int dmar_dev_scope_status = 1;
static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

extern const struct iommu_ops intel_iommu_ops;
static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
        /*
         * add INCLUDE_ALL at the tail, so a scan of the list will
         * find it at the very end.
         */
        if (drhd->include_all)
                list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
        else
                list_add_rcu(&drhd->list, &dmar_drhd_units);
}
void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
        struct acpi_dmar_device_scope *scope;

        *cnt = 0;
        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
                    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
                    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
                        (*cnt)++;
                else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
                        scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
                        pr_warn("Unsupported device scope\n");
                }
                start += scope->length;
        }
        if (*cnt == 0)
                return NULL;

        return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
}
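
/*
 * Editorial note (not in the original source): dmar_alloc_dev_scope() is a
 * counting pass, not a parser. It walks the variable-length device-scope
 * records once, counts the entry types the driver tracks, and returns a
 * zeroed array of that many dmar_dev_scope slots; the caller fills the
 * slots later as matching devices are discovered.
 */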
void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
{
        int i;
        struct device *tmp_dev;

        if (*devices && *cnt) {
                for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
                        put_device(tmp_dev);
                kfree(*devices);
        }

        *devices = NULL;
        *cnt = 0;
}
/* Optimize out kzalloc()/kfree() for normal cases */
static char dmar_pci_notify_info_buf[64];
static struct dmar_pci_notify_info *
dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
{
        int level = 0;
        size_t size;
        struct pci_dev *tmp;
        struct dmar_pci_notify_info *info;

        BUG_ON(dev->is_virtfn);

        /* Only generate path[] for device addition event */
        if (event == BUS_NOTIFY_ADD_DEVICE)
                for (tmp = dev; tmp; tmp = tmp->bus->self)
                        level++;

        size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
        if (size <= sizeof(dmar_pci_notify_info_buf)) {
                info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
        } else {
                info = kzalloc(size, GFP_KERNEL);
                if (!info) {
                        pr_warn("Out of memory when allocating notify_info "
                                "for %s.\n", pci_name(dev));
                        if (dmar_dev_scope_status == 0)
                                dmar_dev_scope_status = -ENOMEM;
                        return NULL;
                }
        }

        info->event = event;
        info->dev = dev;
        info->seg = pci_domain_nr(dev->bus);
        info->level = level;
        if (event == BUS_NOTIFY_ADD_DEVICE) {
                for (tmp = dev; tmp; tmp = tmp->bus->self) {
                        level--;
                        info->path[level].bus = tmp->bus->number;
                        info->path[level].device = PCI_SLOT(tmp->devfn);
                        info->path[level].function = PCI_FUNC(tmp->devfn);
                        if (pci_is_root_bus(tmp->bus))
                                info->bus = tmp->bus->number;
                }
        }

        return info;
}
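
/*
 * Editorial note (not in the original source): the path[] array is filled
 * back to front while walking from the device up through tmp->bus->self, so
 * path[0] ends up being the hop closest to the root bus and path[level - 1]
 * the device itself. That ordering is what dmar_match_pci_path() compares
 * against the ACPI device-scope path.
 */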
static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
{
        if ((void *)info != dmar_pci_notify_info_buf)
                kfree(info);
}
static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
                                struct acpi_dmar_pci_path *path, int count)
{
        int i;

        if (info->bus != bus)
                goto fallback;
        if (info->level != count)
                goto fallback;

        for (i = 0; i < count; i++) {
                if (path[i].device != info->path[i].device ||
                    path[i].function != info->path[i].function)
                        goto fallback;
        }

        return true;

fallback:

        if (count != 1)
                return false;

        i = info->level - 1;
        if (bus == info->path[i].bus &&
            path[0].device == info->path[i].device &&
            path[0].function == info->path[i].function) {
                pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
                        bus, path[0].device, path[0].function);
                return true;
        }

        return false;
}
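
/*
 * Editorial note (not in the original source): the fallback block above is a
 * firmware workaround. Some BIOSes emit RMRR device scopes whose path is
 * rooted at the device's own bus instead of at scope->bus; a single-element
 * path that matches the last hop of the notify info is accepted, with an
 * FW_BUG message, rather than being rejected.
 */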
/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
                          void *start, void *end, u16 segment,
                          struct dmar_dev_scope *devices,
                          int devices_cnt)
{
        int i, level;
        struct device *tmp, *dev = &info->dev->dev;
        struct acpi_dmar_device_scope *scope;
        struct acpi_dmar_pci_path *path;

        if (segment != info->seg)
                return 0;

        for (; start < end; start += scope->length) {
                scope = start;
                if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
                    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
                        continue;

                path = (struct acpi_dmar_pci_path *)(scope + 1);
                level = (scope->length - sizeof(*scope)) / sizeof(*path);
                if (!dmar_match_pci_path(info, scope->bus, path, level))
                        continue;

                /*
                 * We expect devices with endpoint scope to have normal PCI
                 * headers, and devices with bridge scope to have bridge PCI
                 * headers.  However PCI NTB devices may be listed in the
                 * DMAR table with bridge scope, even though they have a
                 * normal PCI header.  NTB devices are identified by class
                 * "BRIDGE_OTHER" (0680h) - we don't declare a scope mismatch
                 * for this special case.
                 */
                if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
                     info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
                    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
                     (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
                      info->dev->class >> 8 != PCI_CLASS_BRIDGE_OTHER))) {
                        pr_warn("Device scope type does not match for %s\n",
                                pci_name(info->dev));
                        return -EINVAL;
                }

                for_each_dev_scope(devices, devices_cnt, i, tmp)
                        if (tmp == NULL) {
                                devices[i].bus = info->dev->bus->number;
                                devices[i].devfn = info->dev->devfn;
                                rcu_assign_pointer(devices[i].dev,
                                                   get_device(dev));
                                return 1;
                        }
                BUG_ON(i >= devices_cnt);
        }

        return 0;
}
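
/*
 * Editorial note (not in the original source): on a successful match the
 * device is parked in the first empty slot of the pre-sized devices[] array
 * (allocated by dmar_alloc_dev_scope()), so the BUG_ON only fires if
 * firmware reports more devices than its own scope entries allowed for.
 */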
int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
                          struct dmar_dev_scope *devices, int count)
{
        int index;
        struct device *tmp;

        if (info->seg != segment)
                return 0;

        for_each_active_dev_scope(devices, count, index, tmp)
                if (tmp == &info->dev->dev) {
                        RCU_INIT_POINTER(devices[index].dev, NULL);
                        synchronize_rcu();
                        put_device(tmp);
                        return 1;
                }

        return 0;
}
static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
{
        int ret = 0;
        struct dmar_drhd_unit *dmaru;
        struct acpi_dmar_hardware_unit *drhd;

        for_each_drhd_unit(dmaru) {
                if (dmaru->include_all)
                        continue;

                drhd = container_of(dmaru->hdr,
                                    struct acpi_dmar_hardware_unit, header);
                ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
                                ((void *)drhd) + drhd->header.length,
                                dmaru->segment,
                                dmaru->devices, dmaru->devices_cnt);
                if (ret)
                        break;
        }
        if (ret >= 0)
                ret = dmar_iommu_notify_scope_dev(info);
        if (ret < 0 && dmar_dev_scope_status == 0)
                dmar_dev_scope_status = ret;

        return ret;
}

static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
{
        struct dmar_drhd_unit *dmaru;

        for_each_drhd_unit(dmaru)
                if (dmar_remove_dev_scope(info, dmaru->segment,
                        dmaru->devices, dmaru->devices_cnt))
                        break;
        dmar_iommu_notify_scope_dev(info);
}
static int dmar_pci_bus_notifier(struct notifier_block *nb,
                                 unsigned long action, void *data)
{
        struct pci_dev *pdev = to_pci_dev(data);
        struct dmar_pci_notify_info *info;

        /* Only care about add/remove events for physical functions.
         * For VFs we actually do the lookup based on the corresponding
         * PF in device_to_iommu() anyway. */
        if (pdev->is_virtfn)
                return NOTIFY_DONE;
        if (action != BUS_NOTIFY_ADD_DEVICE &&
            action != BUS_NOTIFY_REMOVED_DEVICE)
                return NOTIFY_DONE;

        info = dmar_alloc_pci_notify_info(pdev, action);
        if (!info)
                return NOTIFY_DONE;

        down_write(&dmar_global_lock);
        if (action == BUS_NOTIFY_ADD_DEVICE)
                dmar_pci_bus_add_dev(info);
        else if (action == BUS_NOTIFY_REMOVED_DEVICE)
                dmar_pci_bus_del_dev(info);
        up_write(&dmar_global_lock);

        dmar_free_pci_notify_info(info);

        return NOTIFY_OK;
}

static struct notifier_block dmar_pci_bus_nb = {
        .notifier_call = dmar_pci_bus_notifier,
        .priority = INT_MIN,
};
static struct dmar_drhd_unit *
dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
{
        struct dmar_drhd_unit *dmaru;

        list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
                if (dmaru->segment == drhd->segment &&
                    dmaru->reg_base_addr == drhd->address)
                        return dmaru;

        return NULL;
}
/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct dmar_drhd_unit *dmaru;
        int ret = 0;

        drhd = (struct acpi_dmar_hardware_unit *)header;
        dmaru = dmar_find_dmaru(drhd);
        if (dmaru)
                goto out;

        dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
        if (!dmaru)
                return -ENOMEM;

        /*
         * If header is allocated from slab by ACPI _DSM method, we need to
         * copy the content because the memory buffer will be freed on return.
         */
        dmaru->hdr = (void *)(dmaru + 1);
        memcpy(dmaru->hdr, header, header->length);
        dmaru->reg_base_addr = drhd->address;
        dmaru->segment = drhd->segment;
        dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
        dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
                                              ((void *)drhd) + drhd->header.length,
                                              &dmaru->devices_cnt);
        if (dmaru->devices_cnt && dmaru->devices == NULL) {
                kfree(dmaru);
                return -ENOMEM;
        }

        ret = alloc_iommu(dmaru);
        if (ret) {
                dmar_free_dev_scope(&dmaru->devices,
                                    &dmaru->devices_cnt);
                kfree(dmaru);
                return ret;
        }
        dmar_register_drhd_unit(dmaru);

out:
        if (arg)
                (*(int *)arg)++;

        return 0;
}
static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
        if (dmaru->devices && dmaru->devices_cnt)
                dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
        if (dmaru->iommu)
                free_iommu(dmaru->iommu);
        kfree(dmaru);
}
static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
                                      void *arg)
{
        struct acpi_dmar_andd *andd = (void *)header;

        /* Check for NUL termination within the designated length */
        if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
                WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
                           "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
                           "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                           dmi_get_system_info(DMI_BIOS_VENDOR),
                           dmi_get_system_info(DMI_BIOS_VERSION),
                           dmi_get_system_info(DMI_PRODUCT_VERSION));
                return -EINVAL;
        }
        pr_info("ANDD device: %x name: %s\n", andd->device_number,
                andd->device_name);

        return 0;
}
#ifdef CONFIG_ACPI_NUMA
static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
{
        struct acpi_dmar_rhsa *rhsa;
        struct dmar_drhd_unit *drhd;

        rhsa = (struct acpi_dmar_rhsa *)header;
        for_each_drhd_unit(drhd) {
                if (drhd->reg_base_addr == rhsa->base_address) {
                        int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

                        if (!node_online(node))
                                node = -1;
                        drhd->iommu->node = node;
                        return 0;
                }
        }
        WARN_TAINT(
                1, TAINT_FIRMWARE_WORKAROUND,
                "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
                "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                rhsa->base_address,
                dmi_get_system_info(DMI_BIOS_VENDOR),
                dmi_get_system_info(DMI_BIOS_VERSION),
                dmi_get_system_info(DMI_PRODUCT_VERSION));

        return 0;
}
#else
#define dmar_parse_one_rhsa             dmar_res_noop
#endif
static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_reserved_memory *rmrr;
        struct acpi_dmar_atsr *atsr;
        struct acpi_dmar_rhsa *rhsa;

        switch (header->type) {
        case ACPI_DMAR_TYPE_HARDWARE_UNIT:
                drhd = container_of(header, struct acpi_dmar_hardware_unit,
                                    header);
                pr_info("DRHD base: %#016Lx flags: %#x\n",
                        (unsigned long long)drhd->address, drhd->flags);
                break;
        case ACPI_DMAR_TYPE_RESERVED_MEMORY:
                rmrr = container_of(header, struct acpi_dmar_reserved_memory,
                                    header);
                pr_info("RMRR base: %#016Lx end: %#016Lx\n",
                        (unsigned long long)rmrr->base_address,
                        (unsigned long long)rmrr->end_address);
                break;
        case ACPI_DMAR_TYPE_ROOT_ATS:
                atsr = container_of(header, struct acpi_dmar_atsr, header);
                pr_info("ATSR flags: %#x\n", atsr->flags);
                break;
        case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
                rhsa = container_of(header, struct acpi_dmar_rhsa, header);
                pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
                        (unsigned long long)rhsa->base_address,
                        rhsa->proximity_domain);
                break;
        case ACPI_DMAR_TYPE_NAMESPACE:
                /* We don't print this here because we need to sanity-check
                   it first. So print it in dmar_parse_one_andd() instead. */
                break;
        }
}
/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
        acpi_status status = AE_OK;

        /* if we could find DMAR table, then there are DMAR devices */
        status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);

        if (ACPI_SUCCESS(status) && !dmar_tbl) {
                pr_warn("Unable to map DMAR\n");
                status = AE_NOT_FOUND;
        }

        return (ACPI_SUCCESS(status) ? 1 : 0);
}
static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
                                       size_t len, struct dmar_res_callback *cb)
{
        int ret = 0;
        struct acpi_dmar_header *iter, *next;
        struct acpi_dmar_header *end = ((void *)start) + len;

        for (iter = start; iter < end && ret == 0; iter = next) {
                next = (void *)iter + iter->length;
                if (iter->length == 0) {
                        /* Avoid looping forever on bad ACPI tables */
                        pr_debug(FW_BUG "Invalid 0-length structure\n");
                        break;
                } else if (next > end) {
                        /* Avoid passing table end */
                        pr_warn(FW_BUG "Record passes table end\n");
                        return -EINVAL;
                }

                if (cb->print_entry)
                        dmar_table_print_dmar_entry(iter);

                if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
                        /* continue for forward compatibility */
                        pr_debug("Unknown DMAR structure type %d\n",
                                 iter->type);
                } else if (cb->cb[iter->type]) {
                        ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
                } else if (!cb->ignore_unhandled) {
                        pr_warn("No handler for DMAR structure type %d\n",
                                iter->type);
                        ret = -EINVAL;
                }
        }

        return ret;
}

static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
                                       struct dmar_res_callback *cb)
{
        return dmar_walk_remapping_entries((void *)(dmar + 1),
                        dmar->header.length - sizeof(*dmar), cb);
}
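
/*
 * Editorial note (not in the original source): both the static DMAR table
 * and the buffers returned by the hotplug _DSM method are walked with the
 * same helper. A minimal sketch of a caller, assuming a table mapped at
 * dmar_tbl:
 *
 *      struct dmar_res_callback cb = {
 *              .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
 *              .ignore_unhandled = true,
 *      };
 *      ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl, &cb);
 */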
/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
        struct acpi_table_dmar *dmar;
        int drhd_count = 0;
        int ret;
        struct dmar_res_callback cb = {
                .print_entry = true,
                .ignore_unhandled = true,
                .arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
                .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
                .cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
                .cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
                .cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
                .cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
        };

        /*
         * Do it again, earlier dmar_tbl mapping could be mapped with
         * fixed map.
         */
        dmar_table_detect();

        /*
         * ACPI tables may not be DMA protected by tboot, so use DMAR copy
         * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
         */
        dmar_tbl = tboot_get_dmar_table(dmar_tbl);
        if (!dmar_tbl)
                return -ENODEV;

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        if (dmar->width < PAGE_SHIFT - 1) {
                pr_warn("Invalid DMAR haw\n");
                return -EINVAL;
        }

        pr_info("Host address width %d\n", dmar->width + 1);
        ret = dmar_walk_dmar_table(dmar, &cb);
        if (ret == 0 && drhd_count == 0)
                pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");

        return ret;
}
static int dmar_pci_device_match(struct dmar_dev_scope devices[],
                                 int cnt, struct pci_dev *dev)
{
        int index;
        struct device *tmp;

        while (dev) {
                for_each_active_dev_scope(devices, cnt, index, tmp)
                        if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
                                return 1;

                /* Check our parent */
                dev = dev->bus->self;
        }

        return 0;
}
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
        struct dmar_drhd_unit *dmaru;
        struct acpi_dmar_hardware_unit *drhd;

        dev = pci_physfn(dev);

        rcu_read_lock();
        for_each_drhd_unit(dmaru) {
                drhd = container_of(dmaru->hdr,
                                    struct acpi_dmar_hardware_unit,
                                    header);

                if (dmaru->include_all &&
                    drhd->segment == pci_domain_nr(dev->bus))
                        goto out;

                if (dmar_pci_device_match(dmaru->devices,
                                          dmaru->devices_cnt, dev))
                        goto out;
        }
        dmaru = NULL;
out:
        rcu_read_unlock();

        return dmaru;
}
static void __init dmar_acpi_insert_dev_scope(u8 device_number,
                                              struct acpi_device *adev)
{
        struct dmar_drhd_unit *dmaru;
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_device_scope *scope;
        struct device *tmp;
        int i;
        struct acpi_dmar_pci_path *path;

        for_each_drhd_unit(dmaru) {
                drhd = container_of(dmaru->hdr,
                                    struct acpi_dmar_hardware_unit,
                                    header);

                for (scope = (void *)(drhd + 1);
                     (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
                     scope = ((void *)scope) + scope->length) {
                        if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
                                continue;
                        if (scope->enumeration_id != device_number)
                                continue;

                        path = (void *)(scope + 1);
                        pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
                                dev_name(&adev->dev), dmaru->reg_base_addr,
                                scope->bus, path->device, path->function);
                        for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
                                if (tmp == NULL) {
                                        dmaru->devices[i].bus = scope->bus;
                                        dmaru->devices[i].devfn = PCI_DEVFN(path->device,
                                                                            path->function);
                                        rcu_assign_pointer(dmaru->devices[i].dev,
                                                           get_device(&adev->dev));
                                        return;
                                }
                        BUG_ON(i >= dmaru->devices_cnt);
                }
        }
        pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
                device_number, dev_name(&adev->dev));
}
static int __init dmar_acpi_dev_scope_init(void)
{
        struct acpi_dmar_andd *andd;

        if (dmar_tbl == NULL)
                return -ENODEV;

        for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
             ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
             andd = ((void *)andd) + andd->header.length) {
                if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
                        acpi_handle h;
                        struct acpi_device *adev;

                        if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
                                                          andd->device_name,
                                                          &h))) {
                                pr_err("Failed to find handle for ACPI object %s\n",
                                       andd->device_name);
                                continue;
                        }
                        if (acpi_bus_get_device(h, &adev)) {
                                pr_err("Failed to get device for ACPI object %s\n",
                                       andd->device_name);
                                continue;
                        }
                        dmar_acpi_insert_dev_scope(andd->device_number, adev);
                }
        }
        return 0;
}
int __init dmar_dev_scope_init(void)
{
        struct pci_dev *dev = NULL;
        struct dmar_pci_notify_info *info;

        if (dmar_dev_scope_status != 1)
                return dmar_dev_scope_status;

        if (list_empty(&dmar_drhd_units)) {
                dmar_dev_scope_status = -ENODEV;
        } else {
                dmar_dev_scope_status = 0;

                dmar_acpi_dev_scope_init();

                for_each_pci_dev(dev) {
                        if (dev->is_virtfn)
                                continue;

                        info = dmar_alloc_pci_notify_info(dev,
                                        BUS_NOTIFY_ADD_DEVICE);
                        if (!info) {
                                return dmar_dev_scope_status;
                        } else {
                                dmar_pci_bus_add_dev(info);
                                dmar_free_pci_notify_info(info);
                        }
                }

                bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
        }

        return dmar_dev_scope_status;
}
int __init dmar_table_init(void)
{
        static int dmar_table_initialized;
        int ret;

        if (dmar_table_initialized == 0) {
                ret = parse_dmar_table();
                if (ret < 0) {
                        if (ret != -ENODEV)
                                pr_info("Parse DMAR table failure.\n");
                } else if (list_empty(&dmar_drhd_units)) {
                        pr_info("No DMAR devices found\n");
                        ret = -ENODEV;
                }

                if (ret < 0)
                        dmar_table_initialized = ret;
                else
                        dmar_table_initialized = 1;
        }

        return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}
static void warn_invalid_dmar(u64 addr, const char *message)
{
        WARN_TAINT_ONCE(
                1, TAINT_FIRMWARE_WORKAROUND,
                "Your BIOS is broken; DMAR reported at address %llx%s!\n"
                "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                addr, message,
                dmi_get_system_info(DMI_BIOS_VENDOR),
                dmi_get_system_info(DMI_BIOS_VERSION),
                dmi_get_system_info(DMI_PRODUCT_VERSION));
}
static int __init
dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
{
        struct acpi_dmar_hardware_unit *drhd;
        void __iomem *addr;
        u64 cap, ecap;

        drhd = (void *)entry;
        if (!drhd->address) {
                warn_invalid_dmar(0, "");
                return -EINVAL;
        }

        if (arg)
                addr = ioremap(drhd->address, VTD_PAGE_SIZE);
        else
                addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
        if (!addr) {
                pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
                return -EINVAL;
        }

        cap = dmar_readq(addr + DMAR_CAP_REG);
        ecap = dmar_readq(addr + DMAR_ECAP_REG);

        if (arg)
                iounmap(addr);
        else
                early_iounmap(addr, VTD_PAGE_SIZE);

        if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
                warn_invalid_dmar(drhd->address, " returns all ones");
                return -EINVAL;
        }

        return 0;
}
int __init detect_intel_iommu(void)
{
        int ret;
        struct dmar_res_callback validate_drhd_cb = {
                .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
                .ignore_unhandled = true,
        };

        down_write(&dmar_global_lock);
        ret = dmar_table_detect();
        if (ret)
                ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
                                            &validate_drhd_cb);
        if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
                iommu_detected = 1;
                /* Make sure ACS will be enabled */
                pci_request_acs();
        }

#ifdef CONFIG_X86
        if (ret)
                x86_init.iommu.iommu_init = intel_iommu_init;
#endif

        if (dmar_tbl) {
                acpi_put_table(dmar_tbl);
                dmar_tbl = NULL;
        }
        up_write(&dmar_global_lock);

        return ret ? 1 : -ENODEV;
}
static void unmap_iommu(struct intel_iommu *iommu)
{
        iounmap(iommu->reg);
        release_mem_region(iommu->reg_phys, iommu->reg_size);
}
/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
        int map_size, err = 0;

        iommu->reg_phys = phys_addr;
        iommu->reg_size = VTD_PAGE_SIZE;

        if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
                pr_err("Can't reserve memory\n");
                err = -EBUSY;
                goto out;
        }

        iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
        if (!iommu->reg) {
                pr_err("Can't map the region\n");
                err = -ENOMEM;
                goto release;
        }

        iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
        iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

        if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
                err = -EINVAL;
                warn_invalid_dmar(phys_addr, " returns all ones");
                goto unmap;
        }

        /* the registers might be more than one page */
        map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
                         cap_max_fault_reg_offset(iommu->cap));
        map_size = VTD_PAGE_ALIGN(map_size);
        if (map_size > iommu->reg_size) {
                iounmap(iommu->reg);
                release_mem_region(iommu->reg_phys, iommu->reg_size);
                iommu->reg_size = map_size;
                if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
                                        iommu->name)) {
                        pr_err("Can't reserve memory\n");
                        err = -EBUSY;
                        goto out;
                }
                iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
                if (!iommu->reg) {
                        pr_err("Can't map the region\n");
                        err = -ENOMEM;
                        goto release;
                }
        }
        err = 0;
        goto out;

unmap:
        iounmap(iommu->reg);
release:
        release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
        return err;
}
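
/*
 * Editorial note (not in the original source): the two-stage mapping exists
 * because the IOTLB and fault-recording registers may live beyond the first
 * 4KB page; their offsets are only discoverable from ECAP/CAP, which in
 * turn can only be read after the first page is mapped. Example: if
 * ecap_max_iotlb_offset() reports 0x1008, VTD_PAGE_ALIGN() rounds the
 * mapping up to 0x2000 and the region is remapped at that size.
 */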
static int dmar_alloc_seq_id(struct intel_iommu *iommu)
{
        iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
                                            DMAR_UNITS_SUPPORTED);
        if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
                iommu->seq_id = -1;
        } else {
                set_bit(iommu->seq_id, dmar_seq_ids);
                sprintf(iommu->name, "dmar%d", iommu->seq_id);
        }

        return iommu->seq_id;
}

static void dmar_free_seq_id(struct intel_iommu *iommu)
{
        if (iommu->seq_id >= 0) {
                clear_bit(iommu->seq_id, dmar_seq_ids);
                iommu->seq_id = -1;
        }
}
static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
        struct intel_iommu *iommu;
        u32 ver, sts;
        int agaw = 0;
        int msagaw = 0;
        int err;

        if (!drhd->reg_base_addr) {
                warn_invalid_dmar(0, "");
                return -EINVAL;
        }

        iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENOMEM;

        if (dmar_alloc_seq_id(iommu) < 0) {
                pr_err("Failed to allocate seq_id\n");
                err = -ENOSPC;
                goto error;
        }

        err = map_iommu(iommu, drhd->reg_base_addr);
        if (err) {
                pr_err("Failed to map %s\n", iommu->name);
                goto error_free_seq_id;
        }

        err = -EINVAL;
        agaw = iommu_calculate_agaw(iommu);
        if (agaw < 0) {
                pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
                       iommu->seq_id);
                goto err_unmap;
        }
        msagaw = iommu_calculate_max_sagaw(iommu);
        if (msagaw < 0) {
                pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
                       iommu->seq_id);
                goto err_unmap;
        }
        iommu->agaw = agaw;
        iommu->msagaw = msagaw;
        iommu->segment = drhd->segment;

        iommu->node = -1;

        ver = readl(iommu->reg + DMAR_VER_REG);
        pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
                iommu->name,
                (unsigned long long)drhd->reg_base_addr,
                DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
                (unsigned long long)iommu->cap,
                (unsigned long long)iommu->ecap);

        /* Reflect status in gcmd */
        sts = readl(iommu->reg + DMAR_GSTS_REG);
        if (sts & DMA_GSTS_IRES)
                iommu->gcmd |= DMA_GCMD_IRE;
        if (sts & DMA_GSTS_TES)
                iommu->gcmd |= DMA_GCMD_TE;
        if (sts & DMA_GSTS_QIES)
                iommu->gcmd |= DMA_GCMD_QIE;

        raw_spin_lock_init(&iommu->register_lock);

        if (intel_iommu_enabled) {
                err = iommu_device_sysfs_add(&iommu->iommu, NULL,
                                             intel_iommu_groups,
                                             "%s", iommu->name);
                if (err)
                        goto err_unmap;

                iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);

                err = iommu_device_register(&iommu->iommu);
                if (err)
                        goto err_unmap;
        }

        drhd->iommu = iommu;

        return 0;

err_unmap:
        unmap_iommu(iommu);
error_free_seq_id:
        dmar_free_seq_id(iommu);
error:
        kfree(iommu);
        return err;
}
static void free_iommu(struct intel_iommu *iommu)
{
        if (intel_iommu_enabled) {
                iommu_device_sysfs_remove(&iommu->iommu);
                iommu_device_unregister(&iommu->iommu);
        }

        if (iommu->irq) {
                if (iommu->pr_irq) {
                        free_irq(iommu->pr_irq, iommu);
                        dmar_free_hwirq(iommu->pr_irq);
                        iommu->pr_irq = 0;
                }
                free_irq(iommu->irq, iommu);
                dmar_free_hwirq(iommu->irq);
                iommu->irq = 0;
        }

        if (iommu->qi) {
                free_page((unsigned long)iommu->qi->desc);
                kfree(iommu->qi->desc_status);
                kfree(iommu->qi);
        }

        if (iommu->reg)
                unmap_iommu(iommu);

        dmar_free_seq_id(iommu);
        kfree(iommu);
}
/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
        while (qi->desc_status[qi->free_tail] == QI_DONE ||
               qi->desc_status[qi->free_tail] == QI_ABORT) {
                qi->desc_status[qi->free_tail] = QI_FREE;
                qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
                qi->free_cnt++;
        }
}
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
        u32 fault;
        int head, tail;
        struct q_inval *qi = iommu->qi;
        int wait_index = (index + 1) % QI_LENGTH;

        if (qi->desc_status[wait_index] == QI_ABORT)
                return -EAGAIN;

        fault = readl(iommu->reg + DMAR_FSTS_REG);

        /*
         * If IQE happens, the head points to the descriptor associated
         * with the error. No new descriptors are fetched until the IQE
         * is cleared.
         */
        if (fault & DMA_FSTS_IQE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
                if ((head >> DMAR_IQ_SHIFT) == index) {
                        pr_err("VT-d detected invalid descriptor: "
                                "low=%llx, high=%llx\n",
                                (unsigned long long)qi->desc[index].low,
                                (unsigned long long)qi->desc[index].high);
                        memcpy(&qi->desc[index], &qi->desc[wait_index],
                                        sizeof(struct qi_desc));
                        writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
                        return -EINVAL;
                }
        }

        /*
         * If ITE happens, all pending wait_desc commands are aborted.
         * No new descriptors are fetched until the ITE is cleared.
         */
        if (fault & DMA_FSTS_ITE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
                head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
                head |= 1;
                tail = readl(iommu->reg + DMAR_IQT_REG);
                tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

                writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

                do {
                        if (qi->desc_status[head] == QI_IN_USE)
                                qi->desc_status[head] = QI_ABORT;
                        head = (head - 2 + QI_LENGTH) % QI_LENGTH;
                } while (head != tail);

                if (qi->desc_status[wait_index] == QI_ABORT)
                        return -EAGAIN;
        }

        if (fault & DMA_FSTS_ICE)
                writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

        return 0;
}
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
        int rc;
        struct q_inval *qi = iommu->qi;
        struct qi_desc *hw, wait_desc;
        int wait_index, index;
        unsigned long flags;

        if (!qi)
                return 0;

        hw = qi->desc;

restart:
        rc = 0;

        raw_spin_lock_irqsave(&qi->q_lock, flags);
        while (qi->free_cnt < 3) {
                raw_spin_unlock_irqrestore(&qi->q_lock, flags);
                cpu_relax();
                raw_spin_lock_irqsave(&qi->q_lock, flags);
        }

        index = qi->free_head;
        wait_index = (index + 1) % QI_LENGTH;

        qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

        hw[index] = *desc;

        wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
                        QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
        wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

        hw[wait_index] = wait_desc;

        qi->free_head = (qi->free_head + 2) % QI_LENGTH;
        qi->free_cnt -= 2;

        /*
         * update the HW tail register indicating the presence of
         * new descriptors.
         */
        writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

        while (qi->desc_status[wait_index] != QI_DONE) {
                /*
                 * We will leave the interrupts disabled, to prevent the
                 * interrupt context from queueing another cmd while a cmd
                 * is already submitted and waiting for completion on this
                 * cpu. This is to avoid a deadlock where the interrupt
                 * context can wait indefinitely for free slots in the queue.
                 */
                rc = qi_check_fault(iommu, index);
                if (rc)
                        break;

                raw_spin_unlock(&qi->q_lock);
                cpu_relax();
                raw_spin_lock(&qi->q_lock);
        }

        qi->desc_status[index] = QI_DONE;

        reclaim_free_desc(qi);
        raw_spin_unlock_irqrestore(&qi->q_lock, flags);

        if (rc == -EAGAIN)
                goto restart;

        return rc;
}
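
/*
 * Editorial note (not in the original source): completion is detected via
 * the wait descriptor queued right behind the caller's descriptor. Its
 * Status-Write bit makes the hardware write QI_DONE into
 * qi->desc_status[wait_index] once everything before it has executed, so
 * the busy-wait loop above polls plain memory instead of MMIO registers.
 */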
/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
        struct qi_desc desc;

        desc.low = QI_IEC_TYPE;
        desc.high = 0;

        /* should never fail */
        qi_submit_sync(&desc, iommu);
}
void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
                      u64 type)
{
        struct qi_desc desc;

        desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
                        | QI_CC_GRAN(type) | QI_CC_TYPE;
        desc.high = 0;

        qi_submit_sync(&desc, iommu);
}
void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
                    unsigned int size_order, u64 type)
{
        u8 dw = 0, dr = 0;

        struct qi_desc desc;
        int ih = 0;

        if (cap_write_drain(iommu->cap))
                dw = 1;

        if (cap_read_drain(iommu->cap))
                dr = 1;

        desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
                | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
        desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
                | QI_IOTLB_AM(size_order);

        qi_submit_sync(&desc, iommu);
}
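
/*
 * Editorial note (not in the original source): size_order is log2 of the
 * number of 4KB pages to invalidate and addr must be aligned accordingly;
 * e.g. size_order == 2 covers four pages starting at a 16KB-aligned addr.
 * The granularity in 'type' selects global, domain or page-selective
 * invalidation.
 */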
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
                        u64 addr, unsigned mask)
{
        struct qi_desc desc;

        if (mask) {
                BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
                addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
                desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
        } else
                desc.high = QI_DEV_IOTLB_ADDR(addr);

        if (qdep >= QI_DEV_IOTLB_MAX_INVS)
                qdep = 0;

        desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
                   QI_DEV_IOTLB_TYPE;

        qi_submit_sync(&desc, iommu);
}
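
/*
 * Editorial note (not in the original source): for a non-zero mask the
 * request must be naturally aligned to 2^mask pages (the BUG_ON), and the
 * low address bits are filled with ones so that, together with
 * QI_DEV_IOTLB_SIZE, they encode "invalidate 2^mask pages at addr" in the
 * descriptor's address field.
 */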
/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
        unsigned long flags;
        u32 sts;
        cycles_t start_time = get_cycles();

        if (!ecap_qis(iommu->ecap))
                return;

        raw_spin_lock_irqsave(&iommu->register_lock, flags);

        sts = readl(iommu->reg + DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_QIES))
                goto end;

        /*
         * Give a chance to HW to complete the pending invalidation requests.
         */
        while ((readl(iommu->reg + DMAR_IQT_REG) !=
                readl(iommu->reg + DMAR_IQH_REG)) &&
                (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
                cpu_relax();

        iommu->gcmd &= ~DMA_GCMD_QIE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
                      !(sts & DMA_GSTS_QIES), sts);
end:
        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
        u32 sts;
        unsigned long flags;
        struct q_inval *qi = iommu->qi;

        qi->free_head = qi->free_tail = 0;
        qi->free_cnt = QI_LENGTH;

        raw_spin_lock_irqsave(&iommu->register_lock, flags);

        /* write zero to the tail reg */
        writel(0, iommu->reg + DMAR_IQT_REG);

        dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

        iommu->gcmd |= DMA_GCMD_QIE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
        struct q_inval *qi;
        struct page *desc_page;

        if (!ecap_qis(iommu->ecap))
                return -ENOENT;

        /*
         * queued invalidation is already setup and enabled.
         */
        if (iommu->qi)
                return 0;

        iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
        if (!iommu->qi)
                return -ENOMEM;

        qi = iommu->qi;

        desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
        if (!desc_page) {
                kfree(qi);
                iommu->qi = NULL;
                return -ENOMEM;
        }

        qi->desc = page_address(desc_page);

        qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
        if (!qi->desc_status) {
                free_page((unsigned long) qi->desc);
                kfree(qi);
                iommu->qi = NULL;
                return -ENOMEM;
        }

        raw_spin_lock_init(&qi->q_lock);

        __dmar_enable_qi(iommu);

        return 0;
}
/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
        DMA_REMAP,
        INTR_REMAP,
        UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
        "Software",
        "Present bit in root entry is clear",
        "Present bit in context entry is clear",
        "Invalid context entry",
        "Access beyond MGAW",
        "PTE Write access is not set",
        "PTE Read access is not set",
        "Next page table ptr is invalid",
        "Root table address invalid",
        "Context table ptr is invalid",
        "non-zero reserved fields in RTP",
        "non-zero reserved fields in CTP",
        "non-zero reserved fields in PTE",
        "PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
        "Detected reserved fields in the decoded interrupt-remapped request",
        "Interrupt index exceeded the interrupt-remapping table size",
        "Present field in the IRTE entry is clear",
        "Error accessing interrupt-remapping table pointed by IRTA_REG",
        "Detected reserved fields in the IRTE entry",
        "Blocked a compatibility format interrupt request",
        "Blocked an interrupt request due to source-id verification failure",
};

static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
        if (fault_reason >= 0x20 && (fault_reason - 0x20 <
                                     ARRAY_SIZE(irq_remap_fault_reasons))) {
                *fault_type = INTR_REMAP;
                return irq_remap_fault_reasons[fault_reason - 0x20];
        } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
                *fault_type = DMA_REMAP;
                return dma_remap_fault_reasons[fault_reason];
        } else {
                *fault_type = UNKNOWN;
                return "Unknown";
        }
}
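
/*
 * Editorial note (not in the original source): the two reason tables share
 * one 8-bit number space. DMA-remapping reasons start at 0 and
 * interrupt-remapping reasons start at 0x20, which is why the lookup above
 * subtracts 0x20 before indexing irq_remap_fault_reasons[].
 */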
static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
{
        if (iommu->irq == irq)
                return DMAR_FECTL_REG;
        else if (iommu->pr_irq == irq)
                return DMAR_PECTL_REG;
        else
                BUG();
}
void dmar_msi_unmask(struct irq_data *data)
{
        struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
        int reg = dmar_msi_reg(iommu, data->irq);
        unsigned long flag;

        /* unmask it */
        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        writel(0, iommu->reg + reg);
        /* Read a reg to force flush the post write */
        readl(iommu->reg + reg);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
        struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
        int reg = dmar_msi_reg(iommu, data->irq);
        unsigned long flag;

        /* mask it */
        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        writel(DMA_FECTL_IM, iommu->reg + reg);
        /* Read a reg to force flush the post write */
        readl(iommu->reg + reg);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
        struct intel_iommu *iommu = irq_get_handler_data(irq);
        int reg = dmar_msi_reg(iommu, irq);
        unsigned long flag;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        writel(msg->data, iommu->reg + reg + 4);
        writel(msg->address_lo, iommu->reg + reg + 8);
        writel(msg->address_hi, iommu->reg + reg + 12);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
        struct intel_iommu *iommu = irq_get_handler_data(irq);
        int reg = dmar_msi_reg(iommu, irq);
        unsigned long flag;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        msg->data = readl(iommu->reg + reg + 4);
        msg->address_lo = readl(iommu->reg + reg + 8);
        msg->address_hi = readl(iommu->reg + reg + 12);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
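
/*
 * Editorial note (not in the original source): each MSI register block is
 * laid out as control at +0, data at +4, address low at +8 and address
 * high at +12 relative to dmar_msi_reg(), which is what the fixed
 * +4/+8/+12 offsets in the accessors above rely on.
 */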
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
                u8 fault_reason, u16 source_id, unsigned long long addr)
{
        const char *reason;
        int fault_type;

        reason = dmar_get_fault_reason(fault_reason, &fault_type);

        if (fault_type == INTR_REMAP)
                pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index %llx [fault reason %02d] %s\n",
                        source_id >> 8, PCI_SLOT(source_id & 0xFF),
                        PCI_FUNC(source_id & 0xFF), addr >> 48,
                        fault_reason, reason);
        else
                pr_err("[%s] Request device [%02x:%02x.%d] fault addr %llx [fault reason %02d] %s\n",
                       type ? "DMA Read" : "DMA Write",
                       source_id >> 8, PCI_SLOT(source_id & 0xFF),
                       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
        return 0;
}
#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
        struct intel_iommu *iommu = dev_id;
        int reg, fault_index;
        u32 fault_status;
        unsigned long flag;
        static DEFINE_RATELIMIT_STATE(rs,
                                      DEFAULT_RATELIMIT_INTERVAL,
                                      DEFAULT_RATELIMIT_BURST);
        bool ratelimited;

        /* Disable printing, simply clear the fault when ratelimited */
        ratelimited = !__ratelimit(&rs);

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        fault_status = readl(iommu->reg + DMAR_FSTS_REG);
        if (fault_status && !ratelimited)
                pr_err("DRHD: handling fault status reg %x\n", fault_status);

        /* TBD: ignore advanced fault log currently */
        if (!(fault_status & DMA_FSTS_PPF))
                goto unlock_exit;

        fault_index = dma_fsts_fault_record_index(fault_status);
        reg = cap_fault_reg_offset(iommu->cap);
        while (1) {
                u8 fault_reason;
                u16 source_id;
                u64 guest_addr;
                int type;
                u32 data;

                /* highest 32 bits */
                data = readl(iommu->reg + reg +
                                fault_index * PRIMARY_FAULT_REG_LEN + 12);
                if (!(data & DMA_FRCD_F))
                        break;

                if (!ratelimited) {
                        fault_reason = dma_frcd_fault_reason(data);
                        type = dma_frcd_type(data);

                        data = readl(iommu->reg + reg +
                                     fault_index * PRIMARY_FAULT_REG_LEN + 8);
                        source_id = dma_frcd_source_id(data);

                        guest_addr = dmar_readq(iommu->reg + reg +
                                        fault_index * PRIMARY_FAULT_REG_LEN);
                        guest_addr = dma_frcd_page_addr(guest_addr);
                }

                /* clear the fault */
                writel(DMA_FRCD_F, iommu->reg + reg +
                        fault_index * PRIMARY_FAULT_REG_LEN + 12);

                raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

                if (!ratelimited)
                        dmar_fault_do_one(iommu, type, fault_reason,
                                          source_id, guest_addr);

                fault_index++;
                if (fault_index >= cap_num_fault_regs(iommu->cap))
                        fault_index = 0;
                raw_spin_lock_irqsave(&iommu->register_lock, flag);
        }

        writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);

unlock_exit:
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
        return IRQ_HANDLED;
}
int dmar_set_interrupt(struct intel_iommu *iommu)
{
        int irq, ret;

        /*
         * Check if the fault interrupt is already initialized.
         */
        if (iommu->irq)
                return 0;

        irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
        if (irq > 0) {
                iommu->irq = irq;
        } else {
                pr_err("No free IRQ vectors\n");
                return -EINVAL;
        }

        ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
        if (ret)
                pr_err("Can't request irq\n");
        return ret;
}
int __init enable_drhd_fault_handling(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;

        /*
         * Enable fault control interrupt.
         */
        for_each_iommu(iommu, drhd) {
                u32 fault_status;
                int ret = dmar_set_interrupt(iommu);

                if (ret) {
                        pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
                               (unsigned long long)drhd->reg_base_addr, ret);
                        return -1;
                }

                /*
                 * Clear any previous faults.
                 */
                dmar_fault(iommu->irq, iommu);
                fault_status = readl(iommu->reg + DMAR_FSTS_REG);
                writel(fault_status, iommu->reg + DMAR_FSTS_REG);
        }

        return 0;
}
/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
        if (!ecap_qis(iommu->ecap))
                return -ENOENT;

        if (!iommu->qi)
                return -ENOENT;

        /*
         * First disable queued invalidation.
         */
        dmar_disable_qi(iommu);
        /*
         * Then enable queued invalidation again. Since there are no
         * pending invalidation requests now, it's safe to re-enable
         * queued invalidation.
         */
        __dmar_enable_qi(iommu);

        return 0;
}
/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
        struct acpi_table_dmar *dmar;
        dmar = (struct acpi_table_dmar *)dmar_tbl;
        if (!dmar)
                return 0;
        return dmar->flags & 0x1;
}

/* Check whether DMAR units are in use */
static inline bool dmar_in_use(void)
{
        return irq_remapping_enabled || intel_iommu_enabled;
}
static int __init dmar_free_unused_resources(void)
{
        struct dmar_drhd_unit *dmaru, *dmaru_n;

        if (dmar_in_use())
                return 0;

        if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
                bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);

        down_write(&dmar_global_lock);
        list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
                list_del(&dmaru->list);
                dmar_free_drhd(dmaru);
        }
        up_write(&dmar_global_lock);

        return 0;
}

late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);
/*
 * DMAR Hotplug Support
 * For more details, please refer to Intel(R) Virtualization Technology
 * for Directed-IO Architecture Specification, Rev 2.2, Section 8.8
 * "Remapping Hardware Unit Hot Plug".
 */
static u8 dmar_hp_uuid[] = {
        /* 0000 */    0xA6, 0xA3, 0xC1, 0xD8, 0x9B, 0xBE, 0x9B, 0x4C,
        /* 0008 */    0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF
};

/*
 * Currently there's only one revision and BIOS will not check the revision id,
 * so use 0 for safety.
 */
#define DMAR_DSM_REV_ID                 0
#define DMAR_DSM_FUNC_DRHD              1
#define DMAR_DSM_FUNC_ATSR              2
#define DMAR_DSM_FUNC_RHSA              3
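
/*
 * Editorial note (not in the original source): a _DSM function is probed by
 * bit position, so dmar_detect_dsm() below passes 1 << func as the funcs
 * mask; e.g. DMAR_DSM_FUNC_DRHD (1) is advertised via bit 1 of the
 * supported-functions mask returned by the firmware's _DSM function 0.
 */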
static inline bool dmar_detect_dsm(acpi_handle handle, int func)
{
        return acpi_check_dsm(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, 1 << func);
}
static int dmar_walk_dsm_resource(acpi_handle handle, int func,
                                  dmar_res_handler_t handler, void *arg)
{
        int ret = -ENODEV;
        union acpi_object *obj;
        struct acpi_dmar_header *start;
        struct dmar_res_callback callback;
        static int res_type[] = {
                [DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
                [DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
                [DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
        };

        if (!dmar_detect_dsm(handle, func))
                return 0;

        obj = acpi_evaluate_dsm_typed(handle, dmar_hp_uuid, DMAR_DSM_REV_ID,
                                      func, NULL, ACPI_TYPE_BUFFER);
        if (!obj)
                return -ENODEV;

        memset(&callback, 0, sizeof(callback));
        callback.cb[res_type[func]] = handler;
        callback.arg[res_type[func]] = arg;
        start = (struct acpi_dmar_header *)obj->buffer.pointer;
        ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);

        ACPI_FREE(obj);

        return ret;
}
static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
{
        int ret;
        struct dmar_drhd_unit *dmaru;

        dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
        if (!dmaru)
                return -ENODEV;

        ret = dmar_ir_hotplug(dmaru, true);
        if (ret == 0)
                ret = dmar_iommu_hotplug(dmaru, true);

        return ret;
}

static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
{
        int i, ret;
        struct device *dev;
        struct dmar_drhd_unit *dmaru;

        dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
        if (!dmaru)
                return 0;

        /*
         * All PCI devices managed by this unit should have been destroyed.
         */
        if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
                for_each_active_dev_scope(dmaru->devices,
                                          dmaru->devices_cnt, i, dev)
                        return -EBUSY;
        }

        ret = dmar_ir_hotplug(dmaru, false);
        if (ret == 0)
                ret = dmar_iommu_hotplug(dmaru, false);

        return ret;
}

static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
{
        struct dmar_drhd_unit *dmaru;

        dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
        if (dmaru) {
                list_del_rcu(&dmaru->list);
                synchronize_rcu();
                dmar_free_drhd(dmaru);
        }

        return 0;
}
static int dmar_hotplug_insert(acpi_handle handle)
{
        int ret;
        int drhd_count = 0;

        ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
                                     &dmar_validate_one_drhd, (void *)1);
        if (ret)
                goto out;

        ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
                                     &dmar_parse_one_drhd, (void *)&drhd_count);
        if (ret == 0 && drhd_count == 0) {
                pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
                goto out;
        } else if (ret) {
                goto release_drhd;
        }

        ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
                                     &dmar_parse_one_rhsa, NULL);
        if (ret)
                goto release_drhd;

        ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
                                     &dmar_parse_one_atsr, NULL);
        if (ret)
                goto release_atsr;

        ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
                                     &dmar_hp_add_drhd, NULL);
        if (!ret)
                return 0;

        dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
                               &dmar_hp_remove_drhd, NULL);
release_atsr:
        dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
                               &dmar_release_one_atsr, NULL);
release_drhd:
        dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
                               &dmar_hp_release_drhd, NULL);
out:
        return ret;
}
static int dmar_hotplug_remove(acpi_handle handle)
{
        int ret;

        ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
                                     &dmar_check_one_atsr, NULL);
        if (ret)
                return ret;

        ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
                                     &dmar_hp_remove_drhd, NULL);
        if (ret == 0) {
                WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
                                               &dmar_release_one_atsr, NULL));
                WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
                                               &dmar_hp_release_drhd, NULL));
        } else {
                dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
                                       &dmar_hp_add_drhd, NULL);
        }

        return ret;
}
static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
                                       void *context, void **retval)
{
        acpi_handle *phdl = retval;

        if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
                *phdl = handle;
                return AE_CTRL_TERMINATE;
        }

        return AE_OK;
}
static int dmar_device_hotplug(acpi_handle handle, bool insert)
{
        int ret;
        acpi_handle tmp = NULL;
        acpi_status status;

        if (!dmar_in_use())
                return 0;

        if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
                tmp = handle;
        } else {
                status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
                                             ACPI_UINT32_MAX,
                                             dmar_get_dsm_handle,
                                             NULL, NULL, &tmp);
                if (ACPI_FAILURE(status)) {
                        pr_warn("Failed to locate _DSM method.\n");
                        return -ENXIO;
                }
        }
        if (tmp == NULL)
                return 0;

        down_write(&dmar_global_lock);
        if (insert)
                ret = dmar_hotplug_insert(tmp);
        else
                ret = dmar_hotplug_remove(tmp);
        up_write(&dmar_global_lock);

        return ret;
}

int dmar_device_add(acpi_handle handle)
{
        return dmar_device_hotplug(handle, true);
}

int dmar_device_remove(acpi_handle handle)
{
        return dmar_device_hotplug(handle, false);
}
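
/*
 * Editorial note (not in the original source): dmar_device_add() and
 * dmar_device_remove() are the entry points the ACPI hotplug code calls
 * with the handle of the hot-plugged object; dmar_device_hotplug() then
 * locates the _DSM provider (the handle itself or a descendant) and runs
 * the insert or remove sequence under dmar_global_lock.
 */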