/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */

#define pr_fmt(fmt) "DMAR: " fmt

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"

typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
struct dmar_res_callback {
	dmar_res_handler_t	cb[ACPI_DMAR_TYPE_RESERVED];
	void			*arg[ACPI_DMAR_TYPE_RESERVED];
	bool			ignore_unhandled;
	bool			print_entry;
};
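
/*
 * Handlers for the individual DMAR sub-table types are collected in a
 * struct dmar_res_callback and dispatched by dmar_walk_remapping_entries()
 * below, keyed by the ACPI sub-table type, e.g.:
 *
 *	struct dmar_res_callback cb = {
 *		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
 *		.arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
 *	};
 *
 * Types without a handler are either skipped (ignore_unhandled) or
 * treated as an error.
 */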

/*
 * Assumptions:
 * 1) The hotplug framework guarantees that DMAR unit will be hot-added
 *    before IO devices managed by that unit.
 * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
 *    after IO devices managed by that unit.
 * 3) Hotplug events are rare.
 *
 * Locking rules for DMA and interrupt remapping related global data structures:
 * 1) Use dmar_global_lock in process context
 * 2) Use RCU in interrupt context
 */
DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static int dmar_dev_scope_status = 1;
static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so that a scan of the list will
	 * find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
	else
		list_add_rcu(&drhd->list, &dmar_drhd_units);
}

void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
	struct acpi_dmar_device_scope *scope;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return NULL;

	return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
}

void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
{
	int i;
	struct device *tmp_dev;

	if (*devices && *cnt) {
		for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
			put_device(tmp_dev);
		kfree(*devices);
	}

	*devices = NULL;
	*cnt = 0;
}
59ce0515
JL
127/* Optimize out kzalloc()/kfree() for normal cases */
128static char dmar_pci_notify_info_buf[64];
129
130static struct dmar_pci_notify_info *
131dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
132{
133 int level = 0;
134 size_t size;
135 struct pci_dev *tmp;
136 struct dmar_pci_notify_info *info;
137
138 BUG_ON(dev->is_virtfn);
139
140 /* Only generate path[] for device addition event */
141 if (event == BUS_NOTIFY_ADD_DEVICE)
142 for (tmp = dev; tmp; tmp = tmp->bus->self)
143 level++;
144
145 size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
146 if (size <= sizeof(dmar_pci_notify_info_buf)) {
147 info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
148 } else {
149 info = kzalloc(size, GFP_KERNEL);
150 if (!info) {
151 pr_warn("Out of memory when allocating notify_info "
152 "for %s.\n", pci_name(dev));
2e455289
JL
153 if (dmar_dev_scope_status == 0)
154 dmar_dev_scope_status = -ENOMEM;
59ce0515
JL
155 return NULL;
156 }
157 }
158
159 info->event = event;
160 info->dev = dev;
161 info->seg = pci_domain_nr(dev->bus);
162 info->level = level;
163 if (event == BUS_NOTIFY_ADD_DEVICE) {
5ae0566a
JL
164 for (tmp = dev; tmp; tmp = tmp->bus->self) {
165 level--;
57384592 166 info->path[level].bus = tmp->bus->number;
59ce0515
JL
167 info->path[level].device = PCI_SLOT(tmp->devfn);
168 info->path[level].function = PCI_FUNC(tmp->devfn);
169 if (pci_is_root_bus(tmp->bus))
170 info->bus = tmp->bus->number;
171 }
172 }
173
174 return info;
175}
176
177static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
178{
179 if ((void *)info != dmar_pci_notify_info_buf)
180 kfree(info);
181}
182
183static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
184 struct acpi_dmar_pci_path *path, int count)
185{
186 int i;
187
188 if (info->bus != bus)
80f7b3d1 189 goto fallback;
59ce0515 190 if (info->level != count)
80f7b3d1 191 goto fallback;
59ce0515
JL
192
193 for (i = 0; i < count; i++) {
194 if (path[i].device != info->path[i].device ||
195 path[i].function != info->path[i].function)
80f7b3d1 196 goto fallback;
59ce0515
JL
197 }
198
199 return true;
80f7b3d1
JR
200
201fallback:
202
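	/*
	 * Some BIOSes (note the FW_BUG warning below) describe the device
	 * scope with only the last element of the path instead of the full
	 * path from the root bus; accept the entry if that last hop matches
	 * this device.
	 */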
203 if (count != 1)
204 return false;
205
206 i = info->level - 1;
207 if (bus == info->path[i].bus &&
208 path[0].device == info->path[i].device &&
209 path[0].function == info->path[i].function) {
210 pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
211 bus, path[0].device, path[0].function);
212 return true;
213 }
214
215 return false;
59ce0515
JL
216}
217
218/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
219int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
220 void *start, void*end, u16 segment,
832bd858
DW
221 struct dmar_dev_scope *devices,
222 int devices_cnt)
59ce0515
JL
223{
224 int i, level;
832bd858 225 struct device *tmp, *dev = &info->dev->dev;
59ce0515
JL
226 struct acpi_dmar_device_scope *scope;
227 struct acpi_dmar_pci_path *path;
228
229 if (segment != info->seg)
230 return 0;
231
232 for (; start < end; start += scope->length) {
233 scope = start;
234 if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
235 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
236 continue;
237
238 path = (struct acpi_dmar_pci_path *)(scope + 1);
239 level = (scope->length - sizeof(*scope)) / sizeof(*path);
240 if (!dmar_match_pci_path(info, scope->bus, path, level))
241 continue;
242
		/*
		 * We expect devices with endpoint scope to have normal PCI
		 * headers, and devices with bridge scope to have bridge PCI
		 * headers. However PCI NTB devices may be listed in the
		 * DMAR table with bridge scope, even though they have a
		 * normal PCI header. NTB devices are identified by class
		 * "BRIDGE_OTHER" (0680h) - we don't declare a scope
		 * mismatch for this special case.
		 */
252 if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
253 info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
254 (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
255 (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
256 info->dev->class >> 8 != PCI_CLASS_BRIDGE_OTHER))) {
59ce0515 257 pr_warn("Device scope type does not match for %s\n",
832bd858 258 pci_name(info->dev));
59ce0515
JL
259 return -EINVAL;
260 }
261
262 for_each_dev_scope(devices, devices_cnt, i, tmp)
263 if (tmp == NULL) {
832bd858
DW
264 devices[i].bus = info->dev->bus->number;
265 devices[i].devfn = info->dev->devfn;
266 rcu_assign_pointer(devices[i].dev,
267 get_device(dev));
59ce0515
JL
268 return 1;
269 }
270 BUG_ON(i >= devices_cnt);
271 }
272
273 return 0;
274}
275
276int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
832bd858 277 struct dmar_dev_scope *devices, int count)
59ce0515
JL
278{
279 int index;
832bd858 280 struct device *tmp;
59ce0515
JL
281
282 if (info->seg != segment)
283 return 0;
284
285 for_each_active_dev_scope(devices, count, index, tmp)
832bd858 286 if (tmp == &info->dev->dev) {
eecbad7d 287 RCU_INIT_POINTER(devices[index].dev, NULL);
59ce0515 288 synchronize_rcu();
832bd858 289 put_device(tmp);
59ce0515
JL
290 return 1;
291 }
292
293 return 0;
294}
295
296static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
297{
298 int ret = 0;
299 struct dmar_drhd_unit *dmaru;
300 struct acpi_dmar_hardware_unit *drhd;
301
302 for_each_drhd_unit(dmaru) {
303 if (dmaru->include_all)
304 continue;
305
306 drhd = container_of(dmaru->hdr,
307 struct acpi_dmar_hardware_unit, header);
308 ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
309 ((void *)drhd) + drhd->header.length,
310 dmaru->segment,
311 dmaru->devices, dmaru->devices_cnt);
312 if (ret != 0)
313 break;
314 }
315 if (ret >= 0)
316 ret = dmar_iommu_notify_scope_dev(info);
2e455289
JL
317 if (ret < 0 && dmar_dev_scope_status == 0)
318 dmar_dev_scope_status = ret;
59ce0515
JL
319
320 return ret;
321}
322
323static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
324{
325 struct dmar_drhd_unit *dmaru;
326
327 for_each_drhd_unit(dmaru)
328 if (dmar_remove_dev_scope(info, dmaru->segment,
329 dmaru->devices, dmaru->devices_cnt))
330 break;
331 dmar_iommu_notify_scope_dev(info);
332}
333
334static int dmar_pci_bus_notifier(struct notifier_block *nb,
335 unsigned long action, void *data)
336{
337 struct pci_dev *pdev = to_pci_dev(data);
338 struct dmar_pci_notify_info *info;
339
1c387188
AR
340 /* Only care about add/remove events for physical functions.
341 * For VFs we actually do the lookup based on the corresponding
342 * PF in device_to_iommu() anyway. */
59ce0515
JL
343 if (pdev->is_virtfn)
344 return NOTIFY_DONE;
e6a8c9b3
JR
345 if (action != BUS_NOTIFY_ADD_DEVICE &&
346 action != BUS_NOTIFY_REMOVED_DEVICE)
59ce0515
JL
347 return NOTIFY_DONE;
348
349 info = dmar_alloc_pci_notify_info(pdev, action);
350 if (!info)
351 return NOTIFY_DONE;
352
353 down_write(&dmar_global_lock);
354 if (action == BUS_NOTIFY_ADD_DEVICE)
355 dmar_pci_bus_add_dev(info);
e6a8c9b3 356 else if (action == BUS_NOTIFY_REMOVED_DEVICE)
59ce0515
JL
357 dmar_pci_bus_del_dev(info);
358 up_write(&dmar_global_lock);
359
360 dmar_free_pci_notify_info(info);
361
362 return NOTIFY_OK;
363}
364
365static struct notifier_block dmar_pci_bus_nb = {
366 .notifier_call = dmar_pci_bus_notifier,
367 .priority = INT_MIN,
368};
369
6b197249
JL
370static struct dmar_drhd_unit *
371dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
372{
373 struct dmar_drhd_unit *dmaru;
374
375 list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
376 if (dmaru->segment == drhd->segment &&
377 dmaru->reg_base_addr == drhd->address)
378 return dmaru;
379
380 return NULL;
381}
382
10e5247f
KA
383/**
384 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
385 * structure which uniquely represent one DMA remapping hardware unit
386 * present in the platform
387 */
6b197249 388static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
10e5247f
KA
389{
390 struct acpi_dmar_hardware_unit *drhd;
391 struct dmar_drhd_unit *dmaru;
392 int ret = 0;
10e5247f 393
e523b38e 394 drhd = (struct acpi_dmar_hardware_unit *)header;
6b197249
JL
395 dmaru = dmar_find_dmaru(drhd);
396 if (dmaru)
397 goto out;
398
399 dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
10e5247f
KA
400 if (!dmaru)
401 return -ENOMEM;
402
6b197249
JL
403 /*
404 * If header is allocated from slab by ACPI _DSM method, we need to
405 * copy the content because the memory buffer will be freed on return.
406 */
407 dmaru->hdr = (void *)(dmaru + 1);
408 memcpy(dmaru->hdr, header, header->length);
10e5247f 409 dmaru->reg_base_addr = drhd->address;
276dbf99 410 dmaru->segment = drhd->segment;
10e5247f 411 dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
07cb52ff
DW
412 dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
413 ((void *)drhd) + drhd->header.length,
414 &dmaru->devices_cnt);
415 if (dmaru->devices_cnt && dmaru->devices == NULL) {
416 kfree(dmaru);
417 return -ENOMEM;
2e455289 418 }
10e5247f 419
1886e8a9
SS
420 ret = alloc_iommu(dmaru);
421 if (ret) {
07cb52ff
DW
422 dmar_free_dev_scope(&dmaru->devices,
423 &dmaru->devices_cnt);
1886e8a9
SS
424 kfree(dmaru);
425 return ret;
426 }
427 dmar_register_drhd_unit(dmaru);
c2a0b538 428
6b197249 429out:
c2a0b538
JL
430 if (arg)
431 (*(int *)arg)++;
432
1886e8a9
SS
433 return 0;
434}
435
a868e6b7
JL
436static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
437{
438 if (dmaru->devices && dmaru->devices_cnt)
439 dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
440 if (dmaru->iommu)
441 free_iommu(dmaru->iommu);
442 kfree(dmaru);
443}
444
c2a0b538
JL
445static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
446 void *arg)
e625b4a9
DW
447{
448 struct acpi_dmar_andd *andd = (void *)header;
449
450 /* Check for NUL termination within the designated length */
83118b0d 451 if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
e625b4a9
DW
452 WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
453 "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
454 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
455 dmi_get_system_info(DMI_BIOS_VENDOR),
456 dmi_get_system_info(DMI_BIOS_VERSION),
457 dmi_get_system_info(DMI_PRODUCT_VERSION));
458 return -EINVAL;
459 }
460 pr_info("ANDD device: %x name: %s\n", andd->device_number,
83118b0d 461 andd->device_name);
e625b4a9
DW
462
463 return 0;
464}
465
aa697079 466#ifdef CONFIG_ACPI_NUMA
6b197249 467static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
ee34b32d
SS
468{
469 struct acpi_dmar_rhsa *rhsa;
470 struct dmar_drhd_unit *drhd;
471
472 rhsa = (struct acpi_dmar_rhsa *)header;
aa697079 473 for_each_drhd_unit(drhd) {
ee34b32d
SS
474 if (drhd->reg_base_addr == rhsa->base_address) {
475 int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
476
477 if (!node_online(node))
478 node = -1;
479 drhd->iommu->node = node;
aa697079
DW
480 return 0;
481 }
ee34b32d 482 }
fd0c8894
BH
483 WARN_TAINT(
484 1, TAINT_FIRMWARE_WORKAROUND,
485 "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
486 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
487 drhd->reg_base_addr,
488 dmi_get_system_info(DMI_BIOS_VENDOR),
489 dmi_get_system_info(DMI_BIOS_VERSION),
490 dmi_get_system_info(DMI_PRODUCT_VERSION));
ee34b32d 491
aa697079 492 return 0;
ee34b32d 493}
c2a0b538
JL
494#else
495#define dmar_parse_one_rhsa dmar_res_noop
aa697079 496#endif
ee34b32d 497
10e5247f
KA
498static void __init
499dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
500{
501 struct acpi_dmar_hardware_unit *drhd;
502 struct acpi_dmar_reserved_memory *rmrr;
aa5d2b51 503 struct acpi_dmar_atsr *atsr;
17b60977 504 struct acpi_dmar_rhsa *rhsa;
10e5247f
KA
505
506 switch (header->type) {
507 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
aa5d2b51
YZ
508 drhd = container_of(header, struct acpi_dmar_hardware_unit,
509 header);
e9071b0b 510 pr_info("DRHD base: %#016Lx flags: %#x\n",
aa5d2b51 511 (unsigned long long)drhd->address, drhd->flags);
10e5247f
KA
512 break;
513 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
aa5d2b51
YZ
514 rmrr = container_of(header, struct acpi_dmar_reserved_memory,
515 header);
e9071b0b 516 pr_info("RMRR base: %#016Lx end: %#016Lx\n",
5b6985ce
FY
517 (unsigned long long)rmrr->base_address,
518 (unsigned long long)rmrr->end_address);
10e5247f 519 break;
83118b0d 520 case ACPI_DMAR_TYPE_ROOT_ATS:
aa5d2b51 521 atsr = container_of(header, struct acpi_dmar_atsr, header);
e9071b0b 522 pr_info("ATSR flags: %#x\n", atsr->flags);
aa5d2b51 523 break;
83118b0d 524 case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
17b60977 525 rhsa = container_of(header, struct acpi_dmar_rhsa, header);
e9071b0b 526 pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
17b60977
RD
527 (unsigned long long)rhsa->base_address,
528 rhsa->proximity_domain);
529 break;
83118b0d 530 case ACPI_DMAR_TYPE_NAMESPACE:
e625b4a9
DW
531 /* We don't print this here because we need to sanity-check
532 it first. So print it in dmar_parse_one_andd() instead. */
533 break;
10e5247f
KA
534 }
535}
536
f6dd5c31
YL
537/**
538 * dmar_table_detect - checks to see if the platform supports DMAR devices
539 */
540static int __init dmar_table_detect(void)
541{
542 acpi_status status = AE_OK;
543
544 /* if we could find DMAR table, then there are DMAR devices */
6b11d1d6 545 status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);
f6dd5c31
YL
546
547 if (ACPI_SUCCESS(status) && !dmar_tbl) {
e9071b0b 548 pr_warn("Unable to map DMAR\n");
f6dd5c31
YL
549 status = AE_NOT_FOUND;
550 }
551
552 return (ACPI_SUCCESS(status) ? 1 : 0);
553}
aaa9d1dd 554
c2a0b538
JL
555static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
556 size_t len, struct dmar_res_callback *cb)
557{
558 int ret = 0;
559 struct acpi_dmar_header *iter, *next;
560 struct acpi_dmar_header *end = ((void *)start) + len;
561
562 for (iter = start; iter < end && ret == 0; iter = next) {
563 next = (void *)iter + iter->length;
564 if (iter->length == 0) {
565 /* Avoid looping forever on bad ACPI tables */
566 pr_debug(FW_BUG "Invalid 0-length structure\n");
567 break;
568 } else if (next > end) {
569 /* Avoid passing table end */
9f10e5bf 570 pr_warn(FW_BUG "Record passes table end\n");
c2a0b538
JL
571 ret = -EINVAL;
572 break;
573 }
574
575 if (cb->print_entry)
576 dmar_table_print_dmar_entry(iter);
577
578 if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
579 /* continue for forward compatibility */
580 pr_debug("Unknown DMAR structure type %d\n",
581 iter->type);
582 } else if (cb->cb[iter->type]) {
583 ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
584 } else if (!cb->ignore_unhandled) {
585 pr_warn("No handler for DMAR structure type %d\n",
586 iter->type);
587 ret = -EINVAL;
588 }
589 }
590
591 return ret;
592}
593
594static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
595 struct dmar_res_callback *cb)
596{
597 return dmar_walk_remapping_entries((void *)(dmar + 1),
598 dmar->header.length - sizeof(*dmar), cb);
599}
600
10e5247f
KA
601/**
602 * parse_dmar_table - parses the DMA reporting table
603 */
604static int __init
605parse_dmar_table(void)
606{
607 struct acpi_table_dmar *dmar;
10e5247f 608 int ret = 0;
7cef3347 609 int drhd_count = 0;
c2a0b538
JL
610 struct dmar_res_callback cb = {
611 .print_entry = true,
612 .ignore_unhandled = true,
613 .arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
614 .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
615 .cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
616 .cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
617 .cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
618 .cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
619 };
10e5247f 620
f6dd5c31
YL
621 /*
622 * Do it again, earlier dmar_tbl mapping could be mapped with
623 * fixed map.
624 */
625 dmar_table_detect();
626
a59b50e9
JC
627 /*
628 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
629 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
630 */
631 dmar_tbl = tboot_get_dmar_table(dmar_tbl);
632
10e5247f
KA
633 dmar = (struct acpi_table_dmar *)dmar_tbl;
634 if (!dmar)
635 return -ENODEV;
636
5b6985ce 637 if (dmar->width < PAGE_SHIFT - 1) {
e9071b0b 638 pr_warn("Invalid DMAR haw\n");
10e5247f
KA
639 return -EINVAL;
640 }
641
e9071b0b 642 pr_info("Host address width %d\n", dmar->width + 1);
c2a0b538
JL
643 ret = dmar_walk_dmar_table(dmar, &cb);
644 if (ret == 0 && drhd_count == 0)
7cef3347 645 pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
c2a0b538 646
10e5247f
KA
647 return ret;
648}
649
832bd858
DW
650static int dmar_pci_device_match(struct dmar_dev_scope devices[],
651 int cnt, struct pci_dev *dev)
e61d98d8
SS
652{
653 int index;
832bd858 654 struct device *tmp;
e61d98d8
SS
655
656 while (dev) {
b683b230 657 for_each_active_dev_scope(devices, cnt, index, tmp)
832bd858 658 if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
e61d98d8
SS
659 return 1;
660
661 /* Check our parent */
662 dev = dev->bus->self;
663 }
664
665 return 0;
666}
667
668struct dmar_drhd_unit *
669dmar_find_matched_drhd_unit(struct pci_dev *dev)
670{
0e242612 671 struct dmar_drhd_unit *dmaru;
2e824f79
YZ
672 struct acpi_dmar_hardware_unit *drhd;
673
dda56549
Y
674 dev = pci_physfn(dev);
675
0e242612 676 rcu_read_lock();
8b161f0e 677 for_each_drhd_unit(dmaru) {
2e824f79
YZ
678 drhd = container_of(dmaru->hdr,
679 struct acpi_dmar_hardware_unit,
680 header);
681
682 if (dmaru->include_all &&
683 drhd->segment == pci_domain_nr(dev->bus))
0e242612 684 goto out;
e61d98d8 685
2e824f79
YZ
686 if (dmar_pci_device_match(dmaru->devices,
687 dmaru->devices_cnt, dev))
0e242612 688 goto out;
e61d98d8 689 }
0e242612
JL
690 dmaru = NULL;
691out:
692 rcu_read_unlock();
e61d98d8 693
0e242612 694 return dmaru;
e61d98d8
SS
695}
696
ed40356b
DW
697static void __init dmar_acpi_insert_dev_scope(u8 device_number,
698 struct acpi_device *adev)
699{
700 struct dmar_drhd_unit *dmaru;
701 struct acpi_dmar_hardware_unit *drhd;
702 struct acpi_dmar_device_scope *scope;
703 struct device *tmp;
704 int i;
705 struct acpi_dmar_pci_path *path;
706
707 for_each_drhd_unit(dmaru) {
708 drhd = container_of(dmaru->hdr,
709 struct acpi_dmar_hardware_unit,
710 header);
711
712 for (scope = (void *)(drhd + 1);
713 (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
714 scope = ((void *)scope) + scope->length) {
83118b0d 715 if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
ed40356b
DW
716 continue;
717 if (scope->enumeration_id != device_number)
718 continue;
719
720 path = (void *)(scope + 1);
721 pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
722 dev_name(&adev->dev), dmaru->reg_base_addr,
723 scope->bus, path->device, path->function);
724 for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
725 if (tmp == NULL) {
726 dmaru->devices[i].bus = scope->bus;
727 dmaru->devices[i].devfn = PCI_DEVFN(path->device,
728 path->function);
729 rcu_assign_pointer(dmaru->devices[i].dev,
730 get_device(&adev->dev));
731 return;
732 }
733 BUG_ON(i >= dmaru->devices_cnt);
734 }
735 }
736 pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
737 device_number, dev_name(&adev->dev));
738}
739
740static int __init dmar_acpi_dev_scope_init(void)
741{
11f1a776
JR
742 struct acpi_dmar_andd *andd;
743
744 if (dmar_tbl == NULL)
745 return -ENODEV;
746
7713ec06
DW
747 for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
748 ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
749 andd = ((void *)andd) + andd->header.length) {
83118b0d 750 if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
ed40356b
DW
751 acpi_handle h;
752 struct acpi_device *adev;
753
754 if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
83118b0d 755 andd->device_name,
ed40356b
DW
756 &h))) {
757 pr_err("Failed to find handle for ACPI object %s\n",
83118b0d 758 andd->device_name);
ed40356b
DW
759 continue;
760 }
c0df975f 761 if (acpi_bus_get_device(h, &adev)) {
ed40356b 762 pr_err("Failed to get device for ACPI object %s\n",
83118b0d 763 andd->device_name);
ed40356b
DW
764 continue;
765 }
766 dmar_acpi_insert_dev_scope(andd->device_number, adev);
767 }
ed40356b
DW
768 }
769 return 0;
770}
771
1886e8a9
SS
772int __init dmar_dev_scope_init(void)
773{
2e455289
JL
774 struct pci_dev *dev = NULL;
775 struct dmar_pci_notify_info *info;
1886e8a9 776
2e455289
JL
777 if (dmar_dev_scope_status != 1)
778 return dmar_dev_scope_status;
c2c7286a 779
2e455289
JL
780 if (list_empty(&dmar_drhd_units)) {
781 dmar_dev_scope_status = -ENODEV;
782 } else {
783 dmar_dev_scope_status = 0;
784
63b42624
DW
785 dmar_acpi_dev_scope_init();
786
2e455289
JL
787 for_each_pci_dev(dev) {
788 if (dev->is_virtfn)
789 continue;
790
791 info = dmar_alloc_pci_notify_info(dev,
792 BUS_NOTIFY_ADD_DEVICE);
793 if (!info) {
794 return dmar_dev_scope_status;
795 } else {
796 dmar_pci_bus_add_dev(info);
797 dmar_free_pci_notify_info(info);
798 }
799 }
318fe7df 800
2e455289 801 bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
1886e8a9
SS
802 }
803
2e455289 804 return dmar_dev_scope_status;
1886e8a9
SS
805}
806
10e5247f
KA
807
808int __init dmar_table_init(void)
809{
1886e8a9 810 static int dmar_table_initialized;
093f87d2
FY
811 int ret;
812
cc05301f
JL
813 if (dmar_table_initialized == 0) {
814 ret = parse_dmar_table();
815 if (ret < 0) {
816 if (ret != -ENODEV)
9f10e5bf 817 pr_info("Parse DMAR table failure.\n");
cc05301f
JL
818 } else if (list_empty(&dmar_drhd_units)) {
819 pr_info("No DMAR devices found\n");
820 ret = -ENODEV;
821 }
093f87d2 822
cc05301f
JL
823 if (ret < 0)
824 dmar_table_initialized = ret;
825 else
826 dmar_table_initialized = 1;
10e5247f 827 }
093f87d2 828
cc05301f 829 return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
10e5247f
KA
830}
831
3a8663ee
BH
832static void warn_invalid_dmar(u64 addr, const char *message)
833{
fd0c8894
BH
834 WARN_TAINT_ONCE(
835 1, TAINT_FIRMWARE_WORKAROUND,
836 "Your BIOS is broken; DMAR reported at address %llx%s!\n"
837 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
838 addr, message,
839 dmi_get_system_info(DMI_BIOS_VENDOR),
840 dmi_get_system_info(DMI_BIOS_VERSION),
841 dmi_get_system_info(DMI_PRODUCT_VERSION));
3a8663ee 842}
6ecbf01c 843
c2a0b538
JL
844static int __ref
845dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
86cf898e 846{
86cf898e 847 struct acpi_dmar_hardware_unit *drhd;
c2a0b538
JL
848 void __iomem *addr;
849 u64 cap, ecap;
86cf898e 850
c2a0b538
JL
851 drhd = (void *)entry;
852 if (!drhd->address) {
853 warn_invalid_dmar(0, "");
854 return -EINVAL;
855 }
2c992208 856
6b197249
JL
857 if (arg)
858 addr = ioremap(drhd->address, VTD_PAGE_SIZE);
859 else
860 addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
c2a0b538 861 if (!addr) {
9f10e5bf 862 pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
c2a0b538
JL
863 return -EINVAL;
864 }
6b197249 865
c2a0b538
JL
866 cap = dmar_readq(addr + DMAR_CAP_REG);
867 ecap = dmar_readq(addr + DMAR_ECAP_REG);
6b197249
JL
868
869 if (arg)
870 iounmap(addr);
871 else
872 early_iounmap(addr, VTD_PAGE_SIZE);
86cf898e 873
c2a0b538
JL
874 if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
875 warn_invalid_dmar(drhd->address, " returns all ones");
876 return -EINVAL;
86cf898e 877 }
2c992208 878
2c992208 879 return 0;
86cf898e
DW
880}
881
480125ba 882int __init detect_intel_iommu(void)
2ae21010
SS
883{
884 int ret;
c2a0b538
JL
885 struct dmar_res_callback validate_drhd_cb = {
886 .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
887 .ignore_unhandled = true,
888 };
2ae21010 889
3a5670e8 890 down_write(&dmar_global_lock);
f6dd5c31 891 ret = dmar_table_detect();
86cf898e 892 if (ret)
c2a0b538
JL
893 ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
894 &validate_drhd_cb);
895 if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
896 iommu_detected = 1;
897 /* Make sure ACS will be enabled */
898 pci_request_acs();
899 }
f5d1b97b 900
9d5ce73a 901#ifdef CONFIG_X86
c2a0b538
JL
902 if (ret)
903 x86_init.iommu.iommu_init = intel_iommu_init;
2ae21010 904#endif
c2a0b538 905
696c7f8e
RW
906 if (dmar_tbl) {
907 acpi_put_table(dmar_tbl);
908 dmar_tbl = NULL;
909 }
3a5670e8 910 up_write(&dmar_global_lock);
480125ba 911
4db77ff3 912 return ret ? 1 : -ENODEV;
2ae21010
SS
913}
914
915
6f5cf521
DD
916static void unmap_iommu(struct intel_iommu *iommu)
917{
918 iounmap(iommu->reg);
919 release_mem_region(iommu->reg_phys, iommu->reg_size);
920}
921
/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers. Start with a single page, and
 * possibly expand if that turns out to be insufficient.
 */
930static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
931{
932 int map_size, err=0;
933
934 iommu->reg_phys = phys_addr;
935 iommu->reg_size = VTD_PAGE_SIZE;
936
937 if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
9f10e5bf 938 pr_err("Can't reserve memory\n");
6f5cf521
DD
939 err = -EBUSY;
940 goto out;
941 }
942
943 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
944 if (!iommu->reg) {
9f10e5bf 945 pr_err("Can't map the region\n");
6f5cf521
DD
946 err = -ENOMEM;
947 goto release;
948 }
949
950 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
951 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
952
953 if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
954 err = -EINVAL;
955 warn_invalid_dmar(phys_addr, " returns all ones");
956 goto unmap;
957 }
958
959 /* the registers might be more than one page */
960 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
961 cap_max_fault_reg_offset(iommu->cap));
962 map_size = VTD_PAGE_ALIGN(map_size);
963 if (map_size > iommu->reg_size) {
964 iounmap(iommu->reg);
965 release_mem_region(iommu->reg_phys, iommu->reg_size);
966 iommu->reg_size = map_size;
967 if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
968 iommu->name)) {
9f10e5bf 969 pr_err("Can't reserve memory\n");
6f5cf521
DD
970 err = -EBUSY;
971 goto out;
972 }
973 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
974 if (!iommu->reg) {
9f10e5bf 975 pr_err("Can't map the region\n");
6f5cf521
DD
976 err = -ENOMEM;
977 goto release;
978 }
979 }
980 err = 0;
981 goto out;
982
983unmap:
984 iounmap(iommu->reg);
985release:
986 release_mem_region(iommu->reg_phys, iommu->reg_size);
987out:
988 return err;
989}
990
78d8e704
JL
991static int dmar_alloc_seq_id(struct intel_iommu *iommu)
992{
993 iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
994 DMAR_UNITS_SUPPORTED);
995 if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
996 iommu->seq_id = -1;
997 } else {
998 set_bit(iommu->seq_id, dmar_seq_ids);
999 sprintf(iommu->name, "dmar%d", iommu->seq_id);
1000 }
1001
1002 return iommu->seq_id;
1003}
1004
1005static void dmar_free_seq_id(struct intel_iommu *iommu)
1006{
1007 if (iommu->seq_id >= 0) {
1008 clear_bit(iommu->seq_id, dmar_seq_ids);
1009 iommu->seq_id = -1;
1010 }
1011}
1012
694835dc 1013static int alloc_iommu(struct dmar_drhd_unit *drhd)
e61d98d8 1014{
c42d9f32 1015 struct intel_iommu *iommu;
3a93c841 1016 u32 ver, sts;
43f7392b 1017 int agaw = 0;
4ed0d3e6 1018 int msagaw = 0;
6f5cf521 1019 int err;
c42d9f32 1020
6ecbf01c 1021 if (!drhd->reg_base_addr) {
3a8663ee 1022 warn_invalid_dmar(0, "");
6ecbf01c
DW
1023 return -EINVAL;
1024 }
1025
c42d9f32
SS
1026 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
1027 if (!iommu)
1886e8a9 1028 return -ENOMEM;
c42d9f32 1029
78d8e704 1030 if (dmar_alloc_seq_id(iommu) < 0) {
9f10e5bf 1031 pr_err("Failed to allocate seq_id\n");
78d8e704
JL
1032 err = -ENOSPC;
1033 goto error;
1034 }
e61d98d8 1035
6f5cf521
DD
1036 err = map_iommu(iommu, drhd->reg_base_addr);
1037 if (err) {
9f10e5bf 1038 pr_err("Failed to map %s\n", iommu->name);
78d8e704 1039 goto error_free_seq_id;
e61d98d8 1040 }
0815565a 1041
6f5cf521 1042 err = -EINVAL;
1b573683
WH
1043 agaw = iommu_calculate_agaw(iommu);
1044 if (agaw < 0) {
bf947fcb
DD
1045 pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
1046 iommu->seq_id);
0815565a 1047 goto err_unmap;
4ed0d3e6
FY
1048 }
1049 msagaw = iommu_calculate_max_sagaw(iommu);
1050 if (msagaw < 0) {
bf947fcb 1051 pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
1b573683 1052 iommu->seq_id);
0815565a 1053 goto err_unmap;
1b573683
WH
1054 }
1055 iommu->agaw = agaw;
4ed0d3e6 1056 iommu->msagaw = msagaw;
67ccac41 1057 iommu->segment = drhd->segment;
1b573683 1058
ee34b32d
SS
1059 iommu->node = -1;
1060
e61d98d8 1061 ver = readl(iommu->reg + DMAR_VER_REG);
9f10e5bf
JR
1062 pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
1063 iommu->name,
5b6985ce
FY
1064 (unsigned long long)drhd->reg_base_addr,
1065 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
1066 (unsigned long long)iommu->cap,
1067 (unsigned long long)iommu->ecap);
e61d98d8 1068
3a93c841
TI
1069 /* Reflect status in gcmd */
1070 sts = readl(iommu->reg + DMAR_GSTS_REG);
1071 if (sts & DMA_GSTS_IRES)
1072 iommu->gcmd |= DMA_GCMD_IRE;
1073 if (sts & DMA_GSTS_TES)
1074 iommu->gcmd |= DMA_GCMD_TE;
1075 if (sts & DMA_GSTS_QIES)
1076 iommu->gcmd |= DMA_GCMD_QIE;
1077
1f5b3c3f 1078 raw_spin_lock_init(&iommu->register_lock);
e61d98d8 1079
bc847454 1080 if (intel_iommu_enabled) {
a5459cfe
AW
1081 iommu->iommu_dev = iommu_device_create(NULL, iommu,
1082 intel_iommu_groups,
2439d4aa 1083 "%s", iommu->name);
a5459cfe 1084
bc847454
JR
1085 if (IS_ERR(iommu->iommu_dev)) {
1086 err = PTR_ERR(iommu->iommu_dev);
1087 goto err_unmap;
1088 }
59203379
NK
1089 }
1090
bc847454
JR
1091 drhd->iommu = iommu;
1092
1886e8a9 1093 return 0;
0815565a 1094
78d8e704 1095err_unmap:
6f5cf521 1096 unmap_iommu(iommu);
78d8e704
JL
1097error_free_seq_id:
1098 dmar_free_seq_id(iommu);
1099error:
e61d98d8 1100 kfree(iommu);
6f5cf521 1101 return err;
e61d98d8
SS
1102}
1103
a868e6b7 1104static void free_iommu(struct intel_iommu *iommu)
e61d98d8 1105{
a5459cfe
AW
1106 iommu_device_destroy(iommu->iommu_dev);
1107
a868e6b7 1108 if (iommu->irq) {
1208225c
DW
1109 if (iommu->pr_irq) {
1110 free_irq(iommu->pr_irq, iommu);
1111 dmar_free_hwirq(iommu->pr_irq);
1112 iommu->pr_irq = 0;
1113 }
a868e6b7 1114 free_irq(iommu->irq, iommu);
a553b142 1115 dmar_free_hwirq(iommu->irq);
34742db8 1116 iommu->irq = 0;
a868e6b7 1117 }
e61d98d8 1118
a84da70b
JL
1119 if (iommu->qi) {
1120 free_page((unsigned long)iommu->qi->desc);
1121 kfree(iommu->qi->desc_status);
1122 kfree(iommu->qi);
1123 }
1124
e61d98d8 1125 if (iommu->reg)
6f5cf521
DD
1126 unmap_iommu(iommu);
1127
78d8e704 1128 dmar_free_seq_id(iommu);
e61d98d8
SS
1129 kfree(iommu);
1130}
fe962e90
SS
1131
/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
1135static inline void reclaim_free_desc(struct q_inval *qi)
1136{
6ba6c3a4
YZ
1137 while (qi->desc_status[qi->free_tail] == QI_DONE ||
1138 qi->desc_status[qi->free_tail] == QI_ABORT) {
fe962e90
SS
1139 qi->desc_status[qi->free_tail] = QI_FREE;
1140 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
1141 qi->free_cnt++;
1142 }
1143}
1144
704126ad
YZ
1145static int qi_check_fault(struct intel_iommu *iommu, int index)
1146{
1147 u32 fault;
6ba6c3a4 1148 int head, tail;
704126ad
YZ
1149 struct q_inval *qi = iommu->qi;
1150 int wait_index = (index + 1) % QI_LENGTH;
1151
6ba6c3a4
YZ
1152 if (qi->desc_status[wait_index] == QI_ABORT)
1153 return -EAGAIN;
1154
704126ad
YZ
1155 fault = readl(iommu->reg + DMAR_FSTS_REG);
1156
1157 /*
1158 * If IQE happens, the head points to the descriptor associated
1159 * with the error. No new descriptors are fetched until the IQE
1160 * is cleared.
1161 */
1162 if (fault & DMA_FSTS_IQE) {
1163 head = readl(iommu->reg + DMAR_IQH_REG);
6ba6c3a4 1164 if ((head >> DMAR_IQ_SHIFT) == index) {
bf947fcb 1165 pr_err("VT-d detected invalid descriptor: "
6ba6c3a4
YZ
1166 "low=%llx, high=%llx\n",
1167 (unsigned long long)qi->desc[index].low,
1168 (unsigned long long)qi->desc[index].high);
704126ad
YZ
1169 memcpy(&qi->desc[index], &qi->desc[wait_index],
1170 sizeof(struct qi_desc));
704126ad
YZ
1171 writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
1172 return -EINVAL;
1173 }
1174 }
1175
6ba6c3a4
YZ
1176 /*
1177 * If ITE happens, all pending wait_desc commands are aborted.
1178 * No new descriptors are fetched until the ITE is cleared.
1179 */
1180 if (fault & DMA_FSTS_ITE) {
1181 head = readl(iommu->reg + DMAR_IQH_REG);
1182 head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
1183 head |= 1;
1184 tail = readl(iommu->reg + DMAR_IQT_REG);
1185 tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
1186
1187 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
1188
1189 do {
1190 if (qi->desc_status[head] == QI_IN_USE)
1191 qi->desc_status[head] = QI_ABORT;
1192 head = (head - 2 + QI_LENGTH) % QI_LENGTH;
1193 } while (head != tail);
1194
1195 if (qi->desc_status[wait_index] == QI_ABORT)
1196 return -EAGAIN;
1197 }
1198
1199 if (fault & DMA_FSTS_ICE)
1200 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
1201
704126ad
YZ
1202 return 0;
1203}
1204
fe962e90
SS
1205/*
1206 * Submit the queued invalidation descriptor to the remapping
1207 * hardware unit and wait for its completion.
1208 */
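/*
 * Each submission consumes two slots in the invalidation queue: the
 * request descriptor itself plus a trailing wait descriptor whose status
 * write lets us poll desc_status[] for QI_DONE.  qi_check_fault() may
 * abort the wait on a queue error, in which case the submission is
 * restarted.
 */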
704126ad 1209int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
fe962e90 1210{
6ba6c3a4 1211 int rc;
fe962e90
SS
1212 struct q_inval *qi = iommu->qi;
1213 struct qi_desc *hw, wait_desc;
1214 int wait_index, index;
1215 unsigned long flags;
1216
1217 if (!qi)
704126ad 1218 return 0;
fe962e90
SS
1219
1220 hw = qi->desc;
1221
6ba6c3a4
YZ
1222restart:
1223 rc = 0;
1224
3b8f4048 1225 raw_spin_lock_irqsave(&qi->q_lock, flags);
fe962e90 1226 while (qi->free_cnt < 3) {
3b8f4048 1227 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
fe962e90 1228 cpu_relax();
3b8f4048 1229 raw_spin_lock_irqsave(&qi->q_lock, flags);
fe962e90
SS
1230 }
1231
1232 index = qi->free_head;
1233 wait_index = (index + 1) % QI_LENGTH;
1234
1235 qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
1236
1237 hw[index] = *desc;
1238
704126ad
YZ
1239 wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
1240 QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
fe962e90
SS
1241 wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
1242
1243 hw[wait_index] = wait_desc;
1244
fe962e90
SS
1245 qi->free_head = (qi->free_head + 2) % QI_LENGTH;
1246 qi->free_cnt -= 2;
1247
fe962e90
SS
1248 /*
1249 * update the HW tail register indicating the presence of
1250 * new descriptors.
1251 */
6ba6c3a4 1252 writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
fe962e90
SS
1253
1254 while (qi->desc_status[wait_index] != QI_DONE) {
f05810c9
SS
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is
		 * to avoid a deadlock where the interrupt context can wait
		 * indefinitely for free slots in the queue.
		 */
704126ad
YZ
1262 rc = qi_check_fault(iommu, index);
1263 if (rc)
6ba6c3a4 1264 break;
704126ad 1265
3b8f4048 1266 raw_spin_unlock(&qi->q_lock);
fe962e90 1267 cpu_relax();
3b8f4048 1268 raw_spin_lock(&qi->q_lock);
fe962e90 1269 }
6ba6c3a4
YZ
1270
1271 qi->desc_status[index] = QI_DONE;
fe962e90
SS
1272
1273 reclaim_free_desc(qi);
3b8f4048 1274 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
704126ad 1275
6ba6c3a4
YZ
1276 if (rc == -EAGAIN)
1277 goto restart;
1278
704126ad 1279 return rc;
fe962e90
SS
1280}
1281
1282/*
1283 * Flush the global interrupt entry cache.
1284 */
1285void qi_global_iec(struct intel_iommu *iommu)
1286{
1287 struct qi_desc desc;
1288
1289 desc.low = QI_IEC_TYPE;
1290 desc.high = 0;
1291
704126ad 1292 /* should never fail */
fe962e90
SS
1293 qi_submit_sync(&desc, iommu);
1294}
1295
4c25a2c1
DW
1296void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
1297 u64 type)
3481f210 1298{
3481f210
YS
1299 struct qi_desc desc;
1300
3481f210
YS
1301 desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
1302 | QI_CC_GRAN(type) | QI_CC_TYPE;
1303 desc.high = 0;
1304
4c25a2c1 1305 qi_submit_sync(&desc, iommu);
3481f210
YS
1306}
1307
1f0ef2aa
DW
1308void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
1309 unsigned int size_order, u64 type)
3481f210
YS
1310{
1311 u8 dw = 0, dr = 0;
1312
1313 struct qi_desc desc;
1314 int ih = 0;
1315
3481f210
YS
1316 if (cap_write_drain(iommu->cap))
1317 dw = 1;
1318
1319 if (cap_read_drain(iommu->cap))
1320 dr = 1;
1321
1322 desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
1323 | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
1324 desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
1325 | QI_IOTLB_AM(size_order);
1326
1f0ef2aa 1327 qi_submit_sync(&desc, iommu);
3481f210
YS
1328}
1329
6ba6c3a4
YZ
1330void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
1331 u64 addr, unsigned mask)
1332{
1333 struct qi_desc desc;
1334
1335 if (mask) {
1336 BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
1337 addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
1338 desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
1339 } else
1340 desc.high = QI_DEV_IOTLB_ADDR(addr);
1341
1342 if (qdep >= QI_DEV_IOTLB_MAX_INVS)
1343 qdep = 0;
1344
1345 desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
1346 QI_DIOTLB_TYPE;
1347
1348 qi_submit_sync(&desc, iommu);
1349}
1350
eba67e5d
SS
1351/*
1352 * Disable Queued Invalidation interface.
1353 */
1354void dmar_disable_qi(struct intel_iommu *iommu)
1355{
1356 unsigned long flags;
1357 u32 sts;
1358 cycles_t start_time = get_cycles();
1359
1360 if (!ecap_qis(iommu->ecap))
1361 return;
1362
1f5b3c3f 1363 raw_spin_lock_irqsave(&iommu->register_lock, flags);
eba67e5d 1364
fda3bec1 1365 sts = readl(iommu->reg + DMAR_GSTS_REG);
eba67e5d
SS
1366 if (!(sts & DMA_GSTS_QIES))
1367 goto end;
1368
1369 /*
1370 * Give a chance to HW to complete the pending invalidation requests.
1371 */
1372 while ((readl(iommu->reg + DMAR_IQT_REG) !=
1373 readl(iommu->reg + DMAR_IQH_REG)) &&
1374 (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
1375 cpu_relax();
1376
1377 iommu->gcmd &= ~DMA_GCMD_QIE;
eba67e5d
SS
1378 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1379
1380 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
1381 !(sts & DMA_GSTS_QIES), sts);
1382end:
1f5b3c3f 1383 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
eba67e5d
SS
1384}
1385
eb4a52bc
FY
1386/*
1387 * Enable queued invalidation.
1388 */
1389static void __dmar_enable_qi(struct intel_iommu *iommu)
1390{
c416daa9 1391 u32 sts;
eb4a52bc
FY
1392 unsigned long flags;
1393 struct q_inval *qi = iommu->qi;
1394
1395 qi->free_head = qi->free_tail = 0;
1396 qi->free_cnt = QI_LENGTH;
1397
1f5b3c3f 1398 raw_spin_lock_irqsave(&iommu->register_lock, flags);
eb4a52bc
FY
1399
1400 /* write zero to the tail reg */
1401 writel(0, iommu->reg + DMAR_IQT_REG);
1402
1403 dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
1404
eb4a52bc 1405 iommu->gcmd |= DMA_GCMD_QIE;
c416daa9 1406 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
eb4a52bc
FY
1407
1408 /* Make sure hardware complete it */
1409 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1410
1f5b3c3f 1411 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
eb4a52bc
FY
1412}
1413
fe962e90
SS
1414/*
1415 * Enable Queued Invalidation interface. This is a must to support
1416 * interrupt-remapping. Also used by DMA-remapping, which replaces
1417 * register based IOTLB invalidation.
1418 */
1419int dmar_enable_qi(struct intel_iommu *iommu)
1420{
fe962e90 1421 struct q_inval *qi;
751cafe3 1422 struct page *desc_page;
fe962e90
SS
1423
1424 if (!ecap_qis(iommu->ecap))
1425 return -ENOENT;
1426
1427 /*
1428 * queued invalidation is already setup and enabled.
1429 */
1430 if (iommu->qi)
1431 return 0;
1432
fa4b57cc 1433 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
fe962e90
SS
1434 if (!iommu->qi)
1435 return -ENOMEM;
1436
1437 qi = iommu->qi;
1438
751cafe3
SS
1439
1440 desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
1441 if (!desc_page) {
fe962e90 1442 kfree(qi);
b707cb02 1443 iommu->qi = NULL;
fe962e90
SS
1444 return -ENOMEM;
1445 }
1446
751cafe3
SS
1447 qi->desc = page_address(desc_page);
1448
37a40710 1449 qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
fe962e90
SS
1450 if (!qi->desc_status) {
1451 free_page((unsigned long) qi->desc);
1452 kfree(qi);
b707cb02 1453 iommu->qi = NULL;
fe962e90
SS
1454 return -ENOMEM;
1455 }
1456
3b8f4048 1457 raw_spin_lock_init(&qi->q_lock);
fe962e90 1458
eb4a52bc 1459 __dmar_enable_qi(iommu);
fe962e90
SS
1460
1461 return 0;
1462}
0ac2491f
SS
1463
1464/* iommu interrupt handling. Most stuff are MSI-like. */
1465
9d783ba0
SS
1466enum faulttype {
1467 DMA_REMAP,
1468 INTR_REMAP,
1469 UNKNOWN,
1470};
1471
1472static const char *dma_remap_fault_reasons[] =
0ac2491f
SS
1473{
1474 "Software",
1475 "Present bit in root entry is clear",
1476 "Present bit in context entry is clear",
1477 "Invalid context entry",
1478 "Access beyond MGAW",
1479 "PTE Write access is not set",
1480 "PTE Read access is not set",
1481 "Next page table ptr is invalid",
1482 "Root table address invalid",
1483 "Context table ptr is invalid",
1484 "non-zero reserved fields in RTP",
1485 "non-zero reserved fields in CTP",
1486 "non-zero reserved fields in PTE",
4ecccd9e 1487 "PCE for translation request specifies blocking",
0ac2491f 1488};
9d783ba0 1489
95a02e97 1490static const char *irq_remap_fault_reasons[] =
9d783ba0
SS
1491{
1492 "Detected reserved fields in the decoded interrupt-remapped request",
1493 "Interrupt index exceeded the interrupt-remapping table size",
1494 "Present field in the IRTE entry is clear",
1495 "Error accessing interrupt-remapping table pointed by IRTA_REG",
1496 "Detected reserved fields in the IRTE entry",
1497 "Blocked a compatibility format interrupt request",
1498 "Blocked an interrupt request due to source-id verification failure",
1499};
1500
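/*
 * Interrupt-remapping fault reasons are numbered from 0x20 in the VT-d
 * spec, hence the "- 0x20" offset into irq_remap_fault_reasons[] in
 * dmar_get_fault_reason() below.
 */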
21004dcd 1501static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
0ac2491f 1502{
fefe1ed1
DC
1503 if (fault_reason >= 0x20 && (fault_reason - 0x20 <
1504 ARRAY_SIZE(irq_remap_fault_reasons))) {
9d783ba0 1505 *fault_type = INTR_REMAP;
95a02e97 1506 return irq_remap_fault_reasons[fault_reason - 0x20];
9d783ba0
SS
1507 } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1508 *fault_type = DMA_REMAP;
1509 return dma_remap_fault_reasons[fault_reason];
1510 } else {
1511 *fault_type = UNKNOWN;
0ac2491f 1512 return "Unknown";
9d783ba0 1513 }
0ac2491f
SS
1514}
1515
1208225c
DW
1516
1517static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
1518{
1519 if (iommu->irq == irq)
1520 return DMAR_FECTL_REG;
1521 else if (iommu->pr_irq == irq)
1522 return DMAR_PECTL_REG;
1523 else
1524 BUG();
1525}
1526
5c2837fb 1527void dmar_msi_unmask(struct irq_data *data)
0ac2491f 1528{
dced35ae 1529 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1208225c 1530 int reg = dmar_msi_reg(iommu, data->irq);
0ac2491f
SS
1531 unsigned long flag;
1532
1533 /* unmask it */
1f5b3c3f 1534 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1208225c 1535 writel(0, iommu->reg + reg);
0ac2491f 1536 /* Read a reg to force flush the post write */
1208225c 1537 readl(iommu->reg + reg);
1f5b3c3f 1538 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
0ac2491f
SS
1539}
1540
5c2837fb 1541void dmar_msi_mask(struct irq_data *data)
0ac2491f 1542{
dced35ae 1543 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1208225c
DW
1544 int reg = dmar_msi_reg(iommu, data->irq);
1545 unsigned long flag;
0ac2491f
SS
1546
1547 /* mask it */
1f5b3c3f 1548 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1208225c 1549 writel(DMA_FECTL_IM, iommu->reg + reg);
0ac2491f 1550 /* Read a reg to force flush the post write */
1208225c 1551 readl(iommu->reg + reg);
1f5b3c3f 1552 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
0ac2491f
SS
1553}
1554
1555void dmar_msi_write(int irq, struct msi_msg *msg)
1556{
dced35ae 1557 struct intel_iommu *iommu = irq_get_handler_data(irq);
1208225c 1558 int reg = dmar_msi_reg(iommu, irq);
0ac2491f
SS
1559 unsigned long flag;
1560
1f5b3c3f 1561 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1208225c
DW
1562 writel(msg->data, iommu->reg + reg + 4);
1563 writel(msg->address_lo, iommu->reg + reg + 8);
1564 writel(msg->address_hi, iommu->reg + reg + 12);
1f5b3c3f 1565 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
0ac2491f
SS
1566}
1567
1568void dmar_msi_read(int irq, struct msi_msg *msg)
1569{
dced35ae 1570 struct intel_iommu *iommu = irq_get_handler_data(irq);
1208225c 1571 int reg = dmar_msi_reg(iommu, irq);
0ac2491f
SS
1572 unsigned long flag;
1573
1f5b3c3f 1574 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1208225c
DW
1575 msg->data = readl(iommu->reg + reg + 4);
1576 msg->address_lo = readl(iommu->reg + reg + 8);
1577 msg->address_hi = readl(iommu->reg + reg + 12);
1f5b3c3f 1578 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
0ac2491f
SS
1579}
1580
1581static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1582 u8 fault_reason, u16 source_id, unsigned long long addr)
1583{
1584 const char *reason;
9d783ba0 1585 int fault_type;
0ac2491f 1586
9d783ba0 1587 reason = dmar_get_fault_reason(fault_reason, &fault_type);
0ac2491f 1588
9d783ba0 1589 if (fault_type == INTR_REMAP)
a0fe14d7
AW
1590 pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index %llx [fault reason %02d] %s\n",
1591 source_id >> 8, PCI_SLOT(source_id & 0xFF),
9d783ba0
SS
1592 PCI_FUNC(source_id & 0xFF), addr >> 48,
1593 fault_reason, reason);
1594 else
a0fe14d7
AW
1595 pr_err("[%s] Request device [%02x:%02x.%d] fault addr %llx [fault reason %02d] %s\n",
1596 type ? "DMA Read" : "DMA Write",
1597 source_id >> 8, PCI_SLOT(source_id & 0xFF),
9d783ba0 1598 PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
0ac2491f
SS
1599 return 0;
1600}
1601
1602#define PRIMARY_FAULT_REG_LEN (16)
1531a6a6 1603irqreturn_t dmar_fault(int irq, void *dev_id)
0ac2491f
SS
1604{
1605 struct intel_iommu *iommu = dev_id;
1606 int reg, fault_index;
1607 u32 fault_status;
1608 unsigned long flag;
c43fce4e
AW
1609 bool ratelimited;
1610 static DEFINE_RATELIMIT_STATE(rs,
1611 DEFAULT_RATELIMIT_INTERVAL,
1612 DEFAULT_RATELIMIT_BURST);
1613
1614 /* Disable printing, simply clear the fault when ratelimited */
1615 ratelimited = !__ratelimit(&rs);
0ac2491f 1616
1f5b3c3f 1617 raw_spin_lock_irqsave(&iommu->register_lock, flag);
0ac2491f 1618 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
c43fce4e 1619 if (fault_status && !ratelimited)
bf947fcb 1620 pr_err("DRHD: handling fault status reg %x\n", fault_status);
0ac2491f
SS
1621
1622 /* TBD: ignore advanced fault log currently */
1623 if (!(fault_status & DMA_FSTS_PPF))
bd5cdad0 1624 goto unlock_exit;
0ac2491f
SS
1625
1626 fault_index = dma_fsts_fault_record_index(fault_status);
1627 reg = cap_fault_reg_offset(iommu->cap);
1628 while (1) {
1629 u8 fault_reason;
1630 u16 source_id;
1631 u64 guest_addr;
1632 int type;
1633 u32 data;
1634
1635 /* highest 32 bits */
1636 data = readl(iommu->reg + reg +
1637 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1638 if (!(data & DMA_FRCD_F))
1639 break;
1640
c43fce4e
AW
1641 if (!ratelimited) {
1642 fault_reason = dma_frcd_fault_reason(data);
1643 type = dma_frcd_type(data);
0ac2491f 1644
c43fce4e
AW
1645 data = readl(iommu->reg + reg +
1646 fault_index * PRIMARY_FAULT_REG_LEN + 8);
1647 source_id = dma_frcd_source_id(data);
1648
1649 guest_addr = dmar_readq(iommu->reg + reg +
1650 fault_index * PRIMARY_FAULT_REG_LEN);
1651 guest_addr = dma_frcd_page_addr(guest_addr);
1652 }
0ac2491f 1653
0ac2491f
SS
1654 /* clear the fault */
1655 writel(DMA_FRCD_F, iommu->reg + reg +
1656 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1657
1f5b3c3f 1658 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
0ac2491f 1659
c43fce4e
AW
1660 if (!ratelimited)
1661 dmar_fault_do_one(iommu, type, fault_reason,
1662 source_id, guest_addr);
0ac2491f
SS
1663
1664 fault_index++;
8211a7b5 1665 if (fault_index >= cap_num_fault_regs(iommu->cap))
0ac2491f 1666 fault_index = 0;
1f5b3c3f 1667 raw_spin_lock_irqsave(&iommu->register_lock, flag);
0ac2491f 1668 }
0ac2491f 1669
bd5cdad0
LZH
1670 writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);
1671
1672unlock_exit:
1f5b3c3f 1673 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
0ac2491f
SS
1674 return IRQ_HANDLED;
1675}
1676
1677int dmar_set_interrupt(struct intel_iommu *iommu)
1678{
1679 int irq, ret;
1680
9d783ba0
SS
1681 /*
1682 * Check if the fault interrupt is already initialized.
1683 */
1684 if (iommu->irq)
1685 return 0;
1686
34742db8
JL
1687 irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
1688 if (irq > 0) {
1689 iommu->irq = irq;
1690 } else {
9f10e5bf 1691 pr_err("No free IRQ vectors\n");
0ac2491f
SS
1692 return -EINVAL;
1693 }
1694
477694e7 1695 ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
0ac2491f 1696 if (ret)
9f10e5bf 1697 pr_err("Can't request irq\n");
0ac2491f
SS
1698 return ret;
1699}
9d783ba0
SS
1700
1701int __init enable_drhd_fault_handling(void)
1702{
1703 struct dmar_drhd_unit *drhd;
7c919779 1704 struct intel_iommu *iommu;
9d783ba0
SS
1705
1706 /*
1707 * Enable fault control interrupt.
1708 */
7c919779 1709 for_each_iommu(iommu, drhd) {
bd5cdad0 1710 u32 fault_status;
7c919779 1711 int ret = dmar_set_interrupt(iommu);
9d783ba0
SS
1712
1713 if (ret) {
			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
9d783ba0
SS
1715 (unsigned long long)drhd->reg_base_addr, ret);
1716 return -1;
1717 }
7f99d946
SS
1718
1719 /*
1720 * Clear any previous faults.
1721 */
1722 dmar_fault(iommu->irq, iommu);
bd5cdad0
LZH
1723 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1724 writel(fault_status, iommu->reg + DMAR_FSTS_REG);
9d783ba0
SS
1725 }
1726
1727 return 0;
1728}
eb4a52bc
FY
1729
1730/*
1731 * Re-enable Queued Invalidation interface.
1732 */
1733int dmar_reenable_qi(struct intel_iommu *iommu)
1734{
1735 if (!ecap_qis(iommu->ecap))
1736 return -ENOENT;
1737
1738 if (!iommu->qi)
1739 return -ENOENT;
1740
1741 /*
1742 * First disable queued invalidation.
1743 */
1744 dmar_disable_qi(iommu);
1745 /*
1746 * Then enable queued invalidation again. Since there is no pending
1747 * invalidation requests now, it's safe to re-enable queued
1748 * invalidation.
1749 */
1750 __dmar_enable_qi(iommu);
1751
1752 return 0;
1753}
074835f0
YS
1754
1755/*
1756 * Check interrupt remapping support in DMAR table description.
1757 */
0b8973a8 1758int __init dmar_ir_support(void)
074835f0
YS
1759{
1760 struct acpi_table_dmar *dmar;
1761 dmar = (struct acpi_table_dmar *)dmar_tbl;
4f506e07
AP
1762 if (!dmar)
1763 return 0;
074835f0
YS
1764 return dmar->flags & 0x1;
1765}
694835dc 1766
6b197249
JL
1767/* Check whether DMAR units are in use */
1768static inline bool dmar_in_use(void)
1769{
1770 return irq_remapping_enabled || intel_iommu_enabled;
1771}
1772
a868e6b7
JL
1773static int __init dmar_free_unused_resources(void)
1774{
1775 struct dmar_drhd_unit *dmaru, *dmaru_n;
1776
6b197249 1777 if (dmar_in_use())
a868e6b7
JL
1778 return 0;
1779
2e455289
JL
1780 if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
1781 bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);
59ce0515 1782
3a5670e8 1783 down_write(&dmar_global_lock);
a868e6b7
JL
1784 list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
1785 list_del(&dmaru->list);
1786 dmar_free_drhd(dmaru);
1787 }
3a5670e8 1788 up_write(&dmar_global_lock);
a868e6b7
JL
1789
1790 return 0;
1791}
1792
1793late_initcall(dmar_free_unused_resources);
4db77ff3 1794IOMMU_INIT_POST(detect_intel_iommu);
6b197249
JL
1795
/*
 * DMAR Hotplug Support
 * For more details, please refer to Intel(R) Virtualization Technology
 * for Directed-IO Architecture Specification, Rev 2.2, Section 8.8
 * "Remapping Hardware Unit Hot Plug".
 */
1802static u8 dmar_hp_uuid[] = {
1803 /* 0000 */ 0xA6, 0xA3, 0xC1, 0xD8, 0x9B, 0xBE, 0x9B, 0x4C,
1804 /* 0008 */ 0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF
1805};
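/*
 * dmar_hp_uuid is the _DSM UUID used to detect and drive DMAR hotplug;
 * DMAR_DSM_FUNC_* below are the _DSM function indices queried through
 * this interface.
 */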
1806
1807/*
1808 * Currently there's only one revision and BIOS will not check the revision id,
1809 * so use 0 for safety.
1810 */
1811#define DMAR_DSM_REV_ID 0
1812#define DMAR_DSM_FUNC_DRHD 1
1813#define DMAR_DSM_FUNC_ATSR 2
1814#define DMAR_DSM_FUNC_RHSA 3
1815
1816static inline bool dmar_detect_dsm(acpi_handle handle, int func)
1817{
1818 return acpi_check_dsm(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, 1 << func);
1819}
1820
1821static int dmar_walk_dsm_resource(acpi_handle handle, int func,
1822 dmar_res_handler_t handler, void *arg)
1823{
1824 int ret = -ENODEV;
1825 union acpi_object *obj;
1826 struct acpi_dmar_header *start;
1827 struct dmar_res_callback callback;
1828 static int res_type[] = {
1829 [DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
1830 [DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
1831 [DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
1832 };
1833
1834 if (!dmar_detect_dsm(handle, func))
1835 return 0;
1836
1837 obj = acpi_evaluate_dsm_typed(handle, dmar_hp_uuid, DMAR_DSM_REV_ID,
1838 func, NULL, ACPI_TYPE_BUFFER);
1839 if (!obj)
1840 return -ENODEV;
1841
1842 memset(&callback, 0, sizeof(callback));
1843 callback.cb[res_type[func]] = handler;
1844 callback.arg[res_type[func]] = arg;
1845 start = (struct acpi_dmar_header *)obj->buffer.pointer;
1846 ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);
1847
1848 ACPI_FREE(obj);
1849
1850 return ret;
1851}
1852
1853static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
1854{
1855 int ret;
1856 struct dmar_drhd_unit *dmaru;
1857
1858 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
1859 if (!dmaru)
1860 return -ENODEV;
1861
1862 ret = dmar_ir_hotplug(dmaru, true);
1863 if (ret == 0)
1864 ret = dmar_iommu_hotplug(dmaru, true);
1865
1866 return ret;
1867}
1868
1869static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
1870{
1871 int i, ret;
1872 struct device *dev;
1873 struct dmar_drhd_unit *dmaru;
1874
1875 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
1876 if (!dmaru)
1877 return 0;
1878
1879 /*
1880 * All PCI devices managed by this unit should have been destroyed.
1881 */
194dc870 1882 if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
6b197249
JL
1883 for_each_active_dev_scope(dmaru->devices,
1884 dmaru->devices_cnt, i, dev)
1885 return -EBUSY;
194dc870 1886 }
6b197249
JL
1887
1888 ret = dmar_ir_hotplug(dmaru, false);
1889 if (ret == 0)
1890 ret = dmar_iommu_hotplug(dmaru, false);
1891
1892 return ret;
1893}
1894
1895static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
1896{
1897 struct dmar_drhd_unit *dmaru;
1898
1899 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
1900 if (dmaru) {
1901 list_del_rcu(&dmaru->list);
1902 synchronize_rcu();
1903 dmar_free_drhd(dmaru);
1904 }
1905
1906 return 0;
1907}
1908
1909static int dmar_hotplug_insert(acpi_handle handle)
1910{
1911 int ret;
1912 int drhd_count = 0;
1913
1914 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1915 &dmar_validate_one_drhd, (void *)1);
1916 if (ret)
1917 goto out;
1918
1919 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1920 &dmar_parse_one_drhd, (void *)&drhd_count);
1921 if (ret == 0 && drhd_count == 0) {
1922 pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
1923 goto out;
1924 } else if (ret) {
1925 goto release_drhd;
1926 }
1927
1928 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
1929 &dmar_parse_one_rhsa, NULL);
1930 if (ret)
1931 goto release_drhd;
1932
1933 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1934 &dmar_parse_one_atsr, NULL);
1935 if (ret)
1936 goto release_atsr;
1937
1938 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1939 &dmar_hp_add_drhd, NULL);
1940 if (!ret)
1941 return 0;
1942
1943 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1944 &dmar_hp_remove_drhd, NULL);
1945release_atsr:
1946 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1947 &dmar_release_one_atsr, NULL);
1948release_drhd:
1949 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1950 &dmar_hp_release_drhd, NULL);
1951out:
1952 return ret;
1953}
1954
1955static int dmar_hotplug_remove(acpi_handle handle)
1956{
1957 int ret;
1958
1959 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1960 &dmar_check_one_atsr, NULL);
1961 if (ret)
1962 return ret;
1963
1964 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1965 &dmar_hp_remove_drhd, NULL);
1966 if (ret == 0) {
1967 WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1968 &dmar_release_one_atsr, NULL));
1969 WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1970 &dmar_hp_release_drhd, NULL));
1971 } else {
1972 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1973 &dmar_hp_add_drhd, NULL);
1974 }
1975
1976 return ret;
1977}
1978
d35165a9
JL
1979static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
1980 void *context, void **retval)
1981{
1982 acpi_handle *phdl = retval;
1983
1984 if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
1985 *phdl = handle;
1986 return AE_CTRL_TERMINATE;
1987 }
1988
1989 return AE_OK;
1990}
1991
6b197249
JL
1992static int dmar_device_hotplug(acpi_handle handle, bool insert)
1993{
1994 int ret;
d35165a9
JL
1995 acpi_handle tmp = NULL;
1996 acpi_status status;
6b197249
JL
1997
1998 if (!dmar_in_use())
1999 return 0;
2000
d35165a9
JL
2001 if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
2002 tmp = handle;
2003 } else {
2004 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
2005 ACPI_UINT32_MAX,
2006 dmar_get_dsm_handle,
2007 NULL, NULL, &tmp);
2008 if (ACPI_FAILURE(status)) {
2009 pr_warn("Failed to locate _DSM method.\n");
2010 return -ENXIO;
2011 }
2012 }
2013 if (tmp == NULL)
6b197249
JL
2014 return 0;
2015
2016 down_write(&dmar_global_lock);
2017 if (insert)
d35165a9 2018 ret = dmar_hotplug_insert(tmp);
6b197249 2019 else
d35165a9 2020 ret = dmar_hotplug_remove(tmp);
6b197249
JL
2021 up_write(&dmar_global_lock);
2022
2023 return ret;
2024}
2025
2026int dmar_device_add(acpi_handle handle)
2027{
2028 return dmar_device_hotplug(handle, true);
2029}
2030
2031int dmar_device_remove(acpi_handle handle)
2032{
2033 return dmar_device_hotplug(handle, false);
2034}