1/*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
21 *
22 * This file implements early detection/parsing of Remapping Devices
23 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
24 * tables.
25 *
26 * These routines are used by both DMA-remapping and Interrupt-remapping
27 */
28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */
30
31#include <linux/pci.h>
32#include <linux/dmar.h>
33#include <linux/iova.h>
34#include <linux/intel-iommu.h>
35#include <linux/timer.h>
36#include <linux/irq.h>
37#include <linux/interrupt.h>
38#include <linux/tboot.h>
39#include <linux/dmi.h>
40#include <linux/slab.h>
41#include <asm/irq_remapping.h>
42#include <asm/iommu_table.h>
43
44#include "irq_remapping.h"
45
46/* No locks are needed as DMA remapping hardware unit
47 * list is constructed at boot time and hotplug of
48 * these units are not supported by the architecture.
49 */
50LIST_HEAD(dmar_drhd_units);
10e5247f 51
41750d31 52struct acpi_table_header * __initdata dmar_tbl;
8e1568f3 53static acpi_size dmar_tbl_size;
10e5247f 54
694835dc 55static int alloc_iommu(struct dmar_drhd_unit *drhd);
a868e6b7 56static void free_iommu(struct intel_iommu *iommu);
694835dc 57
10e5247f
KA
58static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
59{
60 /*
61 * add INCLUDE_ALL at the tail, so a scan of the list will find it
62 * at the very end.
63 */
64 if (drhd->include_all)
65 list_add_tail(&drhd->list, &dmar_drhd_units);
66 else
67 list_add(&drhd->list, &dmar_drhd_units);
68}
69
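/*
 * Walk one ACPI device scope entry and resolve it to a pci_dev: follow
 * the (device, function) path starting at scope->bus hop by hop, taking
 * a reference on the final device.  Returns 0 on success (or when the
 * device is simply absent) and -EINVAL if the entry type does not match
 * the device that was found.
 */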
70static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
71 struct pci_dev **dev, u16 segment)
72{
73 struct pci_bus *bus;
74 struct pci_dev *pdev = NULL;
75 struct acpi_dmar_pci_path *path;
76 int count;
77
78 bus = pci_find_bus(segment, scope->bus);
79 path = (struct acpi_dmar_pci_path *)(scope + 1);
80 count = (scope->length - sizeof(struct acpi_dmar_device_scope))
81 / sizeof(struct acpi_dmar_pci_path);
82
83 while (count) {
84 if (pdev)
85 pci_dev_put(pdev);
86 /*
87 * Some BIOSes list non-existent devices in the DMAR table;
88 * just ignore them.
89 */
90 if (!bus) {
91 pr_warn("Device scope bus [%d] not found\n", scope->bus);
92 break;
93 }
94 pdev = pci_get_slot(bus, PCI_DEVFN(path->device, path->function));
95 if (!pdev) {
96 /* warning will be printed below */
97 break;
98 }
99 path ++;
100 count --;
101 bus = pdev->subordinate;
102 }
103 if (!pdev) {
104 pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
105 segment, scope->bus, path->device, path->function);
106 return 0;
107 }
108 if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \
109 pdev->subordinate) || (scope->entry_type == \
110 ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
111 pci_dev_put(pdev);
e9071b0b
DD
112 pr_warn("Device scope type does not match for %s\n",
113 pci_name(pdev));
10e5247f
KA
114 return -EINVAL;
115 }
116 *dev = pdev;
117 return 0;
118}
119
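/*
 * Count the endpoint and bridge entries in a device scope list and
 * allocate a pci_dev pointer array big enough to hold them.  Returns
 * NULL when nothing needs to be tracked or on allocation failure.
 */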
bb3a6b78 120void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
10e5247f
KA
121{
122 struct acpi_dmar_device_scope *scope;
10e5247f
KA
123
124 *cnt = 0;
125 while (start < end) {
126 scope = start;
127 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
128 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
129 (*cnt)++;
130 else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
131 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
132 pr_warn("Unsupported device scope\n");
133 }
10e5247f
KA
134 start += scope->length;
135 }
136 if (*cnt == 0)
bb3a6b78
JL
137 return NULL;
138
139 return kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
140}
141
142int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
143 struct pci_dev ***devices, u16 segment)
144{
145 struct acpi_dmar_device_scope *scope;
146 int index, ret;
10e5247f 147
bb3a6b78
JL
148 *devices = dmar_alloc_dev_scope(start, end, cnt);
149 if (*cnt == 0)
150 return 0;
151 else if (!*devices)
10e5247f
KA
152 return -ENOMEM;
153
bb3a6b78 154 for (index = 0; start < end; start += scope->length) {
10e5247f
KA
155 scope = start;
156 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
157 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
158 ret = dmar_parse_one_dev_scope(scope,
159 &(*devices)[index], segment);
160 if (ret) {
ada4d4b2 161 dmar_free_dev_scope(devices, cnt);
10e5247f
KA
162 return ret;
163 }
164 index ++;
165 }
10e5247f
KA
166 }
167
168 return 0;
169}
170
ada4d4b2
JL
171void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt)
172{
173 if (*devices && *cnt) {
174 while (--*cnt >= 0)
175 pci_dev_put((*devices)[*cnt]);
176 kfree(*devices);
177 *devices = NULL;
178 *cnt = 0;
179 }
180}
181
10e5247f
KA
182/**
183 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
184 * structure which uniquely represent one DMA remapping hardware unit
185 * present in the platform
186 */
187static int __init
188dmar_parse_one_drhd(struct acpi_dmar_header *header)
189{
190 struct acpi_dmar_hardware_unit *drhd;
191 struct dmar_drhd_unit *dmaru;
192 int ret = 0;
10e5247f 193
e523b38e 194 drhd = (struct acpi_dmar_hardware_unit *)header;
10e5247f
KA
195 dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
196 if (!dmaru)
197 return -ENOMEM;
198
1886e8a9 199 dmaru->hdr = header;
10e5247f 200 dmaru->reg_base_addr = drhd->address;
276dbf99 201 dmaru->segment = drhd->segment;
10e5247f
KA
202 dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
203
1886e8a9
SS
204 ret = alloc_iommu(dmaru);
205 if (ret) {
206 kfree(dmaru);
207 return ret;
208 }
209 dmar_register_drhd_unit(dmaru);
210 return 0;
211}
212
a868e6b7
JL
213static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
214{
215 if (dmaru->devices && dmaru->devices_cnt)
216 dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
217 if (dmaru->iommu)
218 free_iommu(dmaru->iommu);
219 kfree(dmaru);
220}
221
f82851a8 222static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
1886e8a9
SS
223{
224 struct acpi_dmar_hardware_unit *drhd;
1886e8a9
SS
225
226 drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
227
2e824f79
YZ
228 if (dmaru->include_all)
229 return 0;
230
a868e6b7
JL
231 return dmar_parse_dev_scope((void *)(drhd + 1),
232 ((void *)drhd) + drhd->header.length,
233 &dmaru->devices_cnt, &dmaru->devices,
234 drhd->segment);
10e5247f
KA
235}
236
237#ifdef CONFIG_ACPI_NUMA
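/*
 * Parse a Remapping Hardware Static Affinity (RHSA) structure and bind
 * the matching DRHD's IOMMU to its NUMA node.  An RHSA that refers to a
 * non-existent DRHD is reported as a firmware bug.
 */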
238static int __init
239dmar_parse_one_rhsa(struct acpi_dmar_header *header)
240{
241 struct acpi_dmar_rhsa *rhsa;
242 struct dmar_drhd_unit *drhd;
243
244 rhsa = (struct acpi_dmar_rhsa *)header;
aa697079 245 for_each_drhd_unit(drhd) {
ee34b32d
SS
246 if (drhd->reg_base_addr == rhsa->base_address) {
247 int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
248
249 if (!node_online(node))
250 node = -1;
251 drhd->iommu->node = node;
aa697079
DW
252 return 0;
253 }
ee34b32d 254 }
fd0c8894
BH
255 WARN_TAINT(
256 1, TAINT_FIRMWARE_WORKAROUND,
257 "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
258 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
259 drhd->reg_base_addr,
260 dmi_get_system_info(DMI_BIOS_VENDOR),
261 dmi_get_system_info(DMI_BIOS_VERSION),
262 dmi_get_system_info(DMI_PRODUCT_VERSION));
ee34b32d 263
aa697079 264 return 0;
ee34b32d 265}
aa697079 266#endif
ee34b32d 267
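/* Print a one-line summary of a DMAR structure while the table is parsed. */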
268static void __init
269dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
270{
271 struct acpi_dmar_hardware_unit *drhd;
272 struct acpi_dmar_reserved_memory *rmrr;
aa5d2b51 273 struct acpi_dmar_atsr *atsr;
17b60977 274 struct acpi_dmar_rhsa *rhsa;
10e5247f
KA
275
276 switch (header->type) {
277 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
aa5d2b51
YZ
278 drhd = container_of(header, struct acpi_dmar_hardware_unit,
279 header);
e9071b0b 280 pr_info("DRHD base: %#016Lx flags: %#x\n",
aa5d2b51 281 (unsigned long long)drhd->address, drhd->flags);
10e5247f
KA
282 break;
283 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
aa5d2b51
YZ
284 rmrr = container_of(header, struct acpi_dmar_reserved_memory,
285 header);
e9071b0b 286 pr_info("RMRR base: %#016Lx end: %#016Lx\n",
5b6985ce
FY
287 (unsigned long long)rmrr->base_address,
288 (unsigned long long)rmrr->end_address);
10e5247f 289 break;
aa5d2b51
YZ
290 case ACPI_DMAR_TYPE_ATSR:
291 atsr = container_of(header, struct acpi_dmar_atsr, header);
e9071b0b 292 pr_info("ATSR flags: %#x\n", atsr->flags);
aa5d2b51 293 break;
17b60977
RD
294 case ACPI_DMAR_HARDWARE_AFFINITY:
295 rhsa = container_of(header, struct acpi_dmar_rhsa, header);
e9071b0b 296 pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
17b60977
RD
297 (unsigned long long)rhsa->base_address,
298 rhsa->proximity_domain);
299 break;
10e5247f
KA
300 }
301}
302
f6dd5c31
YL
303/**
304 * dmar_table_detect - checks to see if the platform supports DMAR devices
305 */
306static int __init dmar_table_detect(void)
307{
308 acpi_status status = AE_OK;
309
310 /* if we can find the DMAR table, then there are DMAR devices */
8e1568f3
YL
311 status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
312 (struct acpi_table_header **)&dmar_tbl,
313 &dmar_tbl_size);
f6dd5c31
YL
314
315 if (ACPI_SUCCESS(status) && !dmar_tbl) {
e9071b0b 316 pr_warn("Unable to map DMAR\n");
f6dd5c31
YL
317 status = AE_NOT_FOUND;
318 }
319
320 return (ACPI_SUCCESS(status) ? 1 : 0);
321}
aaa9d1dd 322
10e5247f
KA
323/**
324 * parse_dmar_table - parses the DMA reporting table
325 */
326static int __init
327parse_dmar_table(void)
328{
329 struct acpi_table_dmar *dmar;
330 struct acpi_dmar_header *entry_header;
331 int ret = 0;
7cef3347 332 int drhd_count = 0;
10e5247f 333
f6dd5c31
YL
334 /*
335 * Do it again; the earlier dmar_tbl mapping could have been done
336 * with the fixed map.
337 */
338 dmar_table_detect();
339
a59b50e9
JC
340 /*
341 * ACPI tables may not be DMA protected by tboot, so use the DMAR
342 * copy SINIT saved in SinitMleData in the TXT heap (DMA protected).
343 */
344 dmar_tbl = tboot_get_dmar_table(dmar_tbl);
345
10e5247f
KA
346 dmar = (struct acpi_table_dmar *)dmar_tbl;
347 if (!dmar)
348 return -ENODEV;
349
5b6985ce 350 if (dmar->width < PAGE_SHIFT - 1) {
e9071b0b 351 pr_warn("Invalid DMAR haw\n");
10e5247f
KA
352 return -EINVAL;
353 }
354
e9071b0b 355 pr_info("Host address width %d\n", dmar->width + 1);
10e5247f
KA
356
357 entry_header = (struct acpi_dmar_header *)(dmar + 1);
358 while (((unsigned long)entry_header) <
359 (((unsigned long)dmar) + dmar_tbl->length)) {
084eb960
TB
360 /* Avoid looping forever on bad ACPI tables */
361 if (entry_header->length == 0) {
e9071b0b 362 pr_warn("Invalid 0-length structure\n");
084eb960
TB
363 ret = -EINVAL;
364 break;
365 }
366
10e5247f
KA
367 dmar_table_print_dmar_entry(entry_header);
368
369 switch (entry_header->type) {
370 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
7cef3347 371 drhd_count++;
10e5247f
KA
372 ret = dmar_parse_one_drhd(entry_header);
373 break;
374 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
375 ret = dmar_parse_one_rmrr(entry_header);
aa5d2b51
YZ
376 break;
377 case ACPI_DMAR_TYPE_ATSR:
aa5d2b51 378 ret = dmar_parse_one_atsr(entry_header);
10e5247f 379 break;
17b60977 380 case ACPI_DMAR_HARDWARE_AFFINITY:
aa697079 381#ifdef CONFIG_ACPI_NUMA
ee34b32d 382 ret = dmar_parse_one_rhsa(entry_header);
aa697079 383#endif
17b60977 384 break;
10e5247f 385 default:
e9071b0b 386 pr_warn("Unknown DMAR structure type %d\n",
4de75cf9 387 entry_header->type);
10e5247f
KA
388 ret = 0; /* for forward compatibility */
389 break;
390 }
391 if (ret)
392 break;
393
394 entry_header = ((void *)entry_header + entry_header->length);
395 }
7cef3347
LZH
396 if (drhd_count == 0)
397 pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
10e5247f
KA
398 return ret;
399}
400
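/*
 * Return 1 if @dev, or any PCI bridge above it, appears in @devices;
 * used to match a device against a DRHD's device scope.
 */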
401static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
402 struct pci_dev *dev)
403{
404 int index;
405
406 while (dev) {
407 for (index = 0; index < cnt; index++)
408 if (dev == devices[index])
409 return 1;
410
411 /* Check our parent */
412 dev = dev->bus->self;
413 }
414
415 return 0;
416}
417
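/*
 * Find the DRHD unit whose scope covers @dev (SR-IOV virtual functions
 * are matched via their physical function).  INCLUDE_ALL units sit at
 * the tail of the list, so explicit device scopes are preferred.
 */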
418struct dmar_drhd_unit *
419dmar_find_matched_drhd_unit(struct pci_dev *dev)
420{
2e824f79
YZ
421 struct dmar_drhd_unit *dmaru = NULL;
422 struct acpi_dmar_hardware_unit *drhd;
423
dda56549
Y
424 dev = pci_physfn(dev);
425
8b161f0e 426 for_each_drhd_unit(dmaru) {
2e824f79
YZ
427 drhd = container_of(dmaru->hdr,
428 struct acpi_dmar_hardware_unit,
429 header);
430
431 if (dmaru->include_all &&
432 drhd->segment == pci_domain_nr(dev->bus))
433 return dmaru;
e61d98d8 434
2e824f79
YZ
435 if (dmar_pci_device_match(dmaru->devices,
436 dmaru->devices_cnt, dev))
437 return dmaru;
e61d98d8
SS
438 }
439
440 return NULL;
441}
442
1886e8a9
SS
443int __init dmar_dev_scope_init(void)
444{
c2c7286a 445 static int dmar_dev_scope_initialized;
a868e6b7 446 struct dmar_drhd_unit *drhd;
1886e8a9
SS
447 int ret = -ENODEV;
448
c2c7286a
SS
449 if (dmar_dev_scope_initialized)
450 return dmar_dev_scope_initialized;
451
318fe7df
SS
452 if (list_empty(&dmar_drhd_units))
453 goto fail;
454
a868e6b7 455 list_for_each_entry(drhd, &dmar_drhd_units, list) {
1886e8a9
SS
456 ret = dmar_parse_dev(drhd);
457 if (ret)
c2c7286a 458 goto fail;
1886e8a9
SS
459 }
460
318fe7df
SS
461 ret = dmar_parse_rmrr_atsr_dev();
462 if (ret)
463 goto fail;
1886e8a9 464
c2c7286a
SS
465 dmar_dev_scope_initialized = 1;
466 return 0;
467
468fail:
469 dmar_dev_scope_initialized = ret;
1886e8a9
SS
470 return ret;
471}
472
10e5247f
KA
473
474int __init dmar_table_init(void)
475{
1886e8a9 476 static int dmar_table_initialized;
093f87d2
FY
477 int ret;
478
cc05301f
JL
479 if (dmar_table_initialized == 0) {
480 ret = parse_dmar_table();
481 if (ret < 0) {
482 if (ret != -ENODEV)
483 pr_info("parse DMAR table failure.\n");
484 } else if (list_empty(&dmar_drhd_units)) {
485 pr_info("No DMAR devices found\n");
486 ret = -ENODEV;
487 }
093f87d2 488
cc05301f
JL
489 if (ret < 0)
490 dmar_table_initialized = ret;
491 else
492 dmar_table_initialized = 1;
10e5247f 493 }
093f87d2 494
cc05301f 495 return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
10e5247f
KA
496}
497
3a8663ee
BH
498static void warn_invalid_dmar(u64 addr, const char *message)
499{
fd0c8894
BH
500 WARN_TAINT_ONCE(
501 1, TAINT_FIRMWARE_WORKAROUND,
502 "Your BIOS is broken; DMAR reported at address %llx%s!\n"
503 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
504 addr, message,
505 dmi_get_system_info(DMI_BIOS_VENDOR),
506 dmi_get_system_info(DMI_BIOS_VERSION),
507 dmi_get_system_info(DMI_PRODUCT_VERSION));
3a8663ee 508}
6ecbf01c 509
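/*
 * Sanity-check every DRHD in the DMAR table before detection succeeds:
 * a zero register base address, or capability registers that read back
 * as all ones, indicates broken firmware.
 */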
21004dcd 510static int __init check_zero_address(void)
86cf898e
DW
511{
512 struct acpi_table_dmar *dmar;
513 struct acpi_dmar_header *entry_header;
514 struct acpi_dmar_hardware_unit *drhd;
515
516 dmar = (struct acpi_table_dmar *)dmar_tbl;
517 entry_header = (struct acpi_dmar_header *)(dmar + 1);
518
519 while (((unsigned long)entry_header) <
520 (((unsigned long)dmar) + dmar_tbl->length)) {
521 /* Avoid looping forever on bad ACPI tables */
522 if (entry_header->length == 0) {
e9071b0b 523 pr_warn("Invalid 0-length structure\n");
86cf898e
DW
524 return 0;
525 }
526
527 if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
2c992208
CW
528 void __iomem *addr;
529 u64 cap, ecap;
530
86cf898e
DW
531 drhd = (void *)entry_header;
532 if (!drhd->address) {
3a8663ee 533 warn_invalid_dmar(0, "");
2c992208
CW
534 goto failed;
535 }
536
537 addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
538 if (!addr) {
539 pr_warn("IOMMU: can't validate: %llx\n", drhd->address);
540 goto failed;
541 }
542 cap = dmar_readq(addr + DMAR_CAP_REG);
543 ecap = dmar_readq(addr + DMAR_ECAP_REG);
544 early_iounmap(addr, VTD_PAGE_SIZE);
545 if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
3a8663ee
BH
546 warn_invalid_dmar(drhd->address,
547 " returns all ones");
2c992208 548 goto failed;
86cf898e 549 }
86cf898e
DW
550 }
551
552 entry_header = ((void *)entry_header + entry_header->length);
553 }
554 return 1;
2c992208
CW
555
556failed:
2c992208 557 return 0;
86cf898e
DW
558}
559
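/*
 * Early detection entry point: map and sanity-check the DMAR table and,
 * if it looks usable, mark the IOMMU as detected and hook
 * intel_iommu_init into the x86 init path.  The early table mapping is
 * dropped again here; parse_dmar_table() re-maps it later.
 */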
480125ba 560int __init detect_intel_iommu(void)
2ae21010
SS
561{
562 int ret;
563
f6dd5c31 564 ret = dmar_table_detect();
86cf898e
DW
565 if (ret)
566 ret = check_zero_address();
2ae21010 567 {
11bd04f6 568 if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
2ae21010 569 iommu_detected = 1;
5d990b62
CW
570 /* Make sure ACS will be enabled */
571 pci_request_acs();
572 }
f5d1b97b 573
9d5ce73a
FT
574#ifdef CONFIG_X86
575 if (ret)
576 x86_init.iommu.iommu_init = intel_iommu_init;
2ae21010 577#endif
cacd4213 578 }
b707cb02 579 early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
f6dd5c31 580 dmar_tbl = NULL;
480125ba 581
4db77ff3 582 return ret ? 1 : -ENODEV;
2ae21010
SS
583}
584
585
6f5cf521
DD
586static void unmap_iommu(struct intel_iommu *iommu)
587{
588 iounmap(iommu->reg);
589 release_mem_region(iommu->reg_phys, iommu->reg_size);
590}
591
592/**
593 * map_iommu: map the iommu's registers
594 * @iommu: the iommu to map
595 * @phys_addr: the physical address of the base register
596 *
597 * Memory map the iommu's registers. Start with a single page, and
598 * possibly expand if that turns out to be insufficient.
599 */
600static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
601{
602 int map_size, err=0;
603
604 iommu->reg_phys = phys_addr;
605 iommu->reg_size = VTD_PAGE_SIZE;
606
607 if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
608 pr_err("IOMMU: can't reserve memory\n");
609 err = -EBUSY;
610 goto out;
611 }
612
613 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
614 if (!iommu->reg) {
615 pr_err("IOMMU: can't map the region\n");
616 err = -ENOMEM;
617 goto release;
618 }
619
620 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
621 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
622
623 if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
624 err = -EINVAL;
625 warn_invalid_dmar(phys_addr, " returns all ones");
626 goto unmap;
627 }
628
629 /* the registers might be more than one page */
630 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
631 cap_max_fault_reg_offset(iommu->cap));
632 map_size = VTD_PAGE_ALIGN(map_size);
633 if (map_size > iommu->reg_size) {
634 iounmap(iommu->reg);
635 release_mem_region(iommu->reg_phys, iommu->reg_size);
636 iommu->reg_size = map_size;
637 if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
638 iommu->name)) {
639 pr_err("IOMMU: can't reserve memory\n");
640 err = -EBUSY;
641 goto out;
642 }
643 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
644 if (!iommu->reg) {
645 pr_err("IOMMU: can't map the region\n");
646 err = -ENOMEM;
647 goto release;
648 }
649 }
650 err = 0;
651 goto out;
652
653unmap:
654 iounmap(iommu->reg);
655release:
656 release_mem_region(iommu->reg_phys, iommu->reg_size);
657out:
658 return err;
659}
660
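/*
 * Allocate and initialise the struct intel_iommu for one DRHD: map its
 * register range, cache the CAP/ECAP registers, compute the supported
 * AGAW values and mirror the hardware enable bits into iommu->gcmd.
 */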
694835dc 661static int alloc_iommu(struct dmar_drhd_unit *drhd)
e61d98d8 662{
c42d9f32 663 struct intel_iommu *iommu;
3a93c841 664 u32 ver, sts;
c42d9f32 665 static int iommu_allocated = 0;
43f7392b 666 int agaw = 0;
4ed0d3e6 667 int msagaw = 0;
6f5cf521 668 int err;
c42d9f32 669
6ecbf01c 670 if (!drhd->reg_base_addr) {
3a8663ee 671 warn_invalid_dmar(0, "");
6ecbf01c
DW
672 return -EINVAL;
673 }
674
c42d9f32
SS
675 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
676 if (!iommu)
1886e8a9 677 return -ENOMEM;
c42d9f32
SS
678
679 iommu->seq_id = iommu_allocated++;
9d783ba0 680 sprintf (iommu->name, "dmar%d", iommu->seq_id);
e61d98d8 681
6f5cf521
DD
682 err = map_iommu(iommu, drhd->reg_base_addr);
683 if (err) {
684 pr_err("IOMMU: failed to map %s\n", iommu->name);
e61d98d8
SS
685 goto error;
686 }
0815565a 687
6f5cf521 688 err = -EINVAL;
1b573683
WH
689 agaw = iommu_calculate_agaw(iommu);
690 if (agaw < 0) {
bf947fcb
DD
691 pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
692 iommu->seq_id);
0815565a 693 goto err_unmap;
4ed0d3e6
FY
694 }
695 msagaw = iommu_calculate_max_sagaw(iommu);
696 if (msagaw < 0) {
bf947fcb 697 pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
1b573683 698 iommu->seq_id);
0815565a 699 goto err_unmap;
1b573683
WH
700 }
701 iommu->agaw = agaw;
4ed0d3e6 702 iommu->msagaw = msagaw;
1b573683 703
ee34b32d
SS
704 iommu->node = -1;
705
e61d98d8 706 ver = readl(iommu->reg + DMAR_VER_REG);
680a7524
YL
707 pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
708 iommu->seq_id,
5b6985ce
FY
709 (unsigned long long)drhd->reg_base_addr,
710 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
711 (unsigned long long)iommu->cap,
712 (unsigned long long)iommu->ecap);
e61d98d8 713
3a93c841
TI
714 /* Reflect status in gcmd */
715 sts = readl(iommu->reg + DMAR_GSTS_REG);
716 if (sts & DMA_GSTS_IRES)
717 iommu->gcmd |= DMA_GCMD_IRE;
718 if (sts & DMA_GSTS_TES)
719 iommu->gcmd |= DMA_GCMD_TE;
720 if (sts & DMA_GSTS_QIES)
721 iommu->gcmd |= DMA_GCMD_QIE;
722
1f5b3c3f 723 raw_spin_lock_init(&iommu->register_lock);
e61d98d8
SS
724
725 drhd->iommu = iommu;
1886e8a9 726 return 0;
0815565a
DW
727
728 err_unmap:
6f5cf521 729 unmap_iommu(iommu);
0815565a 730 error:
e61d98d8 731 kfree(iommu);
6f5cf521 732 return err;
e61d98d8
SS
733}
734
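/*
 * Release the fault interrupt, the queued invalidation state and the
 * register mapping of one IOMMU, then free the structure itself.
 */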
a868e6b7 735static void free_iommu(struct intel_iommu *iommu)
e61d98d8 736{
a868e6b7
JL
737 if (iommu->irq) {
738 free_irq(iommu->irq, iommu);
739 irq_set_handler_data(iommu->irq, NULL);
740 destroy_irq(iommu->irq);
741 }
e61d98d8 742
a84da70b
JL
743 if (iommu->qi) {
744 free_page((unsigned long)iommu->qi->desc);
745 kfree(iommu->qi->desc_status);
746 kfree(iommu->qi);
747 }
748
e61d98d8 749 if (iommu->reg)
6f5cf521
DD
750 unmap_iommu(iommu);
751
e61d98d8
SS
752 kfree(iommu);
753}
fe962e90
SS
754
755/*
756 * Reclaim all the submitted descriptors which have completed their work.
757 */
758static inline void reclaim_free_desc(struct q_inval *qi)
759{
6ba6c3a4
YZ
760 while (qi->desc_status[qi->free_tail] == QI_DONE ||
761 qi->desc_status[qi->free_tail] == QI_ABORT) {
fe962e90
SS
762 qi->desc_status[qi->free_tail] = QI_FREE;
763 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
764 qi->free_cnt++;
765 }
766}
767
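/*
 * Check the fault status register while waiting for a queued
 * invalidation to complete.  An Invalidation Queue Error on our
 * descriptor is fatal (-EINVAL); an Invalidation Time-out Error aborts
 * the pending wait descriptors and asks the caller to retry (-EAGAIN).
 */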
768static int qi_check_fault(struct intel_iommu *iommu, int index)
769{
770 u32 fault;
6ba6c3a4 771 int head, tail;
704126ad
YZ
772 struct q_inval *qi = iommu->qi;
773 int wait_index = (index + 1) % QI_LENGTH;
774
6ba6c3a4
YZ
775 if (qi->desc_status[wait_index] == QI_ABORT)
776 return -EAGAIN;
777
704126ad
YZ
778 fault = readl(iommu->reg + DMAR_FSTS_REG);
779
780 /*
781 * If IQE happens, the head points to the descriptor associated
782 * with the error. No new descriptors are fetched until the IQE
783 * is cleared.
784 */
785 if (fault & DMA_FSTS_IQE) {
786 head = readl(iommu->reg + DMAR_IQH_REG);
6ba6c3a4 787 if ((head >> DMAR_IQ_SHIFT) == index) {
bf947fcb 788 pr_err("VT-d detected invalid descriptor: "
6ba6c3a4
YZ
789 "low=%llx, high=%llx\n",
790 (unsigned long long)qi->desc[index].low,
791 (unsigned long long)qi->desc[index].high);
704126ad
YZ
792 memcpy(&qi->desc[index], &qi->desc[wait_index],
793 sizeof(struct qi_desc));
794 __iommu_flush_cache(iommu, &qi->desc[index],
795 sizeof(struct qi_desc));
796 writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
797 return -EINVAL;
798 }
799 }
800
6ba6c3a4
YZ
801 /*
802 * If ITE happens, all pending wait_desc commands are aborted.
803 * No new descriptors are fetched until the ITE is cleared.
804 */
805 if (fault & DMA_FSTS_ITE) {
806 head = readl(iommu->reg + DMAR_IQH_REG);
807 head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
808 head |= 1;
809 tail = readl(iommu->reg + DMAR_IQT_REG);
810 tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
811
812 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
813
814 do {
815 if (qi->desc_status[head] == QI_IN_USE)
816 qi->desc_status[head] = QI_ABORT;
817 head = (head - 2 + QI_LENGTH) % QI_LENGTH;
818 } while (head != tail);
819
820 if (qi->desc_status[wait_index] == QI_ABORT)
821 return -EAGAIN;
822 }
823
824 if (fault & DMA_FSTS_ICE)
825 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
826
704126ad
YZ
827 return 0;
828}
829
fe962e90
SS
830/*
831 * Submit the queued invalidation descriptor to the remapping
832 * hardware unit and wait for its completion.
833 */
704126ad 834int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
fe962e90 835{
6ba6c3a4 836 int rc;
fe962e90
SS
837 struct q_inval *qi = iommu->qi;
838 struct qi_desc *hw, wait_desc;
839 int wait_index, index;
840 unsigned long flags;
841
842 if (!qi)
704126ad 843 return 0;
fe962e90
SS
844
845 hw = qi->desc;
846
6ba6c3a4
YZ
847restart:
848 rc = 0;
849
3b8f4048 850 raw_spin_lock_irqsave(&qi->q_lock, flags);
fe962e90 851 while (qi->free_cnt < 3) {
3b8f4048 852 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
fe962e90 853 cpu_relax();
3b8f4048 854 raw_spin_lock_irqsave(&qi->q_lock, flags);
fe962e90
SS
855 }
856
857 index = qi->free_head;
858 wait_index = (index + 1) % QI_LENGTH;
859
860 qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
861
862 hw[index] = *desc;
863
704126ad
YZ
864 wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
865 QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
fe962e90
SS
866 wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
867
868 hw[wait_index] = wait_desc;
869
870 __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
871 __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));
872
873 qi->free_head = (qi->free_head + 2) % QI_LENGTH;
874 qi->free_cnt -= 2;
875
fe962e90
SS
876 /*
877 * update the HW tail register indicating the presence of
878 * new descriptors.
879 */
6ba6c3a4 880 writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
fe962e90
SS
881
882 while (qi->desc_status[wait_index] != QI_DONE) {
883 /*
884 * We will leave the interrupts disabled, to prevent interrupt
885 * context from queueing another cmd while a cmd is already
886 * submitted and waiting for completion on this cpu. This is to
887 * avoid a deadlock where the interrupt context could wait
888 * indefinitely for free slots in the queue.
889 */
704126ad
YZ
890 rc = qi_check_fault(iommu, index);
891 if (rc)
6ba6c3a4 892 break;
704126ad 893
3b8f4048 894 raw_spin_unlock(&qi->q_lock);
fe962e90 895 cpu_relax();
3b8f4048 896 raw_spin_lock(&qi->q_lock);
fe962e90 897 }
6ba6c3a4
YZ
898
899 qi->desc_status[index] = QI_DONE;
fe962e90
SS
900
901 reclaim_free_desc(qi);
3b8f4048 902 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
704126ad 903
6ba6c3a4
YZ
904 if (rc == -EAGAIN)
905 goto restart;
906
704126ad 907 return rc;
fe962e90
SS
908}
909
910/*
911 * Flush the global interrupt entry cache.
912 */
913void qi_global_iec(struct intel_iommu *iommu)
914{
915 struct qi_desc desc;
916
917 desc.low = QI_IEC_TYPE;
918 desc.high = 0;
919
704126ad 920 /* should never fail */
fe962e90
SS
921 qi_submit_sync(&desc, iommu);
922}
923
4c25a2c1
DW
924void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
925 u64 type)
3481f210 926{
3481f210
YS
927 struct qi_desc desc;
928
3481f210
YS
929 desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
930 | QI_CC_GRAN(type) | QI_CC_TYPE;
931 desc.high = 0;
932
4c25a2c1 933 qi_submit_sync(&desc, iommu);
3481f210
YS
934}
935
1f0ef2aa
DW
936void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
937 unsigned int size_order, u64 type)
3481f210
YS
938{
939 u8 dw = 0, dr = 0;
940
941 struct qi_desc desc;
942 int ih = 0;
943
3481f210
YS
944 if (cap_write_drain(iommu->cap))
945 dw = 1;
946
947 if (cap_read_drain(iommu->cap))
948 dr = 1;
949
950 desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
951 | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
952 desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
953 | QI_IOTLB_AM(size_order);
954
1f0ef2aa 955 qi_submit_sync(&desc, iommu);
3481f210
YS
956}
957
6ba6c3a4
YZ
958void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
959 u64 addr, unsigned mask)
960{
961 struct qi_desc desc;
962
963 if (mask) {
964 BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
965 addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
966 desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
967 } else
968 desc.high = QI_DEV_IOTLB_ADDR(addr);
969
970 if (qdep >= QI_DEV_IOTLB_MAX_INVS)
971 qdep = 0;
972
973 desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
974 QI_DIOTLB_TYPE;
975
976 qi_submit_sync(&desc, iommu);
977}
978
eba67e5d
SS
979/*
980 * Disable Queued Invalidation interface.
981 */
982void dmar_disable_qi(struct intel_iommu *iommu)
983{
984 unsigned long flags;
985 u32 sts;
986 cycles_t start_time = get_cycles();
987
988 if (!ecap_qis(iommu->ecap))
989 return;
990
1f5b3c3f 991 raw_spin_lock_irqsave(&iommu->register_lock, flags);
eba67e5d
SS
992
993 sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
994 if (!(sts & DMA_GSTS_QIES))
995 goto end;
996
997 /*
998 * Give a chance to HW to complete the pending invalidation requests.
999 */
1000 while ((readl(iommu->reg + DMAR_IQT_REG) !=
1001 readl(iommu->reg + DMAR_IQH_REG)) &&
1002 (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
1003 cpu_relax();
1004
1005 iommu->gcmd &= ~DMA_GCMD_QIE;
eba67e5d
SS
1006 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1007
1008 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
1009 !(sts & DMA_GSTS_QIES), sts);
1010end:
1f5b3c3f 1011 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
eba67e5d
SS
1012}
1013
eb4a52bc
FY
1014/*
1015 * Enable queued invalidation.
1016 */
1017static void __dmar_enable_qi(struct intel_iommu *iommu)
1018{
c416daa9 1019 u32 sts;
eb4a52bc
FY
1020 unsigned long flags;
1021 struct q_inval *qi = iommu->qi;
1022
1023 qi->free_head = qi->free_tail = 0;
1024 qi->free_cnt = QI_LENGTH;
1025
1f5b3c3f 1026 raw_spin_lock_irqsave(&iommu->register_lock, flags);
eb4a52bc
FY
1027
1028 /* write zero to the tail reg */
1029 writel(0, iommu->reg + DMAR_IQT_REG);
1030
1031 dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
1032
eb4a52bc 1033 iommu->gcmd |= DMA_GCMD_QIE;
c416daa9 1034 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
eb4a52bc
FY
1035
1036 /* Make sure hardware complete it */
1037 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1038
1f5b3c3f 1039 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
eb4a52bc
FY
1040}
1041
fe962e90
SS
1042/*
1043 * Enable Queued Invalidation interface. This is a must to support
1044 * interrupt-remapping. Also used by DMA-remapping, which replaces
1045 * register based IOTLB invalidation.
1046 */
1047int dmar_enable_qi(struct intel_iommu *iommu)
1048{
fe962e90 1049 struct q_inval *qi;
751cafe3 1050 struct page *desc_page;
fe962e90
SS
1051
1052 if (!ecap_qis(iommu->ecap))
1053 return -ENOENT;
1054
1055 /*
1056 * queued invalidation is already setup and enabled.
1057 */
1058 if (iommu->qi)
1059 return 0;
1060
fa4b57cc 1061 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
fe962e90
SS
1062 if (!iommu->qi)
1063 return -ENOMEM;
1064
1065 qi = iommu->qi;
1066
751cafe3
SS
1067
1068 desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
1069 if (!desc_page) {
fe962e90 1070 kfree(qi);
b707cb02 1071 iommu->qi = NULL;
fe962e90
SS
1072 return -ENOMEM;
1073 }
1074
751cafe3
SS
1075 qi->desc = page_address(desc_page);
1076
37a40710 1077 qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
fe962e90
SS
1078 if (!qi->desc_status) {
1079 free_page((unsigned long) qi->desc);
1080 kfree(qi);
b707cb02 1081 iommu->qi = NULL;
fe962e90
SS
1082 return -ENOMEM;
1083 }
1084
1085 qi->free_head = qi->free_tail = 0;
1086 qi->free_cnt = QI_LENGTH;
1087
3b8f4048 1088 raw_spin_lock_init(&qi->q_lock);
fe962e90 1089
eb4a52bc 1090 __dmar_enable_qi(iommu);
fe962e90
SS
1091
1092 return 0;
1093}
0ac2491f
SS
1094
1095/* iommu interrupt handling. Most stuff are MSI-like. */
1096
9d783ba0
SS
1097enum faulttype {
1098 DMA_REMAP,
1099 INTR_REMAP,
1100 UNKNOWN,
1101};
1102
1103static const char *dma_remap_fault_reasons[] =
0ac2491f
SS
1104{
1105 "Software",
1106 "Present bit in root entry is clear",
1107 "Present bit in context entry is clear",
1108 "Invalid context entry",
1109 "Access beyond MGAW",
1110 "PTE Write access is not set",
1111 "PTE Read access is not set",
1112 "Next page table ptr is invalid",
1113 "Root table address invalid",
1114 "Context table ptr is invalid",
1115 "non-zero reserved fields in RTP",
1116 "non-zero reserved fields in CTP",
1117 "non-zero reserved fields in PTE",
4ecccd9e 1118 "PCE for translation request specifies blocking",
0ac2491f 1119};
9d783ba0 1120
95a02e97 1121static const char *irq_remap_fault_reasons[] =
9d783ba0
SS
1122{
1123 "Detected reserved fields in the decoded interrupt-remapped request",
1124 "Interrupt index exceeded the interrupt-remapping table size",
1125 "Present field in the IRTE entry is clear",
1126 "Error accessing interrupt-remapping table pointed by IRTA_REG",
1127 "Detected reserved fields in the IRTE entry",
1128 "Blocked a compatibility format interrupt request",
1129 "Blocked an interrupt request due to source-id verification failure",
1130};
1131
21004dcd 1132static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
0ac2491f 1133{
fefe1ed1
DC
1134 if (fault_reason >= 0x20 && (fault_reason - 0x20 <
1135 ARRAY_SIZE(irq_remap_fault_reasons))) {
9d783ba0 1136 *fault_type = INTR_REMAP;
95a02e97 1137 return irq_remap_fault_reasons[fault_reason - 0x20];
9d783ba0
SS
1138 } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1139 *fault_type = DMA_REMAP;
1140 return dma_remap_fault_reasons[fault_reason];
1141 } else {
1142 *fault_type = UNKNOWN;
0ac2491f 1143 return "Unknown";
9d783ba0 1144 }
0ac2491f
SS
1145}
1146
5c2837fb 1147void dmar_msi_unmask(struct irq_data *data)
0ac2491f 1148{
dced35ae 1149 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
0ac2491f
SS
1150 unsigned long flag;
1151
1152 /* unmask it */
1f5b3c3f 1153 raw_spin_lock_irqsave(&iommu->register_lock, flag);
0ac2491f
SS
1154 writel(0, iommu->reg + DMAR_FECTL_REG);
1155 /* Read a reg to force flush the post write */
1156 readl(iommu->reg + DMAR_FECTL_REG);
1f5b3c3f 1157 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
0ac2491f
SS
1158}
1159
5c2837fb 1160void dmar_msi_mask(struct irq_data *data)
0ac2491f
SS
1161{
1162 unsigned long flag;
dced35ae 1163 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
0ac2491f
SS
1164
1165 /* mask it */
1f5b3c3f 1166 raw_spin_lock_irqsave(&iommu->register_lock, flag);
0ac2491f
SS
1167 writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
1168 /* Read a reg to force flush the post write */
1169 readl(iommu->reg + DMAR_FECTL_REG);
1f5b3c3f 1170 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
0ac2491f
SS
1171}
1172
1173void dmar_msi_write(int irq, struct msi_msg *msg)
1174{
dced35ae 1175 struct intel_iommu *iommu = irq_get_handler_data(irq);
0ac2491f
SS
1176 unsigned long flag;
1177
1f5b3c3f 1178 raw_spin_lock_irqsave(&iommu->register_lock, flag);
0ac2491f
SS
1179 writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
1180 writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
1181 writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
1f5b3c3f 1182 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
0ac2491f
SS
1183}
1184
1185void dmar_msi_read(int irq, struct msi_msg *msg)
1186{
dced35ae 1187 struct intel_iommu *iommu = irq_get_handler_data(irq);
0ac2491f
SS
1188 unsigned long flag;
1189
1f5b3c3f 1190 raw_spin_lock_irqsave(&iommu->register_lock, flag);
0ac2491f
SS
1191 msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
1192 msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
1193 msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
1f5b3c3f 1194 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
0ac2491f
SS
1195}
1196
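/* Decode and log a single fault record from the fault recording registers. */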
1197static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1198 u8 fault_reason, u16 source_id, unsigned long long addr)
1199{
1200 const char *reason;
9d783ba0 1201 int fault_type;
0ac2491f 1202
9d783ba0 1203 reason = dmar_get_fault_reason(fault_reason, &fault_type);
0ac2491f 1204
9d783ba0 1205 if (fault_type == INTR_REMAP)
1206 pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
9d783ba0
SS
1207 "fault index %llx\n"
1208 "INTR-REMAP:[fault reason %02d] %s\n",
1209 (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1210 PCI_FUNC(source_id & 0xFF), addr >> 48,
1211 fault_reason, reason);
1212 else
bf947fcb 1213 pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
9d783ba0
SS
1214 "fault addr %llx \n"
1215 "DMAR:[fault reason %02d] %s\n",
1216 (type ? "DMA Read" : "DMA Write"),
1217 (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1218 PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
0ac2491f
SS
1219 return 0;
1220}
1221
1222#define PRIMARY_FAULT_REG_LEN (16)
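/*
 * Fault interrupt handler: drain the primary fault recording registers,
 * logging and clearing each pending fault, then acknowledge the overall
 * fault status.
 */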
1531a6a6 1223irqreturn_t dmar_fault(int irq, void *dev_id)
0ac2491f
SS
1224{
1225 struct intel_iommu *iommu = dev_id;
1226 int reg, fault_index;
1227 u32 fault_status;
1228 unsigned long flag;
1229
1f5b3c3f 1230 raw_spin_lock_irqsave(&iommu->register_lock, flag);
0ac2491f 1231 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
9d783ba0 1232 if (fault_status)
bf947fcb 1233 pr_err("DRHD: handling fault status reg %x\n", fault_status);
0ac2491f
SS
1234
1235 /* TBD: ignore advanced fault log currently */
1236 if (!(fault_status & DMA_FSTS_PPF))
bd5cdad0 1237 goto unlock_exit;
0ac2491f
SS
1238
1239 fault_index = dma_fsts_fault_record_index(fault_status);
1240 reg = cap_fault_reg_offset(iommu->cap);
1241 while (1) {
1242 u8 fault_reason;
1243 u16 source_id;
1244 u64 guest_addr;
1245 int type;
1246 u32 data;
1247
1248 /* highest 32 bits */
1249 data = readl(iommu->reg + reg +
1250 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1251 if (!(data & DMA_FRCD_F))
1252 break;
1253
1254 fault_reason = dma_frcd_fault_reason(data);
1255 type = dma_frcd_type(data);
1256
1257 data = readl(iommu->reg + reg +
1258 fault_index * PRIMARY_FAULT_REG_LEN + 8);
1259 source_id = dma_frcd_source_id(data);
1260
1261 guest_addr = dmar_readq(iommu->reg + reg +
1262 fault_index * PRIMARY_FAULT_REG_LEN);
1263 guest_addr = dma_frcd_page_addr(guest_addr);
1264 /* clear the fault */
1265 writel(DMA_FRCD_F, iommu->reg + reg +
1266 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1267
1f5b3c3f 1268 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
0ac2491f
SS
1269
1270 dmar_fault_do_one(iommu, type, fault_reason,
1271 source_id, guest_addr);
1272
1273 fault_index++;
8211a7b5 1274 if (fault_index >= cap_num_fault_regs(iommu->cap))
0ac2491f 1275 fault_index = 0;
1f5b3c3f 1276 raw_spin_lock_irqsave(&iommu->register_lock, flag);
0ac2491f 1277 }
0ac2491f 1278
bd5cdad0
LZH
1279 writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);
1280
1281unlock_exit:
1f5b3c3f 1282 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
0ac2491f
SS
1283 return IRQ_HANDLED;
1284}
1285
1286int dmar_set_interrupt(struct intel_iommu *iommu)
1287{
1288 int irq, ret;
1289
9d783ba0
SS
1290 /*
1291 * Check if the fault interrupt is already initialized.
1292 */
1293 if (iommu->irq)
1294 return 0;
1295
0ac2491f
SS
1296 irq = create_irq();
1297 if (!irq) {
bf947fcb 1298 pr_err("IOMMU: no free vectors\n");
0ac2491f
SS
1299 return -EINVAL;
1300 }
1301
dced35ae 1302 irq_set_handler_data(irq, iommu);
0ac2491f
SS
1303 iommu->irq = irq;
1304
1305 ret = arch_setup_dmar_msi(irq);
1306 if (ret) {
dced35ae 1307 irq_set_handler_data(irq, NULL);
0ac2491f
SS
1308 iommu->irq = 0;
1309 destroy_irq(irq);
dd726435 1310 return ret;
0ac2491f
SS
1311 }
1312
477694e7 1313 ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
0ac2491f 1314 if (ret)
bf947fcb 1315 pr_err("IOMMU: can't request irq\n");
0ac2491f
SS
1316 return ret;
1317}
9d783ba0
SS
1318
1319int __init enable_drhd_fault_handling(void)
1320{
1321 struct dmar_drhd_unit *drhd;
7c919779 1322 struct intel_iommu *iommu;
9d783ba0
SS
1323
1324 /*
1325 * Enable fault control interrupt.
1326 */
7c919779 1327 for_each_iommu(iommu, drhd) {
bd5cdad0 1328 u32 fault_status;
7c919779 1329 int ret = dmar_set_interrupt(iommu);
9d783ba0
SS
1330
1331 if (ret) {
1332 pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
9d783ba0
SS
1333 (unsigned long long)drhd->reg_base_addr, ret);
1334 return -1;
1335 }
7f99d946
SS
1336
1337 /*
1338 * Clear any previous faults.
1339 */
1340 dmar_fault(iommu->irq, iommu);
bd5cdad0
LZH
1341 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1342 writel(fault_status, iommu->reg + DMAR_FSTS_REG);
9d783ba0
SS
1343 }
1344
1345 return 0;
1346}
eb4a52bc
FY
1347
1348/*
1349 * Re-enable Queued Invalidation interface.
1350 */
1351int dmar_reenable_qi(struct intel_iommu *iommu)
1352{
1353 if (!ecap_qis(iommu->ecap))
1354 return -ENOENT;
1355
1356 if (!iommu->qi)
1357 return -ENOENT;
1358
1359 /*
1360 * First disable queued invalidation.
1361 */
1362 dmar_disable_qi(iommu);
1363 /*
1364 * Then enable queued invalidation again. Since there is no pending
1365 * invalidation requests now, it's safe to re-enable queued
1366 * invalidation.
1367 */
1368 __dmar_enable_qi(iommu);
1369
1370 return 0;
1371}
074835f0
YS
1372
1373/*
1374 * Check interrupt remapping support in DMAR table description.
1375 */
0b8973a8 1376int __init dmar_ir_support(void)
074835f0
YS
1377{
1378 struct acpi_table_dmar *dmar;
1379 dmar = (struct acpi_table_dmar *)dmar_tbl;
4f506e07
AP
1380 if (!dmar)
1381 return 0;
074835f0
YS
1382 return dmar->flags & 0x1;
1383}
694835dc 1384
a868e6b7
JL
1385static int __init dmar_free_unused_resources(void)
1386{
1387 struct dmar_drhd_unit *dmaru, *dmaru_n;
1388
1389 /* DMAR units are in use */
1390 if (irq_remapping_enabled || intel_iommu_enabled)
1391 return 0;
1392
1393 list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
1394 list_del(&dmaru->list);
1395 dmar_free_drhd(dmaru);
1396 }
1397
1398 return 0;
1399}
1400
1401late_initcall(dmar_free_unused_resources);
4db77ff3 1402IOMMU_INIT_POST(detect_intel_iommu);