[mirror_ubuntu-jammy-kernel.git] / drivers / iommu / amd / init.c
45051539 1// SPDX-License-Identifier: GPL-2.0-only
f6e2e6b6 2/*
5d0d7156 3 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
63ce3ae8 4 * Author: Joerg Roedel <jroedel@suse.de>
f6e2e6b6 5 * Leo Duran <leo.duran@amd.com>
f6e2e6b6
JR
6 */
7
101fa037 8#define pr_fmt(fmt) "AMD-Vi: " fmt
5f226da1 9#define dev_fmt(fmt) pr_fmt(fmt)
101fa037 10
f6e2e6b6
JR
11#include <linux/pci.h>
12#include <linux/acpi.h>
f6e2e6b6 13#include <linux/list.h>
5c87f62d 14#include <linux/bitmap.h>
5a0e3ad6 15#include <linux/slab.h>
f3c6ea1b 16#include <linux/syscore_ops.h>
a80dc3e0
JR
17#include <linux/interrupt.h>
18#include <linux/msi.h>
403f81d8 19#include <linux/amd-iommu.h>
400a28a0 20#include <linux/export.h>
ebcfa284 21#include <linux/kmemleak.h>
2543a786 22#include <linux/mem_encrypt.h>
f6e2e6b6 23#include <asm/pci-direct.h>
46a7fa27 24#include <asm/iommu.h>
66929812
SS
25#include <asm/apic.h>
26#include <asm/msidef.h>
1d9b16d1 27#include <asm/gart.h>
ea1b0d39 28#include <asm/x86_init.h>
22e6daf4 29#include <asm/iommu_table.h>
eb1eb7ae 30#include <asm/io_apic.h>
6b474b82 31#include <asm/irq_remapping.h>
403f81d8 32
3ac3e5ee 33#include <linux/crash_dump.h>
786dfe49 34
93d05155 35#include "amd_iommu.h"
ad8694ba 36#include "../irq_remapping.h"
403f81d8 37
f6e2e6b6
JR
38/*
39 * definitions for the ACPI scanning code
40 */
f6e2e6b6 41#define IVRS_HEADER_LENGTH 48
f6e2e6b6 42
8c7142f5 43#define ACPI_IVHD_TYPE_MAX_SUPPORTED 0x40
f6e2e6b6
JR
44#define ACPI_IVMD_TYPE_ALL 0x20
45#define ACPI_IVMD_TYPE 0x21
46#define ACPI_IVMD_TYPE_RANGE 0x22
47
48#define IVHD_DEV_ALL 0x01
49#define IVHD_DEV_SELECT 0x02
50#define IVHD_DEV_SELECT_RANGE_START 0x03
51#define IVHD_DEV_RANGE_END 0x04
52#define IVHD_DEV_ALIAS 0x42
53#define IVHD_DEV_ALIAS_RANGE 0x43
54#define IVHD_DEV_EXT_SELECT 0x46
55#define IVHD_DEV_EXT_SELECT_RANGE 0x47
6efed63b 56#define IVHD_DEV_SPECIAL 0x48
8c7142f5 57#define IVHD_DEV_ACPI_HID 0xf0
6efed63b 58
2a0cb4e2
WZ
59#define UID_NOT_PRESENT 0
60#define UID_IS_INTEGER 1
61#define UID_IS_CHARACTER 2
62
6efed63b
JR
63#define IVHD_SPECIAL_IOAPIC 1
64#define IVHD_SPECIAL_HPET 2
f6e2e6b6 65
6da7342f
JR
66#define IVHD_FLAG_HT_TUN_EN_MASK 0x01
67#define IVHD_FLAG_PASSPW_EN_MASK 0x02
68#define IVHD_FLAG_RESPASSPW_EN_MASK 0x04
69#define IVHD_FLAG_ISOC_EN_MASK 0x08
f6e2e6b6
JR
70
71#define IVMD_FLAG_EXCL_RANGE 0x08
387caf0b
AH
72#define IVMD_FLAG_IW 0x04
73#define IVMD_FLAG_IR 0x02
f6e2e6b6
JR
74#define IVMD_FLAG_UNITY_MAP 0x01
75
76#define ACPI_DEVFLAG_INITPASS 0x01
77#define ACPI_DEVFLAG_EXTINT 0x02
78#define ACPI_DEVFLAG_NMI 0x04
79#define ACPI_DEVFLAG_SYSMGT1 0x10
80#define ACPI_DEVFLAG_SYSMGT2 0x20
81#define ACPI_DEVFLAG_LINT0 0x40
82#define ACPI_DEVFLAG_LINT1 0x80
83#define ACPI_DEVFLAG_ATSDIS 0x10000000
84
8bda0cfb 85#define LOOP_TIMEOUT 100000
b65233a9
JR
86/*
87 * ACPI table definitions
88 *
89 * These data structures are laid over the table to parse the important values
90 * out of it.
91 */
92
b0119e87
JR
93extern const struct iommu_ops amd_iommu_ops;
94
b65233a9
JR
95/*
96 * structure describing one IOMMU in the ACPI table. Typically followed by one
97 * or more ivhd_entry structures.
98 */
f6e2e6b6
JR
99struct ivhd_header {
100 u8 type;
101 u8 flags;
102 u16 length;
103 u16 devid;
104 u16 cap_ptr;
105 u64 mmio_phys;
106 u16 pci_seg;
107 u16 info;
7d7d38af
SS
108 u32 efr_attr;
109
110 /* Following only valid on IVHD type 11h and 40h */
111 u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
112 u64 res;
f6e2e6b6
JR
113} __attribute__((packed));
114
b65233a9
JR
115/*
116 * A device entry describing which devices a specific IOMMU translates and
117 * which requestor ids they use.
118 */
f6e2e6b6
JR
119struct ivhd_entry {
120 u8 type;
121 u16 devid;
122 u8 flags;
123 u32 ext;
2a0cb4e2
WZ
124 u32 hidh;
125 u64 cid;
126 u8 uidf;
127 u8 uidl;
128 u8 uid;
f6e2e6b6
JR
129} __attribute__((packed));
130
b65233a9
JR
131/*
132 * An AMD IOMMU memory definition structure. It defines things like exclusion
133 * ranges for devices and regions that should be unity mapped.
134 */
f6e2e6b6
JR
135struct ivmd_header {
136 u8 type;
137 u8 flags;
138 u16 length;
139 u16 devid;
140 u16 aux;
141 u64 resv;
142 u64 range_start;
143 u64 range_length;
144} __attribute__((packed));
145
fefda117 146bool amd_iommu_dump;
05152a04 147bool amd_iommu_irq_remap __read_mostly;
fefda117 148
d98de49a 149int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
81307143 150static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
3928aa3f 151
02f3b3f5 152static bool amd_iommu_detected;
a5235725 153static bool __initdata amd_iommu_disabled;
8c7142f5 154static int amd_iommu_target_ivhd_type;
c1cbebee 155
b65233a9
JR
156u16 amd_iommu_last_bdf; /* largest PCI device id we have
157 to handle */
2e22847f 158LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings
b65233a9 159 we find in ACPI */
621a5f7a 160bool amd_iommu_unmap_flush; /* if true, flush on every unmap */
928abd25 161
2e22847f 162LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
b65233a9 163 system */
928abd25 164
bb52777e
JR
165/* Array to assign indices to IOMMUs */
166struct amd_iommu *amd_iommus[MAX_IOMMUS];
6b9376e3
SS
167
168/* Number of IOMMUs present in the system */
169static int amd_iommus_present;
bb52777e 170
318afd41
JR
171/* IOMMUs have a non-present cache? */
172bool amd_iommu_np_cache __read_mostly;
60f723b4 173bool amd_iommu_iotlb_sup __read_mostly = true;
318afd41 174
a919a018 175u32 amd_iommu_max_pasid __read_mostly = ~0;
62f71abb 176
400a28a0 177bool amd_iommu_v2_present __read_mostly;
4160cd9e 178static bool amd_iommu_pc_present __read_mostly;
400a28a0 179
5abcdba4
JR
180bool amd_iommu_force_isolation __read_mostly;
181
b65233a9
JR
182/*
183 * Pointer to the device table which is shared by all AMD IOMMUs
184 * it is indexed by the PCI device id or the HT unit id and contains
185 * information about the domain the device belongs to as well as the
186 * page table root pointer.
187 */
928abd25 188struct dev_table_entry *amd_iommu_dev_table;
45a01c42
BH
189/*
190 * Pointer to a device table to which the content of the old device table
191 * will be copied. It is only used in the kdump kernel.
192 */
193static struct dev_table_entry *old_dev_tbl_cpy;
b65233a9
JR
194
195/*
196 * The alias table is a driver specific data structure which contains the
197 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
198 * More than one device can share the same requestor id.
199 */
928abd25 200u16 *amd_iommu_alias_table;
b65233a9
JR
201
202/*
203 * The rlookup table is used to find the IOMMU which is responsible
204 * for a specific device. It is also indexed by the PCI device id.
205 */
928abd25 206struct amd_iommu **amd_iommu_rlookup_table;
daae2d25 207EXPORT_SYMBOL(amd_iommu_rlookup_table);
b65233a9 208
b65233a9 209/*
0ea2c422
JR
210 * This table is used to find the irq remapping table for a given device id
211 * quickly.
212 */
213struct irq_remap_table **irq_lookup_table;
214
b65233a9 215/*
df805abb 216 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
b65233a9
JR
217 * to know which ones are already in use.
218 */
928abd25
JR
219unsigned long *amd_iommu_pd_alloc_bitmap;
220
b65233a9
JR
221static u32 dev_table_size; /* size of the device table */
222static u32 alias_table_size; /* size of the alias table */
223static u32 rlookup_table_size; /* size of the rlookup table */
3e8064ba 224
2c0ae172
JR
225enum iommu_init_state {
226 IOMMU_START_STATE,
227 IOMMU_IVRS_DETECTED,
228 IOMMU_ACPI_FINISHED,
229 IOMMU_ENABLED,
230 IOMMU_PCI_INIT,
231 IOMMU_INTERRUPTS_EN,
232 IOMMU_DMA_OPS,
233 IOMMU_INITIALIZED,
234 IOMMU_NOT_FOUND,
235 IOMMU_INIT_ERROR,
1b1e942e 236 IOMMU_CMDLINE_DISABLED,
2c0ae172
JR
237};
238
235dacbc
JR
239/* Early ioapic and hpet maps from kernel command line */
240#define EARLY_MAP_SIZE 4
241static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
242static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
2a0cb4e2
WZ
243static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];
244
235dacbc
JR
245static int __initdata early_ioapic_map_size;
246static int __initdata early_hpet_map_size;
2a0cb4e2
WZ
247static int __initdata early_acpihid_map_size;
248
dfbb6d47 249static bool __initdata cmdline_maps;
235dacbc 250
2c0ae172
JR
251static enum iommu_init_state init_state = IOMMU_START_STATE;
252
ae295142 253static int amd_iommu_enable_interrupts(void);
2c0ae172 254static int __init iommu_go_to_state(enum iommu_init_state state);
aafd8ba0 255static void init_device_table_dma(void);
3d9761e7 256
2479c631 257static bool amd_iommu_pre_enabled = true;
3ac3e5ee 258
4c232a70
BH
259bool translation_pre_enabled(struct amd_iommu *iommu)
260{
261 return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
262}
daae2d25 263EXPORT_SYMBOL(translation_pre_enabled);
4c232a70
BH
264
265static void clear_translation_pre_enabled(struct amd_iommu *iommu)
266{
267 iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
268}
269
270static void init_translation_status(struct amd_iommu *iommu)
271{
e881dbd5 272 u64 ctrl;
4c232a70 273
e881dbd5 274 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
4c232a70
BH
275 if (ctrl & (1<<CONTROL_IOMMU_EN))
276 iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
277}
278
208ec8c9
JR
279static inline void update_last_devid(u16 devid)
280{
281 if (devid > amd_iommu_last_bdf)
282 amd_iommu_last_bdf = devid;
283}
284
c571484e
JR
285static inline unsigned long tbl_size(int entry_size)
286{
287 unsigned shift = PAGE_SHIFT +
421f909c 288 get_order(((int)amd_iommu_last_bdf + 1) * entry_size);
c571484e
JR
289
290 return 1UL << shift;
291}
292
6b9376e3
SS
293int amd_iommu_get_num_iommus(void)
294{
295 return amd_iommus_present;
296}
297
5bcd757f
MG
298/* Access to l1 and l2 indexed register spaces */
299
300static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
301{
302 u32 val;
303
304 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
305 pci_read_config_dword(iommu->dev, 0xfc, &val);
306 return val;
307}
308
309static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
310{
311 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
312 pci_write_config_dword(iommu->dev, 0xfc, val);
313 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
314}
315
316static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
317{
318 u32 val;
319
320 pci_write_config_dword(iommu->dev, 0xf0, address);
321 pci_read_config_dword(iommu->dev, 0xf4, &val);
322 return val;
323}
324
325static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
326{
327 pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
328 pci_write_config_dword(iommu->dev, 0xf4, val);
329}
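/*
 * Usage sketch (illustrative): the helpers above implement the indirect
 * access protocol for the IOMMU's L1 and L2 register banks. The index
 * register (0xf8 for L1, 0xf0 for L2) selects the target register, bit 31
 * (L1) resp. bit 8 (L2) acts as the write-enable, and the data register
 * (0xfc resp. 0xf4) carries the value. The ATS write check workaround
 * further down uses exactly this pattern:
 *
 *	u32 val = iommu_read_l2(iommu, 0x47);
 *	iommu_write_l2(iommu, 0x47, val | BIT(0));
 */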
330
b65233a9
JR
331/****************************************************************************
332 *
333 * AMD IOMMU MMIO register space handling functions
334 *
335 * These functions are used to program the IOMMU device registers in
336 * MMIO space required by this driver.
337 *
338 ****************************************************************************/
3e8064ba 339
b65233a9
JR
340/*
341 * This function sets the exclusion range in the IOMMU. DMA accesses to the
342 * exclusion range are passed through untranslated.
343 */
05f92db9 344static void iommu_set_exclusion_range(struct amd_iommu *iommu)
b2026aa2
JR
345{
346 u64 start = iommu->exclusion_start & PAGE_MASK;
3c677d20 347 u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
b2026aa2
JR
348 u64 entry;
349
350 if (!iommu->exclusion_start)
351 return;
352
353 entry = start | MMIO_EXCL_ENABLE_MASK;
354 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
355 &entry, sizeof(entry));
356
357 entry = limit;
358 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
359 &entry, sizeof(entry));
360}
361
b65233a9 362/* Programs the physical address of the device table into the IOMMU hardware */
6b7f000e 363static void iommu_set_device_table(struct amd_iommu *iommu)
b2026aa2 364{
f609891f 365 u64 entry;
b2026aa2
JR
366
367 BUG_ON(iommu->mmio_base == NULL);
368
2543a786 369 entry = iommu_virt_to_phys(amd_iommu_dev_table);
b2026aa2
JR
370 entry |= (dev_table_size >> 12) - 1;
371 memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
372 &entry, sizeof(entry));
373}
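/*
 * Layout note (illustrative): the device table base register combines the
 * physical base address with the table size encoded in the low bits as the
 * number of 4 KiB pages minus one. For a 2 MiB device table:
 *
 *	entry = iommu_virt_to_phys(amd_iommu_dev_table) | ((0x200000 >> 12) - 1)
 *	      = base | 511
 *
 * copy_device_table() further down reverses this encoding to recover the
 * size of the old kernel's device table.
 */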
374
b65233a9 375/* Generic functions to enable/disable certain features of the IOMMU. */
05f92db9 376static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
b2026aa2 377{
e881dbd5 378 u64 ctrl;
b2026aa2 379
e881dbd5
SS
380 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
381 ctrl |= (1ULL << bit);
382 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
b2026aa2
JR
383}
384
ca020711 385static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
b2026aa2 386{
e881dbd5 387 u64 ctrl;
b2026aa2 388
e881dbd5
SS
389 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
390 ctrl &= ~(1ULL << bit);
391 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
b2026aa2
JR
392}
393
1456e9d2
JR
394static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
395{
e881dbd5 396 u64 ctrl;
1456e9d2 397
e881dbd5 398 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
1456e9d2
JR
399 ctrl &= ~CTRL_INV_TO_MASK;
400 ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
e881dbd5 401 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
1456e9d2
JR
402}
403
b65233a9 404/* Function to enable the hardware */
05f92db9 405static void iommu_enable(struct amd_iommu *iommu)
b2026aa2 406{
b2026aa2 407 iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
b2026aa2
JR
408}
409
92ac4320 410static void iommu_disable(struct amd_iommu *iommu)
126c52be 411{
3ddbe913
KM
412 if (!iommu->mmio_base)
413 return;
414
a8c485bb
CW
415 /* Disable command buffer */
416 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
417
418 /* Disable event logging and event interrupts */
419 iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
420 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
421
8bda0cfb
SS
422 /* Disable IOMMU GA_LOG */
423 iommu_feature_disable(iommu, CONTROL_GALOG_EN);
424 iommu_feature_disable(iommu, CONTROL_GAINT_EN);
425
a8c485bb 426 /* Disable IOMMU hardware itself */
92ac4320 427 iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
126c52be
JR
428}
429
b65233a9
JR
430/*
431 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
432 * the system has one.
433 */
30861ddc 434static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
6c56747b 435{
30861ddc 436 if (!request_mem_region(address, end, "amd_iommu")) {
101fa037 437 pr_err("Can not reserve memory region %llx-%llx for mmio\n",
30861ddc 438 address, end);
101fa037 439 pr_err("This is a BIOS bug. Please contact your hardware vendor\n");
6c56747b 440 return NULL;
e82752d8 441 }
6c56747b 442
4bdc0d67 443 return (u8 __iomem *)ioremap(address, end);
6c56747b
JR
444}
445
446static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
447{
448 if (iommu->mmio_base)
449 iounmap(iommu->mmio_base);
30861ddc 450 release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
6c56747b
JR
451}
452
ac7ccf67
SS
453static inline u32 get_ivhd_header_size(struct ivhd_header *h)
454{
455 u32 size = 0;
456
457 switch (h->type) {
458 case 0x10:
459 size = 24;
460 break;
461 case 0x11:
462 case 0x40:
463 size = 40;
464 break;
465 }
466 return size;
467}
468
b65233a9
JR
469/****************************************************************************
470 *
471 * The functions below belong to the first pass of AMD IOMMU ACPI table
472 * parsing. In this pass we try to find out the highest device id this
473 * code has to handle. Based on this information the size of the shared data
474 * structures is determined later.
475 *
476 ****************************************************************************/
477
b514e555
JR
478/*
479 * This function calculates the length of a given IVHD entry
480 */
481static inline int ivhd_entry_length(u8 *ivhd)
482{
8c7142f5
SS
483 u32 type = ((struct ivhd_entry *)ivhd)->type;
484
485 if (type < 0x80) {
486 return 0x04 << (*ivhd >> 6);
487 } else if (type == IVHD_DEV_ACPI_HID) {
488 /* For ACPI_HID, offset 21 is uid len */
489 return *((u8 *)ivhd + 21) + 22;
490 }
491 return 0;
b514e555
JR
492}
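/*
 * Encoding note (illustrative): for fixed-size entries the length follows
 * from the two most significant bits of the type byte, e.g.
 *
 *	IVHD_DEV_SELECT (0x02): 0x04 << (0x02 >> 6) == 4 bytes
 *	IVHD_DEV_ALIAS  (0x42): 0x04 << (0x42 >> 6) == 8 bytes
 *
 * IVHD_DEV_ACPI_HID (0xf0) entries are variable length: 22 fixed bytes plus
 * the UID length stored at offset 21.
 */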
493
b65233a9
JR
494/*
495 * After reading the highest device id from the IOMMU PCI capability header
496 * this function checks whether a higher device id is defined in the ACPI table
497 */
3e8064ba
JR
498static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
499{
500 u8 *p = (void *)h, *end = (void *)h;
501 struct ivhd_entry *dev;
502
ac7ccf67
SS
503 u32 ivhd_size = get_ivhd_header_size(h);
504
505 if (!ivhd_size) {
101fa037 506 pr_err("Unsupported IVHD type %#x\n", h->type);
ac7ccf67
SS
507 return -EINVAL;
508 }
509
510 p += ivhd_size;
3e8064ba
JR
511 end += h->length;
512
3e8064ba
JR
513 while (p < end) {
514 dev = (struct ivhd_entry *)p;
515 switch (dev->type) {
d1259416
JR
516 case IVHD_DEV_ALL:
517 /* Use maximum BDF value for DEV_ALL */
518 update_last_devid(0xffff);
519 break;
3e8064ba
JR
520 case IVHD_DEV_SELECT:
521 case IVHD_DEV_RANGE_END:
522 case IVHD_DEV_ALIAS:
523 case IVHD_DEV_EXT_SELECT:
b65233a9 524 /* all the above subfield types refer to device ids */
208ec8c9 525 update_last_devid(dev->devid);
3e8064ba
JR
526 break;
527 default:
528 break;
529 }
b514e555 530 p += ivhd_entry_length(p);
3e8064ba
JR
531 }
532
533 WARN_ON(p != end);
534
535 return 0;
536}
537
8c7142f5
SS
538static int __init check_ivrs_checksum(struct acpi_table_header *table)
539{
540 int i;
541 u8 checksum = 0, *p = (u8 *)table;
542
543 for (i = 0; i < table->length; ++i)
544 checksum += p[i];
545 if (checksum != 0) {
546 /* ACPI table corrupt */
101fa037 547 pr_err(FW_BUG "IVRS invalid checksum\n");
8c7142f5
SS
548 return -ENODEV;
549 }
550
551 return 0;
552}
553
b65233a9
JR
554/*
555 * Iterate over all IVHD entries in the ACPI table and find the highest device
556 * id which we need to handle. This is the first of three functions which parse
557 * the ACPI table. So we check the checksum here.
558 */
3e8064ba
JR
559static int __init find_last_devid_acpi(struct acpi_table_header *table)
560{
8c7142f5 561 u8 *p = (u8 *)table, *end = (u8 *)table;
3e8064ba
JR
562 struct ivhd_header *h;
563
3e8064ba
JR
564 p += IVRS_HEADER_LENGTH;
565
566 end += table->length;
567 while (p < end) {
568 h = (struct ivhd_header *)p;
8c7142f5
SS
569 if (h->type == amd_iommu_target_ivhd_type) {
570 int ret = find_last_devid_from_ivhd(h);
571
572 if (ret)
573 return ret;
3e8064ba
JR
574 }
575 p += h->length;
576 }
577 WARN_ON(p != end);
578
579 return 0;
580}
581
b65233a9
JR
582/****************************************************************************
583 *
df805abb 584 * The following functions belong to the code path which parses the ACPI table
b65233a9
JR
585 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
586 * data structures, initialize the device/alias/rlookup table and also
587 * basically initialize the hardware.
588 *
589 ****************************************************************************/
590
591/*
592 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
593 * write commands to that buffer later and the IOMMU will execute them
594 * asynchronously
595 */
f2c2db53 596static int __init alloc_command_buffer(struct amd_iommu *iommu)
b36ca91e 597{
f2c2db53
JR
598 iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
599 get_order(CMD_BUFFER_SIZE));
b36ca91e 600
f2c2db53 601 return iommu->cmd_buf ? 0 : -ENOMEM;
58492e12
JR
602}
603
93f1cc67
JR
604/*
605 * This function resets the command buffer if the IOMMU stopped fetching
606 * commands from it.
607 */
608void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
609{
610 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
611
612 writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
613 writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
d334a563
TL
614 iommu->cmd_buf_head = 0;
615 iommu->cmd_buf_tail = 0;
93f1cc67
JR
616
617 iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
618}
619
58492e12
JR
620/*
621 * This function writes the command buffer address to the hardware and
622 * enables it.
623 */
624static void iommu_enable_command_buffer(struct amd_iommu *iommu)
625{
626 u64 entry;
627
628 BUG_ON(iommu->cmd_buf == NULL);
629
2543a786 630 entry = iommu_virt_to_phys(iommu->cmd_buf);
b36ca91e 631 entry |= MMIO_CMD_SIZE_512;
58492e12 632
b36ca91e 633 memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
58492e12 634 &entry, sizeof(entry));
b36ca91e 635
93f1cc67 636 amd_iommu_reset_cmd_buffer(iommu);
b36ca91e
JR
637}
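/*
 * Note (illustrative; assumes the definitions from amd_iommu_types.h where
 * CMD_BUFFER_SIZE is 8 KiB): the command buffer base register carries both
 * the buffer's physical address and a size field. MMIO_CMD_SIZE_512 selects
 * a ring of 512 16-byte commands, which matches CMD_BUFFER_SIZE:
 *
 *	512 entries * 16 bytes == 8192 bytes == CMD_BUFFER_SIZE
 */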
638
78d313c6
BH
639/*
640 * This function disables the command buffer
641 */
642static void iommu_disable_command_buffer(struct amd_iommu *iommu)
643{
644 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
645}
646
b36ca91e
JR
647static void __init free_command_buffer(struct amd_iommu *iommu)
648{
deba4bce 649 free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
b36ca91e
JR
650}
651
335503e5 652/* allocates the memory where the IOMMU will log its events to */
f2c2db53 653static int __init alloc_event_buffer(struct amd_iommu *iommu)
335503e5 654{
f2c2db53
JR
655 iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
656 get_order(EVT_BUFFER_SIZE));
335503e5 657
f2c2db53 658 return iommu->evt_buf ? 0 : -ENOMEM;
58492e12
JR
659}
660
661static void iommu_enable_event_buffer(struct amd_iommu *iommu)
662{
663 u64 entry;
664
665 BUG_ON(iommu->evt_buf == NULL);
666
2543a786 667 entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
58492e12 668
335503e5
JR
669 memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
670 &entry, sizeof(entry));
671
09067207
JR
672 /* set head and tail to zero manually */
673 writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
674 writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
675
58492e12 676 iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
335503e5
JR
677}
678
78d313c6
BH
679/*
680 * This function disables the event log buffer
681 */
682static void iommu_disable_event_buffer(struct amd_iommu *iommu)
683{
684 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
685}
686
335503e5
JR
687static void __init free_event_buffer(struct amd_iommu *iommu)
688{
689 free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
690}
691
1a29ac01 692/* allocates the memory where the IOMMU will log peripheral page requests to */
f2c2db53 693static int __init alloc_ppr_log(struct amd_iommu *iommu)
1a29ac01 694{
f2c2db53
JR
695 iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
696 get_order(PPR_LOG_SIZE));
1a29ac01 697
f2c2db53 698 return iommu->ppr_log ? 0 : -ENOMEM;
1a29ac01
JR
699}
700
701static void iommu_enable_ppr_log(struct amd_iommu *iommu)
702{
703 u64 entry;
704
705 if (iommu->ppr_log == NULL)
706 return;
707
2543a786 708 entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
1a29ac01
JR
709
710 memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
711 &entry, sizeof(entry));
712
713 /* set head and tail to zero manually */
714 writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
715 writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
716
bde9e6b9 717 iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
1a29ac01
JR
718 iommu_feature_enable(iommu, CONTROL_PPR_EN);
719}
720
721static void __init free_ppr_log(struct amd_iommu *iommu)
722{
1a29ac01
JR
723 free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
724}
725
8bda0cfb
SS
726static void free_ga_log(struct amd_iommu *iommu)
727{
728#ifdef CONFIG_IRQ_REMAP
092550ea
LZ
729 free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));
730 free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
8bda0cfb
SS
731#endif
732}
733
734static int iommu_ga_log_enable(struct amd_iommu *iommu)
735{
736#ifdef CONFIG_IRQ_REMAP
737 u32 status, i;
738
739 if (!iommu->ga_log)
740 return -EINVAL;
741
742 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
743
744 /* Check if already running */
745 if (status & (MMIO_STATUS_GALOG_RUN_MASK))
746 return 0;
747
748 iommu_feature_enable(iommu, CONTROL_GAINT_EN);
749 iommu_feature_enable(iommu, CONTROL_GALOG_EN);
750
751 for (i = 0; i < LOOP_TIMEOUT; ++i) {
752 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
753 if (status & (MMIO_STATUS_GALOG_RUN_MASK))
754 break;
755 }
756
757 if (i >= LOOP_TIMEOUT)
758 return -EINVAL;
759#endif /* CONFIG_IRQ_REMAP */
760 return 0;
761}
762
763#ifdef CONFIG_IRQ_REMAP
764static int iommu_init_ga_log(struct amd_iommu *iommu)
765{
766 u64 entry;
767
768 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
769 return 0;
770
771 iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
772 get_order(GA_LOG_SIZE));
773 if (!iommu->ga_log)
774 goto err_out;
775
776 iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
777 get_order(8));
778 if (!iommu->ga_log_tail)
779 goto err_out;
780
2543a786 781 entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
8bda0cfb
SS
782 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
783 &entry, sizeof(entry));
ab99be46
FS
784 entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
785 (BIT_ULL(52)-1)) & ~7ULL;
8bda0cfb
SS
786 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
787 &entry, sizeof(entry));
788 writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
789 writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
790
791 return 0;
792err_out:
793 free_ga_log(iommu);
794 return -EINVAL;
795}
796#endif /* CONFIG_IRQ_REMAP */
797
798static int iommu_init_ga(struct amd_iommu *iommu)
799{
800 int ret = 0;
801
802#ifdef CONFIG_IRQ_REMAP
803 /* Note: We have already checked GASup from IVRS table.
804 * Now, we need to make sure that GAMSup is set.
805 */
806 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
807 !iommu_feature(iommu, FEATURE_GAM_VAPIC))
808 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
809
810 ret = iommu_init_ga_log(iommu);
811#endif /* CONFIG_IRQ_REMAP */
812
813 return ret;
814}
815
90fcffd9
SS
816static void iommu_enable_xt(struct amd_iommu *iommu)
817{
818#ifdef CONFIG_IRQ_REMAP
819 /*
820 * XT mode (32-bit APIC destination ID) requires
821 * GA mode (128-bit IRTE support) as a prerequisite.
822 */
823 if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
824 amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
825 iommu_feature_enable(iommu, CONTROL_XT_EN);
826#endif /* CONFIG_IRQ_REMAP */
827}
828
cbc33a90
JR
829static void iommu_enable_gt(struct amd_iommu *iommu)
830{
831 if (!iommu_feature(iommu, FEATURE_GT))
832 return;
833
834 iommu_feature_enable(iommu, CONTROL_GT_EN);
835}
836
b65233a9 837/* sets a specific bit in the device table entry. */
3566b778
JR
838static void set_dev_entry_bit(u16 devid, u8 bit)
839{
ee6c2868
JR
840 int i = (bit >> 6) & 0x03;
841 int _bit = bit & 0x3f;
3566b778 842
ee6c2868 843 amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
3566b778
JR
844}
845
c5cca146
JR
846static int get_dev_entry_bit(u16 devid, u8 bit)
847{
ee6c2868
JR
848 int i = (bit >> 6) & 0x03;
849 int _bit = bit & 0x3f;
c5cca146 850
ee6c2868 851 return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
c5cca146
JR
852}
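/*
 * Bit addressing sketch (illustrative): the DEV_ENTRY_* constants are flat
 * bit numbers within the 256-bit device table entry, which the two helpers
 * above split into a data[] word index and a bit position. For a
 * hypothetical bit number 0x61:
 *
 *	i    = (0x61 >> 6) & 0x03 == 1		(second 64-bit word)
 *	_bit = 0x61 & 0x3f        == 0x21	(bit 33 of data[1])
 */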
853
854
45a01c42
BH
855static bool copy_device_table(void)
856{
ae162efb 857 u64 int_ctl, int_tab_len, entry = 0, last_entry = 0;
45a01c42
BH
858 struct dev_table_entry *old_devtb = NULL;
859 u32 lo, hi, devid, old_devtb_size;
860 phys_addr_t old_devtb_phys;
45a01c42 861 struct amd_iommu *iommu;
53019a9e 862 u16 dom_id, dte_v, irq_v;
45a01c42 863 gfp_t gfp_flag;
daae2d25 864 u64 tmp;
45a01c42 865
3ac3e5ee
BH
866 if (!amd_iommu_pre_enabled)
867 return false;
45a01c42
BH
868
869 pr_warn("Translation is already enabled - trying to copy translation structures\n");
870 for_each_iommu(iommu) {
871 /* All IOMMUs should use the same device table with the same size */
872 lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
873 hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
874 entry = (((u64) hi) << 32) + lo;
875 if (last_entry && last_entry != entry) {
3c6bae62 876 pr_err("IOMMU:%d should use the same dev table as others!\n",
45a01c42
BH
877 iommu->index);
878 return false;
879 }
880 last_entry = entry;
881
882 old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
883 if (old_devtb_size != dev_table_size) {
3c6bae62 884 pr_err("The device table size of IOMMU:%d is not expected!\n",
45a01c42
BH
885 iommu->index);
886 return false;
887 }
888 }
889
8780158c
LJ
890 /*
891 * When SME is enabled in the first kernel, the entry includes the
892 * memory encryption mask(sme_me_mask), we must remove the memory
893 * encryption mask to obtain the true physical address in kdump kernel.
894 */
895 old_devtb_phys = __sme_clr(entry) & PAGE_MASK;
896
b336781b 897 if (old_devtb_phys >= 0x100000000ULL) {
3c6bae62 898 pr_err("The address of old device table is above 4G, not trustworthy!\n");
b336781b
BH
899 return false;
900 }
8780158c
LJ
901 old_devtb = (sme_active() && is_kdump_kernel())
902 ? (__force void *)ioremap_encrypted(old_devtb_phys,
903 dev_table_size)
904 : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);
905
45a01c42
BH
906 if (!old_devtb)
907 return false;
908
b336781b 909 gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
45a01c42
BH
910 old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
911 get_order(dev_table_size));
912 if (old_dev_tbl_cpy == NULL) {
3c6bae62 913 pr_err("Failed to allocate memory for copying old device table!\n");
45a01c42
BH
914 return false;
915 }
916
917 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
918 old_dev_tbl_cpy[devid] = old_devtb[devid];
919 dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
920 dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;
53019a9e
BH
921
922 if (dte_v && dom_id) {
923 old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
924 old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
45a01c42 925 __set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
daae2d25
BH
926 /* If gcr3 table existed, mask it out */
927 if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
928 tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
929 tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
930 old_dev_tbl_cpy[devid].data[1] &= ~tmp;
931 tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
932 tmp |= DTE_FLAG_GV;
933 old_dev_tbl_cpy[devid].data[0] &= ~tmp;
934 }
53019a9e
BH
935 }
936
937 irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
938 int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
939 int_tab_len = old_devtb[devid].data[2] & DTE_IRQ_TABLE_LEN_MASK;
940 if (irq_v && (int_ctl || int_tab_len)) {
941 if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
942 (int_tab_len != DTE_IRQ_TABLE_LEN)) {
943 pr_err("Wrong old irq remapping flag: %#x\n", devid);
944 return false;
945 }
946
947 old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
948 }
45a01c42
BH
949 }
950 memunmap(old_devtb);
951
952 return true;
953}
954
c5cca146
JR
955void amd_iommu_apply_erratum_63(u16 devid)
956{
957 int sysmgt;
958
959 sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
960 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);
961
962 if (sysmgt == 0x01)
963 set_dev_entry_bit(devid, DEV_ENTRY_IW);
964}
965
5ff4789d
JR
966/* Writes the specific IOMMU for a device into the rlookup table */
967static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
968{
969 amd_iommu_rlookup_table[devid] = iommu;
970}
971
b65233a9
JR
972/*
973 * This function takes the device specific flags read from the ACPI
974 * table and sets up the device table entry with that information
975 */
5ff4789d
JR
976static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
977 u16 devid, u32 flags, u32 ext_flags)
3566b778
JR
978{
979 if (flags & ACPI_DEVFLAG_INITPASS)
980 set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
981 if (flags & ACPI_DEVFLAG_EXTINT)
982 set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
983 if (flags & ACPI_DEVFLAG_NMI)
984 set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
985 if (flags & ACPI_DEVFLAG_SYSMGT1)
986 set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
987 if (flags & ACPI_DEVFLAG_SYSMGT2)
988 set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
989 if (flags & ACPI_DEVFLAG_LINT0)
990 set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
991 if (flags & ACPI_DEVFLAG_LINT1)
992 set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);
3566b778 993
c5cca146
JR
994 amd_iommu_apply_erratum_63(devid);
995
5ff4789d 996 set_iommu_for_device(iommu, devid);
3566b778
JR
997}
998
93d05155 999int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
6efed63b
JR
1000{
1001 struct devid_map *entry;
1002 struct list_head *list;
1003
31cff67f
JR
1004 if (type == IVHD_SPECIAL_IOAPIC)
1005 list = &ioapic_map;
1006 else if (type == IVHD_SPECIAL_HPET)
1007 list = &hpet_map;
1008 else
6efed63b
JR
1009 return -EINVAL;
1010
31cff67f
JR
1011 list_for_each_entry(entry, list, list) {
1012 if (!(entry->id == id && entry->cmd_line))
1013 continue;
1014
101fa037 1015 pr_info("Command-line override present for %s id %d - ignoring\n",
31cff67f
JR
1016 type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);
1017
c50e3247
JR
1018 *devid = entry->devid;
1019
31cff67f
JR
1020 return 0;
1021 }
1022
6efed63b
JR
1023 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1024 if (!entry)
1025 return -ENOMEM;
1026
31cff67f 1027 entry->id = id;
c50e3247 1028 entry->devid = *devid;
31cff67f 1029 entry->cmd_line = cmd_line;
6efed63b
JR
1030
1031 list_add_tail(&entry->list, list);
1032
1033 return 0;
1034}
1035
2a0cb4e2
WZ
1036static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
1037 bool cmd_line)
1038{
1039 struct acpihid_map_entry *entry;
1040 struct list_head *list = &acpihid_map;
1041
1042 list_for_each_entry(entry, list, list) {
1043 if (strcmp(entry->hid, hid) ||
1044 (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
1045 !entry->cmd_line)
1046 continue;
1047
101fa037 1048 pr_info("Command-line override for hid:%s uid:%s\n",
2a0cb4e2
WZ
1049 hid, uid);
1050 *devid = entry->devid;
1051 return 0;
1052 }
1053
1054 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1055 if (!entry)
1056 return -ENOMEM;
1057
1058 memcpy(entry->uid, uid, strlen(uid));
1059 memcpy(entry->hid, hid, strlen(hid));
1060 entry->devid = *devid;
1061 entry->cmd_line = cmd_line;
1062 entry->root_devid = (entry->devid & (~0x7));
1063
101fa037 1064 pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n",
2a0cb4e2
WZ
1065 entry->cmd_line ? "cmd" : "ivrs",
1066 entry->hid, entry->uid, entry->root_devid);
1067
1068 list_add_tail(&entry->list, list);
1069 return 0;
1070}
1071
235dacbc
JR
1072static int __init add_early_maps(void)
1073{
1074 int i, ret;
1075
1076 for (i = 0; i < early_ioapic_map_size; ++i) {
1077 ret = add_special_device(IVHD_SPECIAL_IOAPIC,
1078 early_ioapic_map[i].id,
c50e3247 1079 &early_ioapic_map[i].devid,
235dacbc
JR
1080 early_ioapic_map[i].cmd_line);
1081 if (ret)
1082 return ret;
1083 }
1084
1085 for (i = 0; i < early_hpet_map_size; ++i) {
1086 ret = add_special_device(IVHD_SPECIAL_HPET,
1087 early_hpet_map[i].id,
c50e3247 1088 &early_hpet_map[i].devid,
235dacbc
JR
1089 early_hpet_map[i].cmd_line);
1090 if (ret)
1091 return ret;
1092 }
1093
2a0cb4e2
WZ
1094 for (i = 0; i < early_acpihid_map_size; ++i) {
1095 ret = add_acpi_hid_device(early_acpihid_map[i].hid,
1096 early_acpihid_map[i].uid,
1097 &early_acpihid_map[i].devid,
1098 early_acpihid_map[i].cmd_line);
1099 if (ret)
1100 return ret;
1101 }
1102
235dacbc
JR
1103 return 0;
1104}
1105
b65233a9 1106/*
df805abb 1107 * Reads the device exclusion range from ACPI and initializes the IOMMU with
b65233a9
JR
1108 * it
1109 */
3566b778
JR
1110static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
1111{
3566b778
JR
1112 if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
1113 return;
1114
387caf0b
AH
1115 /*
1116 * Treat per-device exclusion ranges as r/w unity-mapped regions
1117 * since some buggy BIOSes overwrite the exclusion range (the
1118 * exclusion_start and exclusion_length members). This
1119 * happens when there are multiple exclusion ranges (IVMD entries)
1120 * defined in the ACPI table.
1121 */
1122 m->flags = (IVMD_FLAG_IW | IVMD_FLAG_IR | IVMD_FLAG_UNITY_MAP);
3566b778
JR
1123}
1124
b65233a9
JR
1125/*
1126 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
1127 * initializes the hardware and our data structures with it.
1128 */
6efed63b 1129static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
5d0c8e49
JR
1130 struct ivhd_header *h)
1131{
1132 u8 *p = (u8 *)h;
1133 u8 *end = p, flags = 0;
0de66d5b
JR
1134 u16 devid = 0, devid_start = 0, devid_to = 0;
1135 u32 dev_i, ext_flags = 0;
58a3bee5 1136 bool alias = false;
5d0c8e49 1137 struct ivhd_entry *e;
ac7ccf67 1138 u32 ivhd_size;
235dacbc
JR
1139 int ret;
1140
1141
1142 ret = add_early_maps();
1143 if (ret)
1144 return ret;
5d0c8e49 1145
93d05155
KHF
1146 amd_iommu_apply_ivrs_quirks();
1147
5d0c8e49 1148 /*
e9bf5197 1149 * First save the recommended feature enable bits from ACPI
5d0c8e49 1150 */
e9bf5197 1151 iommu->acpi_flags = h->flags;
5d0c8e49
JR
1152
1153 /*
1154 * Done. Now parse the device entries
1155 */
ac7ccf67
SS
1156 ivhd_size = get_ivhd_header_size(h);
1157 if (!ivhd_size) {
101fa037 1158 pr_err("Unsupported IVHD type %#x\n", h->type);
ac7ccf67
SS
1159 return -EINVAL;
1160 }
1161
1162 p += ivhd_size;
1163
5d0c8e49
JR
1164 end += h->length;
1165
42a698f4 1166
5d0c8e49
JR
1167 while (p < end) {
1168 e = (struct ivhd_entry *)p;
1169 switch (e->type) {
1170 case IVHD_DEV_ALL:
42a698f4 1171
226e889b 1172 DUMP_printk(" DEV_ALL\t\t\tflags: %02x\n", e->flags);
42a698f4 1173
226e889b
JR
1174 for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
1175 set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
5d0c8e49
JR
1176 break;
1177 case IVHD_DEV_SELECT:
42a698f4
JR
1178
1179 DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x "
1180 "flags: %02x\n",
c5081cd7 1181 PCI_BUS_NUM(e->devid),
42a698f4
JR
1182 PCI_SLOT(e->devid),
1183 PCI_FUNC(e->devid),
1184 e->flags);
1185
5d0c8e49 1186 devid = e->devid;
5ff4789d 1187 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
5d0c8e49
JR
1188 break;
1189 case IVHD_DEV_SELECT_RANGE_START:
42a698f4
JR
1190
1191 DUMP_printk(" DEV_SELECT_RANGE_START\t "
1192 "devid: %02x:%02x.%x flags: %02x\n",
c5081cd7 1193 PCI_BUS_NUM(e->devid),
42a698f4
JR
1194 PCI_SLOT(e->devid),
1195 PCI_FUNC(e->devid),
1196 e->flags);
1197
5d0c8e49
JR
1198 devid_start = e->devid;
1199 flags = e->flags;
1200 ext_flags = 0;
58a3bee5 1201 alias = false;
5d0c8e49
JR
1202 break;
1203 case IVHD_DEV_ALIAS:
42a698f4
JR
1204
1205 DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
1206 "flags: %02x devid_to: %02x:%02x.%x\n",
c5081cd7 1207 PCI_BUS_NUM(e->devid),
42a698f4
JR
1208 PCI_SLOT(e->devid),
1209 PCI_FUNC(e->devid),
1210 e->flags,
c5081cd7 1211 PCI_BUS_NUM(e->ext >> 8),
42a698f4
JR
1212 PCI_SLOT(e->ext >> 8),
1213 PCI_FUNC(e->ext >> 8));
1214
5d0c8e49
JR
1215 devid = e->devid;
1216 devid_to = e->ext >> 8;
7a6a3a08 1217 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
7455aab1 1218 set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
5d0c8e49
JR
1219 amd_iommu_alias_table[devid] = devid_to;
1220 break;
1221 case IVHD_DEV_ALIAS_RANGE:
42a698f4
JR
1222
1223 DUMP_printk(" DEV_ALIAS_RANGE\t\t "
1224 "devid: %02x:%02x.%x flags: %02x "
1225 "devid_to: %02x:%02x.%x\n",
c5081cd7 1226 PCI_BUS_NUM(e->devid),
42a698f4
JR
1227 PCI_SLOT(e->devid),
1228 PCI_FUNC(e->devid),
1229 e->flags,
c5081cd7 1230 PCI_BUS_NUM(e->ext >> 8),
42a698f4
JR
1231 PCI_SLOT(e->ext >> 8),
1232 PCI_FUNC(e->ext >> 8));
1233
5d0c8e49
JR
1234 devid_start = e->devid;
1235 flags = e->flags;
1236 devid_to = e->ext >> 8;
1237 ext_flags = 0;
58a3bee5 1238 alias = true;
5d0c8e49
JR
1239 break;
1240 case IVHD_DEV_EXT_SELECT:
42a698f4
JR
1241
1242 DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
1243 "flags: %02x ext: %08x\n",
c5081cd7 1244 PCI_BUS_NUM(e->devid),
42a698f4
JR
1245 PCI_SLOT(e->devid),
1246 PCI_FUNC(e->devid),
1247 e->flags, e->ext);
1248
5d0c8e49 1249 devid = e->devid;
5ff4789d
JR
1250 set_dev_entry_from_acpi(iommu, devid, e->flags,
1251 e->ext);
5d0c8e49
JR
1252 break;
1253 case IVHD_DEV_EXT_SELECT_RANGE:
42a698f4
JR
1254
1255 DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: "
1256 "%02x:%02x.%x flags: %02x ext: %08x\n",
c5081cd7 1257 PCI_BUS_NUM(e->devid),
42a698f4
JR
1258 PCI_SLOT(e->devid),
1259 PCI_FUNC(e->devid),
1260 e->flags, e->ext);
1261
5d0c8e49
JR
1262 devid_start = e->devid;
1263 flags = e->flags;
1264 ext_flags = e->ext;
58a3bee5 1265 alias = false;
5d0c8e49
JR
1266 break;
1267 case IVHD_DEV_RANGE_END:
42a698f4
JR
1268
1269 DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
c5081cd7 1270 PCI_BUS_NUM(e->devid),
42a698f4
JR
1271 PCI_SLOT(e->devid),
1272 PCI_FUNC(e->devid));
1273
5d0c8e49
JR
1274 devid = e->devid;
1275 for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
7a6a3a08 1276 if (alias) {
5d0c8e49 1277 amd_iommu_alias_table[dev_i] = devid_to;
7a6a3a08
JR
1278 set_dev_entry_from_acpi(iommu,
1279 devid_to, flags, ext_flags);
1280 }
1281 set_dev_entry_from_acpi(iommu, dev_i,
1282 flags, ext_flags);
5d0c8e49
JR
1283 }
1284 break;
6efed63b
JR
1285 case IVHD_DEV_SPECIAL: {
1286 u8 handle, type;
1287 const char *var;
1288 u16 devid;
1289 int ret;
1290
1291 handle = e->ext & 0xff;
1292 devid = (e->ext >> 8) & 0xffff;
1293 type = (e->ext >> 24) & 0xff;
1294
1295 if (type == IVHD_SPECIAL_IOAPIC)
1296 var = "IOAPIC";
1297 else if (type == IVHD_SPECIAL_HPET)
1298 var = "HPET";
1299 else
1300 var = "UNKNOWN";
1301
1302 DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
1303 var, (int)handle,
c5081cd7 1304 PCI_BUS_NUM(devid),
6efed63b
JR
1305 PCI_SLOT(devid),
1306 PCI_FUNC(devid));
1307
c50e3247 1308 ret = add_special_device(type, handle, &devid, false);
6efed63b
JR
1309 if (ret)
1310 return ret;
c50e3247
JR
1311
1312 /*
1313 * add_special_device might update the devid in case a
1314 * command-line override is present. So call
1315 * set_dev_entry_from_acpi after add_special_device.
1316 */
1317 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1318
6efed63b
JR
1319 break;
1320 }
2a0cb4e2
WZ
1321 case IVHD_DEV_ACPI_HID: {
1322 u16 devid;
e461b8c9
AM
1323 u8 hid[ACPIHID_HID_LEN];
1324 u8 uid[ACPIHID_UID_LEN];
2a0cb4e2
WZ
1325 int ret;
1326
1327 if (h->type != 0x40) {
1328 pr_err(FW_BUG "Invalid IVHD device type %#x\n",
1329 e->type);
1330 break;
1331 }
1332
1333 memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
1334 hid[ACPIHID_HID_LEN - 1] = '\0';
1335
1336 if (!(*hid)) {
1337 pr_err(FW_BUG "Invalid HID.\n");
1338 break;
1339 }
1340
e461b8c9 1341 uid[0] = '\0';
2a0cb4e2
WZ
1342 switch (e->uidf) {
1343 case UID_NOT_PRESENT:
1344
1345 if (e->uidl != 0)
1346 pr_warn(FW_BUG "Invalid UID length.\n");
1347
1348 break;
1349 case UID_IS_INTEGER:
1350
1351 sprintf(uid, "%d", e->uid);
1352
1353 break;
1354 case UID_IS_CHARACTER:
1355
e461b8c9
AM
1356 memcpy(uid, &e->uid, e->uidl);
1357 uid[e->uidl] = '\0';
2a0cb4e2
WZ
1358
1359 break;
1360 default:
1361 break;
1362 }
1363
6082ee72 1364 devid = e->devid;
2a0cb4e2
WZ
1365 DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
1366 hid, uid,
1367 PCI_BUS_NUM(devid),
1368 PCI_SLOT(devid),
1369 PCI_FUNC(devid));
1370
2a0cb4e2
WZ
1371 flags = e->flags;
1372
1373 ret = add_acpi_hid_device(hid, uid, &devid, false);
1374 if (ret)
1375 return ret;
1376
1377 /*
1378 * add_special_device might update the devid in case a
1379 * command-line override is present. So call
1380 * set_dev_entry_from_acpi after add_special_device.
1381 */
1382 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1383
1384 break;
1385 }
5d0c8e49
JR
1386 default:
1387 break;
1388 }
1389
b514e555 1390 p += ivhd_entry_length(p);
5d0c8e49 1391 }
6efed63b
JR
1392
1393 return 0;
5d0c8e49
JR
1394}
1395
e47d402d
JR
1396static void __init free_iommu_one(struct amd_iommu *iommu)
1397{
1398 free_command_buffer(iommu);
335503e5 1399 free_event_buffer(iommu);
1a29ac01 1400 free_ppr_log(iommu);
8bda0cfb 1401 free_ga_log(iommu);
e47d402d
JR
1402 iommu_unmap_mmio_space(iommu);
1403}
1404
1405static void __init free_iommu_all(void)
1406{
1407 struct amd_iommu *iommu, *next;
1408
3bd22172 1409 for_each_iommu_safe(iommu, next) {
e47d402d
JR
1410 list_del(&iommu->list);
1411 free_iommu_one(iommu);
1412 kfree(iommu);
1413 }
1414}
1415
318fe782
SS
1416/*
1417 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
1418 * Workaround:
1419 * BIOS should disable L2B miscellaneous clock gating by setting
1420 * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
1421 */
e2f1a3bd 1422static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
318fe782
SS
1423{
1424 u32 value;
1425
1426 if ((boot_cpu_data.x86 != 0x15) ||
1427 (boot_cpu_data.x86_model < 0x10) ||
1428 (boot_cpu_data.x86_model > 0x1f))
1429 return;
1430
1431 pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1432 pci_read_config_dword(iommu->dev, 0xf4, &value);
1433
1434 if (value & BIT(2))
1435 return;
1436
1437 /* Select NB indirect register 0x90 and enable writing */
1438 pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));
1439
1440 pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
5f226da1 1441 pci_info(iommu->dev, "Applying erratum 746 workaround\n");
318fe782
SS
1442
1443 /* Clear the enable writing bit */
1444 pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1445}
1446
358875fd
JC
1447/*
1448 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
1449 * Workaround:
1450 * BIOS should enable ATS write permission check by setting
1451 * L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
1452 */
1453static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
1454{
1455 u32 value;
1456
1457 if ((boot_cpu_data.x86 != 0x15) ||
1458 (boot_cpu_data.x86_model < 0x30) ||
1459 (boot_cpu_data.x86_model > 0x3f))
1460 return;
1461
1462 /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
1463 value = iommu_read_l2(iommu, 0x47);
1464
1465 if (value & BIT(0))
1466 return;
1467
1468 /* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
1469 iommu_write_l2(iommu, 0x47, value | BIT(0));
1470
5f226da1 1471 pci_info(iommu->dev, "Applying ATS write check workaround\n");
358875fd
JC
1472}
1473
b65233a9
JR
1474/*
1475 * This function glues the initialization of one IOMMU together. It
1476 * also allocates the command buffer and programs the
1477 * hardware. It does NOT enable the IOMMU. This is done afterwards.
1478 */
e47d402d
JR
1479static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
1480{
6efed63b
JR
1481 int ret;
1482
27790398 1483 raw_spin_lock_init(&iommu->lock);
bb52777e
JR
1484
1485 /* Add IOMMU to internal data structures */
e47d402d 1486 list_add_tail(&iommu->list, &amd_iommu_list);
6b9376e3 1487 iommu->index = amd_iommus_present++;
bb52777e
JR
1488
1489 if (unlikely(iommu->index >= MAX_IOMMUS)) {
101fa037 1490 WARN(1, "System has more IOMMUs than supported by this driver\n");
bb52777e
JR
1491 return -ENOSYS;
1492 }
1493
1494 /* Index is fine - add IOMMU to the array */
1495 amd_iommus[iommu->index] = iommu;
e47d402d
JR
1496
1497 /*
1498 * Copy data from ACPI table entry to the iommu struct
1499 */
23c742db 1500 iommu->devid = h->devid;
e47d402d 1501 iommu->cap_ptr = h->cap_ptr;
ee893c24 1502 iommu->pci_seg = h->pci_seg;
e47d402d 1503 iommu->mmio_phys = h->mmio_phys;
30861ddc 1504
7d7d38af
SS
1505 switch (h->type) {
1506 case 0x10:
1507 /* Check if IVHD EFR contains proper max banks/counters */
1508 if ((h->efr_attr != 0) &&
1509 ((h->efr_attr & (0xF << 13)) != 0) &&
1510 ((h->efr_attr & (0x3F << 17)) != 0))
1511 iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1512 else
1513 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
3928aa3f
SS
1514 if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
1515 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
7d7d38af
SS
1516 break;
1517 case 0x11:
1518 case 0x40:
1519 if (h->efr_reg & (1 << 9))
1520 iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1521 else
1522 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
3928aa3f
SS
1523 if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
1524 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
81307143
SS
1525 /*
1526 * Note: Since iommu_update_intcapxt() leverages
1527 * the IOMMU MMIO access to MSI capability block registers
1528 * for MSI address lo/hi/data, we need to check both
1529 * EFR[XtSup] and EFR[MsiCapMmioSup] for x2APIC support.
1530 */
1531 if ((h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) &&
1532 (h->efr_reg & BIT(IOMMU_EFR_MSICAPMMIOSUP_SHIFT)))
1533 amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
7d7d38af
SS
1534 break;
1535 default:
1536 return -EINVAL;
30861ddc
SK
1537 }
1538
1539 iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
1540 iommu->mmio_phys_end);
e47d402d
JR
1541 if (!iommu->mmio_base)
1542 return -ENOMEM;
1543
f2c2db53 1544 if (alloc_command_buffer(iommu))
e47d402d
JR
1545 return -ENOMEM;
1546
f2c2db53 1547 if (alloc_event_buffer(iommu))
335503e5
JR
1548 return -ENOMEM;
1549
a80dc3e0
JR
1550 iommu->int_enabled = false;
1551
4c232a70 1552 init_translation_status(iommu);
3ac3e5ee
BH
1553 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
1554 iommu_disable(iommu);
1555 clear_translation_pre_enabled(iommu);
1556 pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
1557 iommu->index);
1558 }
1559 if (amd_iommu_pre_enabled)
1560 amd_iommu_pre_enabled = translation_pre_enabled(iommu);
4c232a70 1561
6efed63b
JR
1562 ret = init_iommu_from_acpi(iommu, h);
1563 if (ret)
1564 return ret;
f6fec00a 1565
7c71d306
JL
1566 ret = amd_iommu_create_irq_domain(iommu);
1567 if (ret)
1568 return ret;
1569
f6fec00a
JR
1570 /*
1571 * Make sure IOMMU is not considered to translate itself. The IVRS
1572 * table tells us so, but this is a lie!
1573 */
1574 amd_iommu_rlookup_table[iommu->devid] = NULL;
1575
23c742db 1576 return 0;
e47d402d
JR
1577}
1578
8c7142f5
SS
1579/**
1580 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
1581 * @ivrs: Pointer to the IVRS header
1582 *
1583 * This function searches through all IVHD blocks for the maximum supported IVHD type
1584 */
1585static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
1586{
1587 u8 *base = (u8 *)ivrs;
1588 struct ivhd_header *ivhd = (struct ivhd_header *)
1589 (base + IVRS_HEADER_LENGTH);
1590 u8 last_type = ivhd->type;
1591 u16 devid = ivhd->devid;
1592
1593 while (((u8 *)ivhd - base < ivrs->length) &&
1594 (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
1595 u8 *p = (u8 *) ivhd;
1596
1597 if (ivhd->devid == devid)
1598 last_type = ivhd->type;
1599 ivhd = (struct ivhd_header *)(p + ivhd->length);
1600 }
1601
1602 return last_type;
1603}
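/*
 * Example (illustrative): if the IVRS table contains IVHD blocks of type
 * 0x10, 0x11 and 0x40 for the same IOMMU (same devid), the loop records the
 * type of each matching block it encounters and, with the usual ascending
 * ordering of IVHD blocks, returns 0x40, the richest format this driver
 * supports (ACPI_IVHD_TYPE_MAX_SUPPORTED). A block with a type above 0x40
 * stops the scan so that only supported formats are considered.
 */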
1604
b65233a9
JR
1605/*
1606 * Iterates over all IOMMU entries in the ACPI table, allocates the
1607 * IOMMU structure and initializes it with init_iommu_one()
1608 */
e47d402d
JR
1609static int __init init_iommu_all(struct acpi_table_header *table)
1610{
1611 u8 *p = (u8 *)table, *end = (u8 *)table;
1612 struct ivhd_header *h;
1613 struct amd_iommu *iommu;
1614 int ret;
1615
e47d402d
JR
1616 end += table->length;
1617 p += IVRS_HEADER_LENGTH;
1618
1619 while (p < end) {
1620 h = (struct ivhd_header *)p;
8c7142f5 1621 if (*p == amd_iommu_target_ivhd_type) {
9c72041f 1622
ae908c22 1623 DUMP_printk("device: %02x:%02x.%01x cap: %04x "
9c72041f 1624 "seg: %d flags: %01x info %04x\n",
c5081cd7 1625 PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
9c72041f
JR
1626 PCI_FUNC(h->devid), h->cap_ptr,
1627 h->pci_seg, h->flags, h->info);
1628 DUMP_printk(" mmio-addr: %016llx\n",
1629 h->mmio_phys);
1630
e47d402d 1631 iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
02f3b3f5
JR
1632 if (iommu == NULL)
1633 return -ENOMEM;
3551a708 1634
e47d402d 1635 ret = init_iommu_one(iommu, h);
02f3b3f5
JR
1636 if (ret)
1637 return ret;
e47d402d
JR
1638 }
1639 p += h->length;
1640
1641 }
1642 WARN_ON(p != end);
1643
1644 return 0;
1645}
1646
1650dfd1
SS
1647static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
1648 u8 fxn, u64 *value, bool is_write);
30861ddc
SK
1649
1650static void init_iommu_perf_ctr(struct amd_iommu *iommu)
1651{
5f226da1 1652 struct pci_dev *pdev = iommu->dev;
8c17bbf6 1653 u64 val = 0xabcd, val2 = 0, save_reg = 0;
30861ddc
SK
1654
1655 if (!iommu_feature(iommu, FEATURE_PC))
1656 return;
1657
1658 amd_iommu_pc_present = true;
1659
8c17bbf6
SK
1660 /* save the value to restore, if writable */
1661 if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false))
1662 goto pc_false;
1663
30861ddc 1664 /* Check if the performance counters can be written to */
1650dfd1
SS
1665 if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
1666 (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
8c17bbf6
SK
1667 (val != val2))
1668 goto pc_false;
1669
1670 /* restore */
1671 if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true))
1672 goto pc_false;
30861ddc 1673
5f226da1 1674 pci_info(pdev, "IOMMU performance counters supported\n");
30861ddc
SK
1675
1676 val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
1677 iommu->max_banks = (u8) ((val >> 12) & 0x3f);
1678 iommu->max_counters = (u8) ((val >> 7) & 0xf);
8c17bbf6
SK
1679
1680 return;
1681
1682pc_false:
1683 pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
1684 amd_iommu_pc_present = false;
1685 return;
30861ddc
SK
1686}
1687
066f2e98
AW
1688static ssize_t amd_iommu_show_cap(struct device *dev,
1689 struct device_attribute *attr,
1690 char *buf)
1691{
b7a42b9d 1692 struct amd_iommu *iommu = dev_to_amd_iommu(dev);
066f2e98
AW
1693 return sprintf(buf, "%x\n", iommu->cap);
1694}
1695static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);
1696
1697static ssize_t amd_iommu_show_features(struct device *dev,
1698 struct device_attribute *attr,
1699 char *buf)
1700{
b7a42b9d 1701 struct amd_iommu *iommu = dev_to_amd_iommu(dev);
066f2e98
AW
1702 return sprintf(buf, "%llx\n", iommu->features);
1703}
1704static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);
1705
1706static struct attribute *amd_iommu_attrs[] = {
1707 &dev_attr_cap.attr,
1708 &dev_attr_features.attr,
1709 NULL,
1710};
1711
1712static struct attribute_group amd_iommu_group = {
1713 .name = "amd-iommu",
1714 .attrs = amd_iommu_attrs,
1715};
1716
1717static const struct attribute_group *amd_iommu_groups[] = {
1718 &amd_iommu_group,
1719 NULL,
1720};
30861ddc 1721
24d2c521 1722static int __init iommu_init_pci(struct amd_iommu *iommu)
23c742db
JR
1723{
1724 int cap_ptr = iommu->cap_ptr;
8bda0cfb 1725 int ret;
23c742db 1726
d5bf0f4f
SK
1727 iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
1728 iommu->devid & 0xff);
23c742db
JR
1729 if (!iommu->dev)
1730 return -ENODEV;
1731
cbbc00be
JL
1732 /* Prevent binding other PCI device drivers to IOMMU devices */
1733 iommu->dev->match_driver = false;
1734
23c742db
JR
1735 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
1736 &iommu->cap);
23c742db 1737
23c742db
JR
1738 if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
1739 amd_iommu_iotlb_sup = false;
1740
1741 /* read extended feature bits */
62dcee71 1742 iommu->features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
23c742db
JR
1743
1744 if (iommu_feature(iommu, FEATURE_GT)) {
1745 int glxval;
a919a018
SS
1746 u32 max_pasid;
1747 u64 pasmax;
23c742db 1748
a919a018
SS
1749 pasmax = iommu->features & FEATURE_PASID_MASK;
1750 pasmax >>= FEATURE_PASID_SHIFT;
1751 max_pasid = (1 << (pasmax + 1)) - 1;
23c742db 1752
a919a018
SS
1753 amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);
1754
1755 BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);
23c742db
JR
1756
1757 glxval = iommu->features & FEATURE_GLXVAL_MASK;
1758 glxval >>= FEATURE_GLXVAL_SHIFT;
1759
1760 if (amd_iommu_max_glx_val == -1)
1761 amd_iommu_max_glx_val = glxval;
1762 else
1763 amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
1764 }
1765
1766 if (iommu_feature(iommu, FEATURE_GT) &&
1767 iommu_feature(iommu, FEATURE_PPR)) {
1768 iommu->is_iommu_v2 = true;
1769 amd_iommu_v2_present = true;
1770 }
1771
f2c2db53
JR
1772 if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
1773 return -ENOMEM;
23c742db 1774
8bda0cfb
SS
1775 ret = iommu_init_ga(iommu);
1776 if (ret)
1777 return ret;
3928aa3f 1778
23c742db
JR
1779 if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
1780 amd_iommu_np_cache = true;
1781
30861ddc
SK
1782 init_iommu_perf_ctr(iommu);
1783
23c742db
JR
1784 if (is_rd890_iommu(iommu->dev)) {
1785 int i, j;
1786
d5bf0f4f
SK
1787 iommu->root_pdev =
1788 pci_get_domain_bus_and_slot(0, iommu->dev->bus->number,
1789 PCI_DEVFN(0, 0));
23c742db
JR
1790
1791 /*
1792 * Some rd890 systems may not be fully reconfigured by the
1793 * BIOS, so it's necessary for us to store this information so
1794 * it can be reprogrammed on resume
1795 */
1796 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
1797 &iommu->stored_addr_lo);
1798 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
1799 &iommu->stored_addr_hi);
1800
1801 /* Low bit locks writes to configuration space */
1802 iommu->stored_addr_lo &= ~1;
1803
1804 for (i = 0; i < 6; i++)
1805 for (j = 0; j < 0x12; j++)
1806 iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
1807
1808 for (i = 0; i < 0x83; i++)
1809 iommu->stored_l2[i] = iommu_read_l2(iommu, i);
1810 }
1811
318fe782 1812 amd_iommu_erratum_746_workaround(iommu);
358875fd 1813 amd_iommu_ats_write_check_workaround(iommu);
318fe782 1814
39ab9555
JR
1815 iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
1816 amd_iommu_groups, "ivhd%d", iommu->index);
b0119e87
JR
1817 iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
1818 iommu_device_register(&iommu->iommu);
066f2e98 1819
23c742db
JR
1820 return pci_enable_device(iommu->dev);
1821}
1822
4d121c32
JR
1823static void print_iommu_info(void)
1824{
1825 static const char * const feat_str[] = {
1826 "PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
1827 "IA", "GA", "HE", "PC"
1828 };
1829 struct amd_iommu *iommu;
1830
1831 for_each_iommu(iommu) {
5f226da1 1832 struct pci_dev *pdev = iommu->dev;
4d121c32
JR
1833 int i;
1834
5f226da1 1835 pci_info(pdev, "Found IOMMU cap 0x%hx\n", iommu->cap_ptr);
4d121c32
JR
1836
1837 if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
9a295ff0 1838 pci_info(pdev, "Extended features (%#llx):",
5f226da1 1839 iommu->features);
2bd5ed00 1840 for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
4d121c32
JR
1841 if (iommu_feature(iommu, (1ULL << i)))
1842 pr_cont(" %s", feat_str[i]);
1843 }
3928aa3f
SS
1844
1845 if (iommu->features & FEATURE_GAM_VAPIC)
1846 pr_cont(" GA_vAPIC");
1847
30861ddc 1848 pr_cont("\n");
500c25ed 1849 }
4d121c32 1850 }
3928aa3f 1851 if (irq_remapping_enabled) {
101fa037 1852 pr_info("Interrupt remapping enabled\n");
3928aa3f 1853 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
101fa037 1854 pr_info("Virtual APIC enabled\n");
90fcffd9 1855 if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
101fa037 1856 pr_info("X2APIC enabled\n");
3928aa3f 1857 }
4d121c32
JR
1858}
1859
2c0ae172 1860static int __init amd_iommu_init_pci(void)
23c742db
JR
1861{
1862 struct amd_iommu *iommu;
1863 int ret = 0;
1864
1865 for_each_iommu(iommu) {
1866 ret = iommu_init_pci(iommu);
1867 if (ret)
1868 break;
1869 }
1870
522e5cb7
JR
1871 /*
1872 * Order is important here to make sure any unity map requirements are
1873 * fulfilled. The unity mappings are created and written to the device
1874 * table during the amd_iommu_init_api() call.
1875 *
1876 * After that we call init_device_table_dma() to make sure any
1877 * uninitialized DTE will block DMA, and in the end we flush the caches
1878 * of all IOMMUs to make sure the changes to the device table are
1879 * active.
1880 */
1881 ret = amd_iommu_init_api();
1882
aafd8ba0
JR
1883 init_device_table_dma();
1884
1885 for_each_iommu(iommu)
1886 iommu_flush_all_caches(iommu);
1887
3a18404c
JR
1888 if (!ret)
1889 print_iommu_info();
4d121c32 1890
23c742db
JR
1891 return ret;
1892}
1893
a80dc3e0
JR
1894/****************************************************************************
1895 *
1896 * The following functions initialize the MSI interrupts for all IOMMUs
df805abb 1897 * in the system. It's a bit challenging because there could be multiple
a80dc3e0
JR
1898 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
1899 * pci_dev.
1900 *
1901 ****************************************************************************/
1902
9f800de3 1903static int iommu_setup_msi(struct amd_iommu *iommu)
a80dc3e0
JR
1904{
1905 int r;
a80dc3e0 1906
9ddd592a
JR
1907 r = pci_enable_msi(iommu->dev);
1908 if (r)
1909 return r;
a80dc3e0 1910
72fe00f0
JR
1911 r = request_threaded_irq(iommu->dev->irq,
1912 amd_iommu_int_handler,
1913 amd_iommu_int_thread,
1914 0, "AMD-Vi",
3f398bc7 1915 iommu);
a80dc3e0
JR
1916
1917 if (r) {
1918 pci_disable_msi(iommu->dev);
9ddd592a 1919 return r;
a80dc3e0
JR
1920 }
1921
fab6afa3 1922 iommu->int_enabled = true;
1a29ac01 1923
a80dc3e0
JR
1924 return 0;
1925}
1926
66929812
SS
1927#define XT_INT_DEST_MODE(x) (((x) & 0x1ULL) << 2)
1928#define XT_INT_DEST_LO(x) (((x) & 0xFFFFFFULL) << 8)
1929#define XT_INT_VEC(x) (((x) & 0xFFULL) << 32)
1930#define XT_INT_DEST_HI(x) ((((x) >> 24) & 0xFFULL) << 56)
1931
1932/**
 1933 * Set up the IntCapXT registers with interrupt routing information
1934 * based on the PCI MSI capability block registers, accessed via
1935 * MMIO MSI address low/hi and MSI data registers.
1936 */
1937static void iommu_update_intcapxt(struct amd_iommu *iommu)
1938{
1939 u64 val;
1940 u32 addr_lo = readl(iommu->mmio_base + MMIO_MSI_ADDR_LO_OFFSET);
1941 u32 addr_hi = readl(iommu->mmio_base + MMIO_MSI_ADDR_HI_OFFSET);
1942 u32 data = readl(iommu->mmio_base + MMIO_MSI_DATA_OFFSET);
1943 bool dm = (addr_lo >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
1944 u32 dest = ((addr_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xFF);
1945
1946 if (x2apic_enabled())
1947 dest |= MSI_ADDR_EXT_DEST_ID(addr_hi);
1948
1949 val = XT_INT_VEC(data & 0xFF) |
1950 XT_INT_DEST_MODE(dm) |
1951 XT_INT_DEST_LO(dest) |
1952 XT_INT_DEST_HI(dest);
1953
1954 /**
 1955 * Current IOMMU implementation uses the same IRQ for all
1956 * 3 IOMMU interrupts.
1957 */
1958 writeq(val, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
1959 writeq(val, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
1960 writeq(val, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
1961}
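/*
 * A worked example of the packing above (values assumed for illustration):
 * vector 0x30 delivered in physical mode to APIC ID 0x12 gives
 *
 *   XT_INT_VEC(0x30)     = 0x30ULL << 32
 *   XT_INT_DEST_MODE(0)  = 0
 *   XT_INT_DEST_LO(0x12) = 0x12ULL << 8
 *   XT_INT_DEST_HI(0x12) = 0          (bits 31:24 of the ID are zero here)
 *
 * so val == 0x0000003000001200 is written to all three IntCapXT registers.
 */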
1962
1963static void _irq_notifier_notify(struct irq_affinity_notify *notify,
1964 const cpumask_t *mask)
1965{
1966 struct amd_iommu *iommu;
1967
1968 for_each_iommu(iommu) {
1969 if (iommu->dev->irq == notify->irq) {
1970 iommu_update_intcapxt(iommu);
1971 break;
1972 }
1973 }
1974}
1975
1976static void _irq_notifier_release(struct kref *ref)
1977{
1978}
1979
1980static int iommu_init_intcapxt(struct amd_iommu *iommu)
1981{
1982 int ret;
1983 struct irq_affinity_notify *notify = &iommu->intcapxt_notify;
1984
1985 /**
81307143
SS
1986 * IntCapXT requires XTSup=1 and MsiCapMmioSup=1,
1987 * which can be inferred from amd_iommu_xt_mode.
66929812
SS
1988 */
1989 if (amd_iommu_xt_mode != IRQ_REMAP_X2APIC_MODE)
1990 return 0;
1991
1992 /**
 1993 * Also, we need to set up a notifier to update the IntCapXT registers
1994 * whenever the irq affinity is changed from user-space.
1995 */
1996 notify->irq = iommu->dev->irq;
 1997 notify->notify = _irq_notifier_notify;
 1998 notify->release = _irq_notifier_release;
1999 ret = irq_set_affinity_notifier(iommu->dev->irq, notify);
2000 if (ret) {
2001 pr_err("Failed to register irq affinity notifier (devid=%#x, irq %d)\n",
2002 iommu->devid, iommu->dev->irq);
2003 return ret;
2004 }
2005
2006 iommu_update_intcapxt(iommu);
2007 iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
2008 return ret;
2009}
2010
05f92db9 2011static int iommu_init_msi(struct amd_iommu *iommu)
a80dc3e0 2012{
9ddd592a
JR
2013 int ret;
2014
a80dc3e0 2015 if (iommu->int_enabled)
9ddd592a 2016 goto enable_faults;
a80dc3e0 2017
82fcfc67 2018 if (iommu->dev->msi_cap)
9ddd592a
JR
2019 ret = iommu_setup_msi(iommu);
2020 else
2021 ret = -ENODEV;
2022
2023 if (ret)
2024 return ret;
a80dc3e0 2025
9ddd592a 2026enable_faults:
66929812
SS
2027 ret = iommu_init_intcapxt(iommu);
2028 if (ret)
2029 return ret;
2030
9ddd592a 2031 iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
a80dc3e0 2032
9ddd592a 2033 if (iommu->ppr_log != NULL)
bde9e6b9 2034 iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
9ddd592a 2035
8bda0cfb
SS
2036 iommu_ga_log_enable(iommu);
2037
9ddd592a 2038 return 0;
a80dc3e0
JR
2039}
2040
b65233a9
JR
2041/****************************************************************************
2042 *
2043 * The next functions belong to the third pass of parsing the ACPI
2044 * table. In this last pass the memory mapping requirements are
df805abb 2045 * gathered (like exclusion and unity mapping ranges).
b65233a9
JR
2046 *
2047 ****************************************************************************/
2048
be2a022c
JR
2049static void __init free_unity_maps(void)
2050{
2051 struct unity_map_entry *entry, *next;
2052
2053 list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
2054 list_del(&entry->list);
2055 kfree(entry);
2056 }
2057}
2058
b65233a9 2059/* called when we find an exclusion range definition in ACPI */
be2a022c
JR
2060static int __init init_exclusion_range(struct ivmd_header *m)
2061{
2062 int i;
2063
2064 switch (m->type) {
2065 case ACPI_IVMD_TYPE:
2066 set_device_exclusion_range(m->devid, m);
2067 break;
2068 case ACPI_IVMD_TYPE_ALL:
3a61ec38 2069 for (i = 0; i <= amd_iommu_last_bdf; ++i)
be2a022c
JR
2070 set_device_exclusion_range(i, m);
2071 break;
2072 case ACPI_IVMD_TYPE_RANGE:
2073 for (i = m->devid; i <= m->aux; ++i)
2074 set_device_exclusion_range(i, m);
2075 break;
2076 default:
2077 break;
2078 }
2079
2080 return 0;
2081}
2082
b65233a9 2083/* called for unity map ACPI definition */
be2a022c
JR
2084static int __init init_unity_map_range(struct ivmd_header *m)
2085{
98f1ad25 2086 struct unity_map_entry *e = NULL;
02acc43a 2087 char *s;
be2a022c
JR
2088
2089 e = kzalloc(sizeof(*e), GFP_KERNEL);
2090 if (e == NULL)
2091 return -ENOMEM;
2092
8aafaaf2
JR
2093 if (m->flags & IVMD_FLAG_EXCL_RANGE)
2094 init_exclusion_range(m);
2095
be2a022c
JR
2096 switch (m->type) {
2097 default:
0bc252f4
JR
2098 kfree(e);
2099 return 0;
be2a022c 2100 case ACPI_IVMD_TYPE:
02acc43a 2101 s = "IVMD_TYPE\t\t\t";
be2a022c
JR
2102 e->devid_start = e->devid_end = m->devid;
2103 break;
2104 case ACPI_IVMD_TYPE_ALL:
02acc43a 2105 s = "IVMD_TYPE_ALL\t\t";
be2a022c
JR
2106 e->devid_start = 0;
2107 e->devid_end = amd_iommu_last_bdf;
2108 break;
2109 case ACPI_IVMD_TYPE_RANGE:
02acc43a 2110 s = "IVMD_TYPE_RANGE\t\t";
be2a022c
JR
2111 e->devid_start = m->devid;
2112 e->devid_end = m->aux;
2113 break;
2114 }
2115 e->address_start = PAGE_ALIGN(m->range_start);
2116 e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
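	/*
	 * Shifting the IVMD flags right by one drops the unity-map bit and
	 * keeps the read/write permission bits, e.g. flags 0x07 (unity map,
	 * readable, writable) results in prot == 0x3.
	 */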
2117 e->prot = m->flags >> 1;
2118
02acc43a
JR
2119 DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
2120 " range_start: %016llx range_end: %016llx flags: %x\n", s,
c5081cd7
SK
2121 PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
2122 PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
02acc43a
JR
2123 PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
2124 e->address_start, e->address_end, m->flags);
2125
be2a022c
JR
2126 list_add_tail(&e->list, &amd_iommu_unity_map);
2127
2128 return 0;
2129}
2130
b65233a9 2131/* iterates over all memory definitions we find in the ACPI table */
be2a022c
JR
2132static int __init init_memory_definitions(struct acpi_table_header *table)
2133{
2134 u8 *p = (u8 *)table, *end = (u8 *)table;
2135 struct ivmd_header *m;
2136
be2a022c
JR
2137 end += table->length;
2138 p += IVRS_HEADER_LENGTH;
2139
2140 while (p < end) {
2141 m = (struct ivmd_header *)p;
8aafaaf2 2142 if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
be2a022c
JR
2143 init_unity_map_range(m);
2144
2145 p += m->length;
2146 }
2147
2148 return 0;
2149}
2150
9f5f5fb3 2151/*
3ac3e5ee 2152 * Initialize the device table so that DMA is blocked for all devices
9f5f5fb3 2153 */
33f28c59 2154static void init_device_table_dma(void)
9f5f5fb3 2155{
0de66d5b 2156 u32 devid;
9f5f5fb3
JR
2157
2158 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
2159 set_dev_entry_bit(devid, DEV_ENTRY_VALID);
2160 set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
9f5f5fb3
JR
2161 }
2162}
2163
d04e0ba3
JR
2164static void __init uninit_device_table_dma(void)
2165{
2166 u32 devid;
2167
2168 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
2169 amd_iommu_dev_table[devid].data[0] = 0ULL;
2170 amd_iommu_dev_table[devid].data[1] = 0ULL;
2171 }
2172}
2173
33f28c59
JR
2174static void init_device_table(void)
2175{
2176 u32 devid;
2177
2178 if (!amd_iommu_irq_remap)
2179 return;
2180
2181 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
2182 set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
2183}
2184
e9bf5197
JR
2185static void iommu_init_flags(struct amd_iommu *iommu)
2186{
2187 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
2188 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
2189 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
2190
2191 iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
2192 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
2193 iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
2194
2195 iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
2196 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
2197 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
2198
2199 iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
2200 iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
2201 iommu_feature_disable(iommu, CONTROL_ISOC_EN);
2202
2203 /*
2204 * make IOMMU memory accesses cache coherent
2205 */
2206 iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
1456e9d2
JR
2207
2208 /* Set IOTLB invalidation timeout to 1s */
2209 iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
e9bf5197
JR
2210}
2211
5bcd757f 2212static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
4c894f47 2213{
5bcd757f
MG
2214 int i, j;
2215 u32 ioc_feature_control;
c1bf94ec 2216 struct pci_dev *pdev = iommu->root_pdev;
5bcd757f
MG
2217
2218 /* RD890 BIOSes may not have completely reconfigured the iommu */
c1bf94ec 2219 if (!is_rd890_iommu(iommu->dev) || !pdev)
5bcd757f
MG
2220 return;
2221
2222 /*
2223 * First, we need to ensure that the iommu is enabled. This is
2224 * controlled by a register in the northbridge
2225 */
5bcd757f
MG
2226
2227 /* Select Northbridge indirect register 0x75 and enable writing */
2228 pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
2229 pci_read_config_dword(pdev, 0x64, &ioc_feature_control);
2230
2231 /* Enable the iommu */
2232 if (!(ioc_feature_control & 0x1))
2233 pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
2234
5bcd757f
MG
2235 /* Restore the iommu BAR */
2236 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2237 iommu->stored_addr_lo);
2238 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
2239 iommu->stored_addr_hi);
2240
2241 /* Restore the l1 indirect regs for each of the 6 l1s */
2242 for (i = 0; i < 6; i++)
2243 for (j = 0; j < 0x12; j++)
2244 iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
2245
2246 /* Restore the l2 indirect regs */
2247 for (i = 0; i < 0x83; i++)
2248 iommu_write_l2(iommu, i, iommu->stored_l2[i]);
2249
2250 /* Lock PCI setup registers */
2251 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2252 iommu->stored_addr_lo | 1);
4c894f47
JR
2253}
2254
3928aa3f
SS
2255static void iommu_enable_ga(struct amd_iommu *iommu)
2256{
2257#ifdef CONFIG_IRQ_REMAP
2258 switch (amd_iommu_guest_ir) {
2259 case AMD_IOMMU_GUEST_IR_VAPIC:
2260 iommu_feature_enable(iommu, CONTROL_GAM_EN);
df561f66 2261 fallthrough;
3928aa3f
SS
2262 case AMD_IOMMU_GUEST_IR_LEGACY_GA:
2263 iommu_feature_enable(iommu, CONTROL_GA_EN);
77bdab46 2264 iommu->irte_ops = &irte_128_ops;
3928aa3f
SS
2265 break;
2266 default:
77bdab46 2267 iommu->irte_ops = &irte_32_ops;
3928aa3f
SS
2268 break;
2269 }
2270#endif
2271}
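/*
 * Note the fallthrough above: vAPIC mode enables both CONTROL_GAM_EN and
 * CONTROL_GA_EN and uses the 128-bit IRTE format; legacy GA mode enables
 * only CONTROL_GA_EN (also with 128-bit IRTEs); every other mode falls back
 * to the 32-bit IRTE format.
 */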
2272
78d313c6
BH
2273static void early_enable_iommu(struct amd_iommu *iommu)
2274{
2275 iommu_disable(iommu);
2276 iommu_init_flags(iommu);
2277 iommu_set_device_table(iommu);
2278 iommu_enable_command_buffer(iommu);
2279 iommu_enable_event_buffer(iommu);
2280 iommu_set_exclusion_range(iommu);
2281 iommu_enable_ga(iommu);
90fcffd9 2282 iommu_enable_xt(iommu);
78d313c6
BH
2283 iommu_enable(iommu);
2284 iommu_flush_all_caches(iommu);
2285}
2286
b65233a9
JR
2287/*
2288 * This function finally enables all IOMMUs found in the system after
3ac3e5ee
BH
2289 * they have been initialized.
2290 *
 2291 * Or, if running in a kdump kernel with all IOMMUs pre-enabled, try to copy
 2292 * the old content of the device table entries. If that is not the case, or
 2293 * the copy fails, just continue as a normal kernel does.
b65233a9 2294 */
11ee5ac4 2295static void early_enable_iommus(void)
8736197b
JR
2296{
2297 struct amd_iommu *iommu;
2298
3ac3e5ee
BH
2299
2300 if (!copy_device_table()) {
2301 /*
 2302 * If we get here because copying the device table from the old
 2303 * kernel failed with all IOMMUs enabled, print an error message and
 2304 * free the allocated old_dev_tbl_cpy.
2305 */
2306 if (amd_iommu_pre_enabled)
2307 pr_err("Failed to copy DEV table from previous kernel.\n");
2308 if (old_dev_tbl_cpy != NULL)
2309 free_pages((unsigned long)old_dev_tbl_cpy,
2310 get_order(dev_table_size));
2311
2312 for_each_iommu(iommu) {
2313 clear_translation_pre_enabled(iommu);
2314 early_enable_iommu(iommu);
2315 }
2316 } else {
2317 pr_info("Copied DEV table from previous kernel.\n");
2318 free_pages((unsigned long)amd_iommu_dev_table,
2319 get_order(dev_table_size));
2320 amd_iommu_dev_table = old_dev_tbl_cpy;
2321 for_each_iommu(iommu) {
2322 iommu_disable_command_buffer(iommu);
2323 iommu_disable_event_buffer(iommu);
2324 iommu_enable_command_buffer(iommu);
2325 iommu_enable_event_buffer(iommu);
2326 iommu_enable_ga(iommu);
90fcffd9 2327 iommu_enable_xt(iommu);
3ac3e5ee
BH
2328 iommu_set_device_table(iommu);
2329 iommu_flush_all_caches(iommu);
2330 }
8736197b 2331 }
d98de49a
SS
2332
2333#ifdef CONFIG_IRQ_REMAP
2334 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
2335 amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
2336#endif
8736197b
JR
2337}
2338
11ee5ac4
JR
2339static void enable_iommus_v2(void)
2340{
2341 struct amd_iommu *iommu;
2342
2343 for_each_iommu(iommu) {
2344 iommu_enable_ppr_log(iommu);
2345 iommu_enable_gt(iommu);
2346 }
2347}
2348
2349static void enable_iommus(void)
2350{
2351 early_enable_iommus();
2352
2353 enable_iommus_v2();
2354}
2355
92ac4320
JR
2356static void disable_iommus(void)
2357{
2358 struct amd_iommu *iommu;
2359
2360 for_each_iommu(iommu)
2361 iommu_disable(iommu);
d98de49a
SS
2362
2363#ifdef CONFIG_IRQ_REMAP
2364 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
2365 amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
2366#endif
92ac4320
JR
2367}
2368
7441e9cb
JR
2369/*
2370 * Suspend/Resume support
2371 * disable suspend until real resume implemented
2372 */
2373
f3c6ea1b 2374static void amd_iommu_resume(void)
7441e9cb 2375{
5bcd757f
MG
2376 struct amd_iommu *iommu;
2377
2378 for_each_iommu(iommu)
2379 iommu_apply_resume_quirks(iommu);
2380
736501ee
JR
2381 /* re-load the hardware */
2382 enable_iommus();
3d9761e7
JR
2383
2384 amd_iommu_enable_interrupts();
7441e9cb
JR
2385}
2386
f3c6ea1b 2387static int amd_iommu_suspend(void)
7441e9cb 2388{
736501ee
JR
2389 /* disable IOMMUs to go out of the way for BIOS */
2390 disable_iommus();
2391
2392 return 0;
7441e9cb
JR
2393}
2394
f3c6ea1b 2395static struct syscore_ops amd_iommu_syscore_ops = {
7441e9cb
JR
2396 .suspend = amd_iommu_suspend,
2397 .resume = amd_iommu_resume,
2398};
2399
90b3eb03 2400static void __init free_iommu_resources(void)
8704a1ba 2401{
ebcfa284 2402 kmemleak_free(irq_lookup_table);
0ea2c422
JR
2403 free_pages((unsigned long)irq_lookup_table,
2404 get_order(rlookup_table_size));
f6019271 2405 irq_lookup_table = NULL;
8704a1ba 2406
a591989a
JL
2407 kmem_cache_destroy(amd_iommu_irq_cache);
2408 amd_iommu_irq_cache = NULL;
8704a1ba
JR
2409
2410 free_pages((unsigned long)amd_iommu_rlookup_table,
2411 get_order(rlookup_table_size));
f6019271 2412 amd_iommu_rlookup_table = NULL;
8704a1ba
JR
2413
2414 free_pages((unsigned long)amd_iommu_alias_table,
2415 get_order(alias_table_size));
f6019271 2416 amd_iommu_alias_table = NULL;
8704a1ba
JR
2417
2418 free_pages((unsigned long)amd_iommu_dev_table,
2419 get_order(dev_table_size));
f6019271 2420 amd_iommu_dev_table = NULL;
8704a1ba
JR
2421
2422 free_iommu_all();
8704a1ba
JR
2423}
2424
c2ff5cf5
JR
2425/* SB IOAPIC is always on this device in AMD systems */
2426#define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0))
2427
eb1eb7ae
JR
2428static bool __init check_ioapic_information(void)
2429{
dfbb6d47 2430 const char *fw_bug = FW_BUG;
c2ff5cf5 2431 bool ret, has_sb_ioapic;
eb1eb7ae
JR
2432 int idx;
2433
c2ff5cf5
JR
2434 has_sb_ioapic = false;
2435 ret = false;
eb1eb7ae 2436
dfbb6d47
JR
2437 /*
2438 * If we have map overrides on the kernel command line the
2439 * messages in this function might not describe firmware bugs
2440 * anymore - so be careful
2441 */
2442 if (cmdline_maps)
2443 fw_bug = "";
2444
c2ff5cf5
JR
2445 for (idx = 0; idx < nr_ioapics; idx++) {
2446 int devid, id = mpc_ioapic_id(idx);
2447
2448 devid = get_ioapic_devid(id);
2449 if (devid < 0) {
101fa037 2450 pr_err("%s: IOAPIC[%d] not in IVRS table\n",
dfbb6d47 2451 fw_bug, id);
c2ff5cf5
JR
2452 ret = false;
2453 } else if (devid == IOAPIC_SB_DEVID) {
2454 has_sb_ioapic = true;
2455 ret = true;
eb1eb7ae
JR
2456 }
2457 }
2458
c2ff5cf5
JR
2459 if (!has_sb_ioapic) {
2460 /*
2461 * We expect the SB IOAPIC to be listed in the IVRS
2462 * table. The system timer is connected to the SB IOAPIC
2463 * and if we don't have it in the list the system will
2464 * panic at boot time. This situation usually happens
2465 * when the BIOS is buggy and provides us the wrong
2466 * device id for the IOAPIC in the system.
2467 */
101fa037 2468 pr_err("%s: No southbridge IOAPIC found\n", fw_bug);
c2ff5cf5
JR
2469 }
2470
2471 if (!ret)
101fa037 2472 pr_err("Disabling interrupt remapping\n");
c2ff5cf5
JR
2473
2474 return ret;
eb1eb7ae
JR
2475}
2476
d04e0ba3
JR
2477static void __init free_dma_resources(void)
2478{
d04e0ba3
JR
2479 free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
2480 get_order(MAX_DOMAIN_ID/8));
f6019271 2481 amd_iommu_pd_alloc_bitmap = NULL;
d04e0ba3
JR
2482
2483 free_unity_maps();
2484}
2485
b65233a9 2486/*
8704a1ba
JR
2487 * This is the hardware init function for AMD IOMMU in the system.
2488 * This function is called either from amd_iommu_init or from the interrupt
2489 * remapping setup code.
b65233a9
JR
2490 *
2491 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
8c7142f5 2492 * four times:
b65233a9 2493 *
8c7142f5
SS
2494 * 1 pass) Discover the most comprehensive IVHD type to use.
2495 *
2496 * 2 pass) Find the highest PCI device id the driver has to handle.
b65233a9
JR
2497 * Upon this information the size of the data structures is
2498 * determined that needs to be allocated.
2499 *
8c7142f5 2500 * 3 pass) Initialize the data structures just allocated with the
b65233a9
JR
2501 * information in the ACPI table about available AMD IOMMUs
2502 * in the system. It also maps the PCI devices in the
2503 * system to specific IOMMUs
2504 *
8c7142f5 2505 * 4 pass) After the basic data structures are allocated and
b65233a9
JR
2506 * initialized we update them with information about memory
2507 * remapping requirements parsed out of the ACPI table in
2508 * this last pass.
2509 *
8704a1ba
JR
2510 * After everything is set up the IOMMUs are enabled and the necessary
2511 * hotplug and suspend notifiers are registered.
b65233a9 2512 */
643511b3 2513static int __init early_amd_iommu_init(void)
fe74c9cf 2514{
02f3b3f5 2515 struct acpi_table_header *ivrs_base;
02f3b3f5 2516 acpi_status status;
3928aa3f 2517 int i, remap_cache_sz, ret = 0;
3dfee47b 2518 u32 pci_id;
fe74c9cf 2519
643511b3 2520 if (!amd_iommu_detected)
8704a1ba
JR
2521 return -ENODEV;
2522
6b11d1d6 2523 status = acpi_get_table("IVRS", 0, &ivrs_base);
02f3b3f5
JR
2524 if (status == AE_NOT_FOUND)
2525 return -ENODEV;
2526 else if (ACPI_FAILURE(status)) {
2527 const char *err = acpi_format_exception(status);
101fa037 2528 pr_err("IVRS table error: %s\n", err);
02f3b3f5
JR
2529 return -EINVAL;
2530 }
2531
8c7142f5
SS
2532 /*
2533 * Validate checksum here so we don't need to do it when
2534 * we actually parse the table
2535 */
2536 ret = check_ivrs_checksum(ivrs_base);
2537 if (ret)
99e8ccd3 2538 goto out;
8c7142f5
SS
2539
2540 amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
2541 DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
2542
fe74c9cf
JR
2543 /*
2544 * First parse ACPI tables to find the largest Bus/Dev/Func
2545 * we need to handle. Upon this information the shared data
2546 * structures for the IOMMUs in the system will be allocated
2547 */
2c0ae172
JR
2548 ret = find_last_devid_acpi(ivrs_base);
2549 if (ret)
3551a708
JR
2550 goto out;
2551
c571484e
JR
2552 dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE);
2553 alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
2554 rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
fe74c9cf 2555
fe74c9cf 2556 /* Device table - directly used by all IOMMUs */
8704a1ba 2557 ret = -ENOMEM;
b336781b
BH
2558 amd_iommu_dev_table = (void *)__get_free_pages(
2559 GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
fe74c9cf
JR
2560 get_order(dev_table_size));
2561 if (amd_iommu_dev_table == NULL)
2562 goto out;
2563
2564 /*
2565 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
 2566 * IOMMU sees for that device
2567 */
2568 amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
2569 get_order(alias_table_size));
2570 if (amd_iommu_alias_table == NULL)
2c0ae172 2571 goto out;
fe74c9cf
JR
2572
2573 /* IOMMU rlookup table - find the IOMMU for a specific device */
83fd5cc6
JR
2574 amd_iommu_rlookup_table = (void *)__get_free_pages(
2575 GFP_KERNEL | __GFP_ZERO,
fe74c9cf
JR
2576 get_order(rlookup_table_size));
2577 if (amd_iommu_rlookup_table == NULL)
2c0ae172 2578 goto out;
fe74c9cf 2579
5dc8bff0
JR
2580 amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
2581 GFP_KERNEL | __GFP_ZERO,
fe74c9cf
JR
2582 get_order(MAX_DOMAIN_ID/8));
2583 if (amd_iommu_pd_alloc_bitmap == NULL)
2c0ae172 2584 goto out;
fe74c9cf
JR
2585
2586 /*
5dc8bff0 2587 * let all alias entries point to themselves
fe74c9cf 2588 */
3a61ec38 2589 for (i = 0; i <= amd_iommu_last_bdf; ++i)
fe74c9cf
JR
2590 amd_iommu_alias_table[i] = i;
2591
fe74c9cf
JR
2592 /*
 2593 * never allocate domain 0 because it's used as the non-allocated and
2594 * error value placeholder
2595 */
5c87f62d 2596 __set_bit(0, amd_iommu_pd_alloc_bitmap);
fe74c9cf
JR
2597
2598 /*
2599 * now the data structures are allocated and basically initialized
2600 * start the real acpi table scan
2601 */
02f3b3f5
JR
2602 ret = init_iommu_all(ivrs_base);
2603 if (ret)
2c0ae172 2604 goto out;
fe74c9cf 2605
3dfee47b
KHF
2606 /* Disable IOMMU if there's Stoney Ridge graphics */
2607 for (i = 0; i < 32; i++) {
2608 pci_id = read_pci_config(0, i, 0, 0);
2609 if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
2610 pr_info("Disable IOMMU on Stoney Ridge\n");
2611 amd_iommu_disabled = true;
2612 break;
2613 }
2614 }
2615
11123741 2616 /* Disable any previously enabled IOMMUs */
20b46dff
BH
2617 if (!is_kdump_kernel() || amd_iommu_disabled)
2618 disable_iommus();
11123741 2619
eb1eb7ae
JR
2620 if (amd_iommu_irq_remap)
2621 amd_iommu_irq_remap = check_ioapic_information();
2622
05152a04
JR
2623 if (amd_iommu_irq_remap) {
2624 /*
2625 * Interrupt remapping enabled, create kmem_cache for the
2626 * remapping tables.
2627 */
83ed9c13 2628 ret = -ENOMEM;
3928aa3f
SS
2629 if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
2630 remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
2631 else
2632 remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
05152a04 2633 amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
3928aa3f
SS
2634 remap_cache_sz,
2635 IRQ_TABLE_ALIGNMENT,
2636 0, NULL);
05152a04
JR
2637 if (!amd_iommu_irq_cache)
2638 goto out;
0ea2c422
JR
2639
2640 irq_lookup_table = (void *)__get_free_pages(
2641 GFP_KERNEL | __GFP_ZERO,
2642 get_order(rlookup_table_size));
ebcfa284
LS
2643 kmemleak_alloc(irq_lookup_table, rlookup_table_size,
2644 1, GFP_KERNEL);
0ea2c422
JR
2645 if (!irq_lookup_table)
2646 goto out;
05152a04
JR
2647 }
2648
02f3b3f5
JR
2649 ret = init_memory_definitions(ivrs_base);
2650 if (ret)
2c0ae172 2651 goto out;
3551a708 2652
eb1eb7ae
JR
2653 /* init the device table */
2654 init_device_table();
2655
8704a1ba 2656out:
02f3b3f5 2657 /* Don't leak any ACPI memory */
6b11d1d6 2658 acpi_put_table(ivrs_base);
02f3b3f5
JR
2659 ivrs_base = NULL;
2660
643511b3
JR
2661 return ret;
2662}
2663
ae295142 2664static int amd_iommu_enable_interrupts(void)
3d9761e7
JR
2665{
2666 struct amd_iommu *iommu;
2667 int ret = 0;
2668
2669 for_each_iommu(iommu) {
2670 ret = iommu_init_msi(iommu);
2671 if (ret)
2672 goto out;
2673 }
2674
2675out:
2676 return ret;
2677}
2678
02f3b3f5
JR
2679static bool detect_ivrs(void)
2680{
2681 struct acpi_table_header *ivrs_base;
02f3b3f5
JR
2682 acpi_status status;
2683
6b11d1d6 2684 status = acpi_get_table("IVRS", 0, &ivrs_base);
02f3b3f5
JR
2685 if (status == AE_NOT_FOUND)
2686 return false;
2687 else if (ACPI_FAILURE(status)) {
2688 const char *err = acpi_format_exception(status);
101fa037 2689 pr_err("IVRS table error: %s\n", err);
02f3b3f5
JR
2690 return false;
2691 }
2692
6b11d1d6 2693 acpi_put_table(ivrs_base);
02f3b3f5 2694
1adb7d31
JR
2695 /* Make sure ACS will be enabled during PCI probe */
2696 pci_request_acs();
2697
02f3b3f5
JR
2698 return true;
2699}
2700
2c0ae172 2701/****************************************************************************
8704a1ba 2702 *
2c0ae172
JR
2703 * AMD IOMMU Initialization State Machine
2704 *
2705 ****************************************************************************/
2706
2707static int __init state_next(void)
8704a1ba
JR
2708{
2709 int ret = 0;
2710
2c0ae172
JR
2711 switch (init_state) {
2712 case IOMMU_START_STATE:
2713 if (!detect_ivrs()) {
2714 init_state = IOMMU_NOT_FOUND;
2715 ret = -ENODEV;
2716 } else {
2717 init_state = IOMMU_IVRS_DETECTED;
2718 }
2719 break;
2720 case IOMMU_IVRS_DETECTED:
2721 ret = early_amd_iommu_init();
2722 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
7ad820e4 2723 if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
3dfee47b 2724 pr_info("AMD IOMMU disabled\n");
7ad820e4
JR
2725 init_state = IOMMU_CMDLINE_DISABLED;
2726 ret = -EINVAL;
2727 }
2c0ae172
JR
2728 break;
2729 case IOMMU_ACPI_FINISHED:
2730 early_enable_iommus();
2c0ae172
JR
2731 x86_platform.iommu_shutdown = disable_iommus;
2732 init_state = IOMMU_ENABLED;
2733 break;
2734 case IOMMU_ENABLED:
74ddda71 2735 register_syscore_ops(&amd_iommu_syscore_ops);
2c0ae172
JR
2736 ret = amd_iommu_init_pci();
2737 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
2738 enable_iommus_v2();
2739 break;
2740 case IOMMU_PCI_INIT:
2741 ret = amd_iommu_enable_interrupts();
2742 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
2743 break;
2744 case IOMMU_INTERRUPTS_EN:
1e6a7b04 2745 ret = amd_iommu_init_dma_ops();
2c0ae172
JR
2746 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
2747 break;
2748 case IOMMU_DMA_OPS:
2749 init_state = IOMMU_INITIALIZED;
2750 break;
2751 case IOMMU_INITIALIZED:
2752 /* Nothing to do */
2753 break;
2754 case IOMMU_NOT_FOUND:
2755 case IOMMU_INIT_ERROR:
1b1e942e 2756 case IOMMU_CMDLINE_DISABLED:
2c0ae172
JR
2757 /* Error states => do nothing */
2758 ret = -EINVAL;
2759 break;
2760 default:
2761 /* Unknown state */
2762 BUG();
2763 }
3d9761e7 2764
5c90501a
KM
2765 if (ret) {
2766 free_dma_resources();
2767 if (!irq_remapping_enabled) {
2768 disable_iommus();
2769 free_iommu_resources();
2770 } else {
2771 struct amd_iommu *iommu;
2772
2773 uninit_device_table_dma();
2774 for_each_iommu(iommu)
2775 iommu_flush_all_caches(iommu);
2776 }
2777 }
2c0ae172
JR
2778 return ret;
2779}
7441e9cb 2780
2c0ae172
JR
2781static int __init iommu_go_to_state(enum iommu_init_state state)
2782{
151b0903 2783 int ret = -EINVAL;
f5325094 2784
2c0ae172 2785 while (init_state != state) {
1b1e942e
JR
2786 if (init_state == IOMMU_NOT_FOUND ||
2787 init_state == IOMMU_INIT_ERROR ||
2788 init_state == IOMMU_CMDLINE_DISABLED)
2c0ae172 2789 break;
151b0903 2790 ret = state_next();
2c0ae172 2791 }
f2f12b6f 2792
fe74c9cf 2793 return ret;
2c0ae172 2794}
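/*
 * For reference, a successful initialization walks the state machine above
 * roughly as:
 *
 *   IOMMU_START_STATE -> IOMMU_IVRS_DETECTED -> IOMMU_ACPI_FINISHED ->
 *   IOMMU_ENABLED -> IOMMU_PCI_INIT -> IOMMU_INTERRUPTS_EN ->
 *   IOMMU_DMA_OPS -> IOMMU_INITIALIZED
 *
 * IOMMU_NOT_FOUND, IOMMU_INIT_ERROR and IOMMU_CMDLINE_DISABLED are terminal
 * error states that stop any further transitions.
 */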
fe74c9cf 2795
6b474b82
JR
2796#ifdef CONFIG_IRQ_REMAP
2797int __init amd_iommu_prepare(void)
2798{
3f4cb7c0
TG
2799 int ret;
2800
7fa1c842 2801 amd_iommu_irq_remap = true;
84d07793 2802
3f4cb7c0
TG
2803 ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
2804 if (ret)
2805 return ret;
2806 return amd_iommu_irq_remap ? 0 : -ENODEV;
6b474b82 2807}
d7f07769 2808
6b474b82
JR
2809int __init amd_iommu_enable(void)
2810{
2811 int ret;
2812
2813 ret = iommu_go_to_state(IOMMU_ENABLED);
2814 if (ret)
2815 return ret;
d7f07769 2816
6b474b82 2817 irq_remapping_enabled = 1;
90fcffd9 2818 return amd_iommu_xt_mode;
6b474b82
JR
2819}
2820
2821void amd_iommu_disable(void)
2822{
2823 amd_iommu_suspend();
2824}
2825
2826int amd_iommu_reenable(int mode)
2827{
2828 amd_iommu_resume();
2829
2830 return 0;
2831}
d7f07769 2832
6b474b82
JR
2833int __init amd_iommu_enable_faulting(void)
2834{
2835 /* We enable MSI later when PCI is initialized */
2836 return 0;
2837}
2838#endif
d7f07769 2839
2c0ae172
JR
2840/*
2841 * This is the core init function for AMD IOMMU hardware in the system.
2842 * This function is called from the generic x86 DMA layer initialization
2843 * code.
2844 */
2845static int __init amd_iommu_init(void)
2846{
7d0f5fd3 2847 struct amd_iommu *iommu;
2c0ae172
JR
2848 int ret;
2849
2850 ret = iommu_go_to_state(IOMMU_INITIALIZED);
bf4bff46
KM
2851#ifdef CONFIG_GART_IOMMU
2852 if (ret && list_empty(&amd_iommu_list)) {
2853 /*
2854 * We failed to initialize the AMD IOMMU - try fallback
2855 * to GART if possible.
2856 */
2857 gart_iommu_init();
2c0ae172 2858 }
bf4bff46 2859#endif
2c0ae172 2860
7d0f5fd3
GH
2861 for_each_iommu(iommu)
2862 amd_iommu_debugfs_setup(iommu);
2863
2c0ae172 2864 return ret;
fe74c9cf
JR
2865}
2866
2543a786
TL
2867static bool amd_iommu_sme_check(void)
2868{
2869 if (!sme_active() || (boot_cpu_data.x86 != 0x17))
2870 return true;
2871
2872 /* For Fam17h, a specific level of support is required */
2873 if (boot_cpu_data.microcode >= 0x08001205)
2874 return true;
2875
2876 if ((boot_cpu_data.microcode >= 0x08001126) &&
2877 (boot_cpu_data.microcode <= 0x080011ff))
2878 return true;
2879
101fa037 2880 pr_notice("IOMMU not currently supported when SME is active\n");
2543a786
TL
2881
2882 return false;
2883}
2884
b65233a9
JR
2885/****************************************************************************
2886 *
2887 * Early detect code. This code runs at IOMMU detection time in the DMA
2888 * layer. It just looks if there is an IVRS ACPI table to detect AMD
2889 * IOMMUs
2890 *
2891 ****************************************************************************/
480125ba 2892int __init amd_iommu_detect(void)
ae7877de 2893{
2c0ae172 2894 int ret;
02f3b3f5 2895
75f1cdf1 2896 if (no_iommu || (iommu_detected && !gart_iommu_aperture))
480125ba 2897 return -ENODEV;
ae7877de 2898
2543a786
TL
2899 if (!amd_iommu_sme_check())
2900 return -ENODEV;
2901
2c0ae172
JR
2902 ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
2903 if (ret)
2904 return ret;
11bd04f6 2905
02f3b3f5
JR
2906 amd_iommu_detected = true;
2907 iommu_detected = 1;
2908 x86_init.iommu.iommu_init = amd_iommu_init;
2909
4781bc42 2910 return 1;
ae7877de
JR
2911}
2912
b65233a9
JR
2913/****************************************************************************
2914 *
2915 * Parsing functions for the AMD IOMMU specific kernel command line
2916 * options.
2917 *
2918 ****************************************************************************/
2919
fefda117
JR
2920static int __init parse_amd_iommu_dump(char *str)
2921{
2922 amd_iommu_dump = true;
2923
2924 return 1;
2925}
2926
3928aa3f
SS
2927static int __init parse_amd_iommu_intr(char *str)
2928{
2929 for (; *str; ++str) {
2930 if (strncmp(str, "legacy", 6) == 0) {
b74aa02d 2931 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
3928aa3f
SS
2932 break;
2933 }
2934 if (strncmp(str, "vapic", 5) == 0) {
2935 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
2936 break;
2937 }
2938 }
2939 return 1;
2940}
2941
918ad6c5
JR
2942static int __init parse_amd_iommu_options(char *str)
2943{
2944 for (; *str; ++str) {
695b5676 2945 if (strncmp(str, "fullflush", 9) == 0)
afa9fdc2 2946 amd_iommu_unmap_flush = true;
a5235725
JR
2947 if (strncmp(str, "off", 3) == 0)
2948 amd_iommu_disabled = true;
5abcdba4
JR
2949 if (strncmp(str, "force_isolation", 15) == 0)
2950 amd_iommu_force_isolation = true;
918ad6c5
JR
2951 }
2952
2953 return 1;
2954}
2955
440e8998
JR
2956static int __init parse_ivrs_ioapic(char *str)
2957{
2958 unsigned int bus, dev, fn;
2959 int ret, id, i;
2960 u16 devid;
2961
2962 ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
2963
2964 if (ret != 4) {
101fa037 2965 pr_err("Invalid command line: ivrs_ioapic%s\n", str);
440e8998
JR
2966 return 1;
2967 }
2968
2969 if (early_ioapic_map_size == EARLY_MAP_SIZE) {
101fa037 2970 pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
440e8998
JR
2971 str);
2972 return 1;
2973 }
2974
2975 devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
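	/*
	 * For example, booting with "ivrs_ioapic[32]=00:14.0" maps IOAPIC
	 * id 32 to devid 0x00a0 (bus 0x00, device 0x14, function 0) with the
	 * encoding above.
	 */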
2976
dfbb6d47 2977 cmdline_maps = true;
440e8998
JR
2978 i = early_ioapic_map_size++;
2979 early_ioapic_map[i].id = id;
2980 early_ioapic_map[i].devid = devid;
2981 early_ioapic_map[i].cmd_line = true;
2982
2983 return 1;
2984}
2985
2986static int __init parse_ivrs_hpet(char *str)
2987{
2988 unsigned int bus, dev, fn;
2989 int ret, id, i;
2990 u16 devid;
2991
2992 ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
2993
2994 if (ret != 4) {
101fa037 2995 pr_err("Invalid command line: ivrs_hpet%s\n", str);
440e8998
JR
2996 return 1;
2997 }
2998
2999 if (early_hpet_map_size == EARLY_MAP_SIZE) {
101fa037 3000 pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n",
440e8998
JR
3001 str);
3002 return 1;
3003 }
3004
3005 devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
3006
dfbb6d47 3007 cmdline_maps = true;
440e8998
JR
3008 i = early_hpet_map_size++;
3009 early_hpet_map[i].id = id;
3010 early_hpet_map[i].devid = devid;
3011 early_hpet_map[i].cmd_line = true;
3012
3013 return 1;
3014}
3015
ca3bf5d4
SS
3016static int __init parse_ivrs_acpihid(char *str)
3017{
3018 u32 bus, dev, fn;
3019 char *hid, *uid, *p;
3020 char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
3021 int ret, i;
3022
3023 ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
3024 if (ret != 4) {
101fa037 3025 pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
ca3bf5d4
SS
3026 return 1;
3027 }
3028
3029 p = acpiid;
3030 hid = strsep(&p, ":");
3031 uid = p;
3032
3033 if (!hid || !(*hid) || !uid) {
101fa037 3034 pr_err("Invalid command line: hid or uid\n");
ca3bf5d4
SS
3035 return 1;
3036 }
3037
3038 i = early_acpihid_map_size++;
3039 memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
3040 memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
3041 early_acpihid_map[i].devid =
3042 ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
3043 early_acpihid_map[i].cmd_line = true;
3044
3045 return 1;
3046}
3047
440e8998
JR
3048__setup("amd_iommu_dump", parse_amd_iommu_dump);
3049__setup("amd_iommu=", parse_amd_iommu_options);
3928aa3f 3050__setup("amd_iommu_intr=", parse_amd_iommu_intr);
440e8998
JR
3051__setup("ivrs_ioapic", parse_ivrs_ioapic);
3052__setup("ivrs_hpet", parse_ivrs_hpet);
ca3bf5d4 3053__setup("ivrs_acpihid", parse_ivrs_acpihid);
22e6daf4
KRW
3054
3055IOMMU_INIT_FINISH(amd_iommu_detect,
3056 gart_iommu_hole_init,
98f1ad25
JR
3057 NULL,
3058 NULL);
400a28a0
JR
3059
3060bool amd_iommu_v2_supported(void)
3061{
3062 return amd_iommu_v2_present;
3063}
3064EXPORT_SYMBOL(amd_iommu_v2_supported);
30861ddc 3065
f5863a00
SS
3066struct amd_iommu *get_amd_iommu(unsigned int idx)
3067{
3068 unsigned int i = 0;
3069 struct amd_iommu *iommu;
3070
3071 for_each_iommu(iommu)
3072 if (i++ == idx)
3073 return iommu;
3074 return NULL;
3075}
3076EXPORT_SYMBOL(get_amd_iommu);
3077
30861ddc
SK
3078/****************************************************************************
3079 *
3080 * IOMMU EFR Performance Counter support functionality. This code allows
3081 * access to the IOMMU PC functionality.
3082 *
3083 ****************************************************************************/
3084
f5863a00 3085u8 amd_iommu_pc_get_max_banks(unsigned int idx)
30861ddc 3086{
f5863a00 3087 struct amd_iommu *iommu = get_amd_iommu(idx);
30861ddc 3088
30861ddc 3089 if (iommu)
f5863a00 3090 return iommu->max_banks;
30861ddc 3091
f5863a00 3092 return 0;
30861ddc
SK
3093}
3094EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);
3095
3096bool amd_iommu_pc_supported(void)
3097{
3098 return amd_iommu_pc_present;
3099}
3100EXPORT_SYMBOL(amd_iommu_pc_supported);
3101
f5863a00 3102u8 amd_iommu_pc_get_max_counters(unsigned int idx)
30861ddc 3103{
f5863a00 3104 struct amd_iommu *iommu = get_amd_iommu(idx);
30861ddc 3105
30861ddc 3106 if (iommu)
f5863a00 3107 return iommu->max_counters;
30861ddc 3108
f5863a00 3109 return 0;
30861ddc
SK
3110}
3111EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
3112
1650dfd1
SS
3113static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
3114 u8 fxn, u64 *value, bool is_write)
30861ddc 3115{
30861ddc
SK
3116 u32 offset;
3117 u32 max_offset_lim;
3118
1650dfd1
SS
3119 /* Make sure the IOMMU PC resource is available */
3120 if (!amd_iommu_pc_present)
3121 return -ENODEV;
3122
30861ddc 3123 /* Check for valid iommu and pc register indexing */
1650dfd1 3124 if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
30861ddc
SK
3125 return -ENODEV;
3126
0a6d80c7 3127 offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);
30861ddc
SK
3128
3129 /* Limit the offset to the hw defined mmio region aperture */
0a6d80c7 3130 max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
30861ddc
SK
3131 (iommu->max_counters << 8) | 0x28);
3132 if ((offset < MMIO_CNTR_REG_OFFSET) ||
3133 (offset > max_offset_lim))
3134 return -EINVAL;
3135
3136 if (is_write) {
0a6d80c7
SS
3137 u64 val = *value & GENMASK_ULL(47, 0);
3138
3139 writel((u32)val, iommu->mmio_base + offset);
3140 writel((val >> 32), iommu->mmio_base + offset + 4);
30861ddc
SK
3141 } else {
3142 *value = readl(iommu->mmio_base + offset + 4);
3143 *value <<= 32;
0a6d80c7
SS
3144 *value |= readl(iommu->mmio_base + offset);
3145 *value &= GENMASK_ULL(47, 0);
30861ddc
SK
3146 }
3147
3148 return 0;
3149}
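/*
 * For illustration, with bank 0, counter 1 and fxn 0x00 the computation in
 * iommu_pc_get_set_reg() gives
 *
 *   offset = ((0x40 | 0) << 12) | (1 << 8) | 0x00 = 0x40100
 *
 * i.e. with this encoding the counter banks begin at MMIO offset 0x40000,
 * each bank spans a 4K page, each counter spans 0x100 bytes, and fxn selects
 * the register within that counter block.
 */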
38e45d02 3150
1650dfd1 3151int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
38e45d02 3152{
1650dfd1
SS
3153 if (!iommu)
3154 return -EINVAL;
38e45d02 3155
1650dfd1
SS
3156 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
3157}
3158EXPORT_SYMBOL(amd_iommu_pc_get_reg);
3159
3160int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
3161{
3162 if (!iommu)
3163 return -EINVAL;
38e45d02 3164
1650dfd1 3165 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
38e45d02 3166}
1650dfd1 3167EXPORT_SYMBOL(amd_iommu_pc_set_reg);