/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#define pr_fmt(fmt)     "AMD-Vi: " fmt
#define dev_fmt(fmt)    pr_fmt(fmt)

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/iommu.h>
#include <linux/kmemleak.h>
#include <linux/mem_encrypt.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>

#include <linux/crash_dump.h>
#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
#include "irq_remapping.h"

/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE_MAX_SUPPORTED    0x40
#define ACPI_IVMD_TYPE_ALL              0x20
#define ACPI_IVMD_TYPE                  0x21
#define ACPI_IVMD_TYPE_RANGE            0x22

#define IVHD_DEV_ALL                    0x01
#define IVHD_DEV_SELECT                 0x02
#define IVHD_DEV_SELECT_RANGE_START     0x03
#define IVHD_DEV_RANGE_END              0x04
#define IVHD_DEV_ALIAS                  0x42
#define IVHD_DEV_ALIAS_RANGE            0x43
#define IVHD_DEV_EXT_SELECT             0x46
#define IVHD_DEV_EXT_SELECT_RANGE       0x47
#define IVHD_DEV_SPECIAL                0x48
#define IVHD_DEV_ACPI_HID               0xf0

#define UID_NOT_PRESENT                 0
#define UID_IS_INTEGER                  1
#define UID_IS_CHARACTER                2

#define IVHD_SPECIAL_IOAPIC             1
#define IVHD_SPECIAL_HPET               2

#define IVHD_FLAG_HT_TUN_EN_MASK        0x01
#define IVHD_FLAG_PASSPW_EN_MASK        0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
#define IVHD_FLAG_ISOC_EN_MASK          0x08

#define IVMD_FLAG_EXCL_RANGE            0x08
#define IVMD_FLAG_UNITY_MAP             0x01

#define ACPI_DEVFLAG_INITPASS           0x01
#define ACPI_DEVFLAG_EXTINT             0x02
#define ACPI_DEVFLAG_NMI                0x04
#define ACPI_DEVFLAG_SYSMGT1            0x10
#define ACPI_DEVFLAG_SYSMGT2            0x20
#define ACPI_DEVFLAG_LINT0              0x40
#define ACPI_DEVFLAG_LINT1              0x80
#define ACPI_DEVFLAG_ATSDIS             0x10000000

#define LOOP_TIMEOUT    100000
/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

extern const struct iommu_ops amd_iommu_ops;

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entries.
 */
struct ivhd_header {
        u8 type;
        u8 flags;
        u16 length;
        u16 devid;
        u16 cap_ptr;
        u64 mmio_phys;
        u16 pci_seg;
        u16 info;
        u32 efr_attr;

        /* Following only valid on IVHD type 11h and 40h */
        u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
        u64 res;
} __attribute__((packed));

/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
struct ivhd_entry {
        u8 type;
        u16 devid;
        u8 flags;
        u32 ext;
        u32 hidh;
        u64 cid;
        u8 uidf;
        u8 uidl;
        u8 uid;
} __attribute__((packed));

/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
struct ivmd_header {
        u8 type;
        u8 flags;
        u16 length;
        u16 devid;
        u16 aux;
        u64 resv;
        u64 range_start;
        u64 range_length;
} __attribute__((packed));

bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;

int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
static int amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;

static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;
static int amd_iommu_target_ivhd_type;

u16 amd_iommu_last_bdf;                 /* largest PCI device id we have
                                           to handle */
LIST_HEAD(amd_iommu_unity_map);         /* a list of required unity mappings
                                           we find in ACPI */
bool amd_iommu_unmap_flush;             /* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);              /* list of all AMD IOMMUs in the
                                           system */

/* Array to assign indices to IOMMUs*/
struct amd_iommu *amd_iommus[MAX_IOMMUS];

/* Number of IOMMUs present in the system */
static int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

u32 amd_iommu_max_pasid __read_mostly = ~0;

bool amd_iommu_v2_present __read_mostly;
static bool amd_iommu_pc_present __read_mostly;

bool amd_iommu_force_isolation __read_mostly;

/*
 * Pointer to the device table which is shared by all AMD IOMMUs.
 * It is indexed by the PCI device id or the HT unit id and contains
 * information about the domain the device belongs to as well as the
 * page table root pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;
/*
 * Pointer to a device table to which the content of the old device table
 * will be copied. It is only used in the kdump kernel.
 */
static struct dev_table_entry *old_dev_tbl_cpy;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;
EXPORT_SYMBOL(amd_iommu_rlookup_table);

/*
 * This table is used to find the irq remapping table for a given device id
 * quickly.
 */
struct irq_remap_table **irq_lookup_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;      /* size of the device table */
static u32 alias_table_size;    /* size of the alias table */
static u32 rlookup_table_size;  /* size of the rlookup table */

enum iommu_init_state {
        IOMMU_START_STATE,
        IOMMU_IVRS_DETECTED,
        IOMMU_ACPI_FINISHED,
        IOMMU_ENABLED,
        IOMMU_PCI_INIT,
        IOMMU_INTERRUPTS_EN,
        IOMMU_DMA_OPS,
        IOMMU_INITIALIZED,
        IOMMU_NOT_FOUND,
        IOMMU_INIT_ERROR,
        IOMMU_CMDLINE_DISABLED,
};

/* Early ioapic and hpet maps from kernel command line */
#define EARLY_MAP_SIZE          4
static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];

static int __initdata early_ioapic_map_size;
static int __initdata early_hpet_map_size;
static int __initdata early_acpihid_map_size;

static bool __initdata cmdline_maps;

static enum iommu_init_state init_state = IOMMU_START_STATE;

static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);

static bool amd_iommu_pre_enabled = true;

bool translation_pre_enabled(struct amd_iommu *iommu)
{
        return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
}
EXPORT_SYMBOL(translation_pre_enabled);

static void clear_translation_pre_enabled(struct amd_iommu *iommu)
{
        iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct amd_iommu *iommu)
{
        u64 ctrl;

        ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        if (ctrl & (1<<CONTROL_IOMMU_EN))
                iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}
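/*
 * If the firmware or a previous kernel (e.g. the crashed kernel in a kdump
 * scenario) left translation enabled, the flag recorded above is what later
 * decides whether the old device table gets copied (copy_device_table()) or
 * whether the IOMMU is simply disabled and re-initialized.
 */
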
static inline void update_last_devid(u16 devid)
{
        if (devid > amd_iommu_last_bdf)
                amd_iommu_last_bdf = devid;
}

static inline unsigned long tbl_size(int entry_size)
{
        unsigned shift = PAGE_SHIFT +
                         get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

        return 1UL << shift;
}
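/*
 * Illustrative sizing (assuming 4 KiB pages and 32-byte device table
 * entries): with amd_iommu_last_bdf == 0xffff, (0xffff + 1) * 32 = 2 MiB,
 * get_order() of that is 9, so tbl_size() returns 1UL << (12 + 9) = 2 MiB.
 * The result is always a power-of-two number of pages.
 */
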
int amd_iommu_get_num_iommus(void)
{
        return amd_iommus_present;
}

/* Access to l1 and l2 indexed register spaces */

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
        u32 val;

        pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
        pci_read_config_dword(iommu->dev, 0xfc, &val);
        return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
        pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
        pci_write_config_dword(iommu->dev, 0xfc, val);
        pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
        u32 val;

        pci_write_config_dword(iommu->dev, 0xf0, address);
        pci_read_config_dword(iommu->dev, 0xf4, &val);
        return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
        pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
        pci_write_config_dword(iommu->dev, 0xf4, val);
}
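/*
 * The L1/L2 accessors above use an index/data register pair in the IOMMU's
 * PCI config space: the register address (plus the L1 bank number) is
 * written to the index register (0xf8 for L1, 0xf0 for L2), and the value
 * is then read from or written to the data register (0xfc and 0xf4
 * respectively). Bit 31 (L1) and bit 8 (L2) of the index word act as the
 * write-enable.
 */
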
/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required for that driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
        u64 start = iommu->exclusion_start & PAGE_MASK;
        u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
        u64 entry;

        if (!iommu->exclusion_start)
                return;

        entry = start | MMIO_EXCL_ENABLE_MASK;
        memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
                        &entry, sizeof(entry));

        entry = limit;
        memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
                        &entry, sizeof(entry));
}

/* Programs the physical address of the device table into the IOMMU hardware */
static void iommu_set_device_table(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->mmio_base == NULL);

        entry = iommu_virt_to_phys(amd_iommu_dev_table);
        entry |= (dev_table_size >> 12) - 1;
        memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
                        &entry, sizeof(entry));
}
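/*
 * The value programmed above encodes both the device table base address and
 * its size: the low bits hold the number of 4K pages minus one. As an
 * illustration, a 2 MiB table (the size computed by tbl_size() for a full
 * 16-bit device id space) is encoded as (phys | 0x1ff).
 */
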
/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
        u64 ctrl;

        ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        ctrl |= (1ULL << bit);
        writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
        u64 ctrl;

        ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        ctrl &= ~(1ULL << bit);
        writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
{
        u64 ctrl;

        ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        ctrl &= ~CTRL_INV_TO_MASK;
        ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
        writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
        iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
        /* Disable command buffer */
        iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

        /* Disable event logging and event interrupts */
        iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
        iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

        /* Disable IOMMU GA_LOG */
        iommu_feature_disable(iommu, CONTROL_GALOG_EN);
        iommu_feature_disable(iommu, CONTROL_GAINT_EN);

        /* Disable IOMMU hardware itself */
        iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}

/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
{
        if (!request_mem_region(address, end, "amd_iommu")) {
                pr_err("Can not reserve memory region %llx-%llx for mmio\n",
                        address, end);
                pr_err("This is a BIOS bug. Please contact your hardware vendor\n");
                return NULL;
        }

        return (u8 __iomem *)ioremap_nocache(address, end);
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
        if (iommu->mmio_base)
                iounmap(iommu->mmio_base);
        release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
}

static inline u32 get_ivhd_header_size(struct ivhd_header *h)
{
        u32 size = 0;

        switch (h->type) {
        case 0x10:
                size = 24;
                break;
        case 0x11:
        case 0x40:
                size = 40;
                break;
        }
        return size;
}
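/*
 * IVHD header sizes used above: legacy type 10h headers are 24 bytes long
 * (up to and including efr_attr in struct ivhd_header), while types 11h and
 * 40h add the 64-bit EFR mirror plus a reserved field for a total of 40
 * bytes. Unknown types return 0 so callers can reject them.
 */
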
/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Upon this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry
 */
static inline int ivhd_entry_length(u8 *ivhd)
{
        u32 type = ((struct ivhd_entry *)ivhd)->type;

        if (type < 0x80) {
                return 0x04 << (*ivhd >> 6);
        } else if (type == IVHD_DEV_ACPI_HID) {
                /* For ACPI_HID, offset 21 is uid len */
                return *((u8 *)ivhd + 21) + 22;
        }
        return 0;
}
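/*
 * Entry length encoding: for the fixed-size entries the two top bits of the
 * type select a 4/8/16/32-byte size (0x04 << (type >> 6)); e.g. type 0x42
 * (IVHD_DEV_ALIAS) yields 0x04 << 1 = 8 bytes. Variable-length ACPI HID
 * entries (type 0xf0) carry their UID length at offset 21 instead.
 */
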
/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function looks if there is a higher device id defined in the ACPI table
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
        u8 *p = (void *)h, *end = (void *)h;
        struct ivhd_entry *dev;

        u32 ivhd_size = get_ivhd_header_size(h);

        if (!ivhd_size) {
                pr_err("Unsupported IVHD type %#x\n", h->type);
                return -EINVAL;
        }

        p += ivhd_size;
        end += h->length;

        while (p < end) {
                dev = (struct ivhd_entry *)p;
                switch (dev->type) {
                case IVHD_DEV_ALL:
                        /* Use maximum BDF value for DEV_ALL */
                        update_last_devid(0xffff);
                        break;
                case IVHD_DEV_SELECT:
                case IVHD_DEV_RANGE_END:
                case IVHD_DEV_ALIAS:
                case IVHD_DEV_EXT_SELECT:
                        /* all the above subfield types refer to device ids */
                        update_last_devid(dev->devid);
                        break;
                default:
                        break;
                }
                p += ivhd_entry_length(p);
        }

        WARN_ON(p != end);

        return 0;
}

static int __init check_ivrs_checksum(struct acpi_table_header *table)
{
        int i;
        u8 checksum = 0, *p = (u8 *)table;

        for (i = 0; i < table->length; ++i)
                checksum += p[i];
        if (checksum != 0) {
                /* ACPI table corrupt */
                pr_err(FW_BUG "IVRS invalid checksum\n");
                return -ENODEV;
        }

        return 0;
}
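/*
 * The IVRS checksum verified above follows the usual ACPI rule: all bytes
 * of the table, including the checksum field itself, must sum to zero
 * modulo 256.
 */
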
/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
        u8 *p = (u8 *)table, *end = (u8 *)table;
        struct ivhd_header *h;

        p += IVRS_HEADER_LENGTH;

        end += table->length;
        while (p < end) {
                h = (struct ivhd_header *)p;
                if (h->type == amd_iommu_target_ivhd_type) {
                        int ret = find_last_devid_from_ivhd(h);

                        if (ret)
                                return ret;
                }
                p += h->length;
        }
        WARN_ON(p != end);

        return 0;
}

/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup table and also
 * basically initialize the hardware.
 *
 ****************************************************************************/

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously
 */
static int __init alloc_command_buffer(struct amd_iommu *iommu)
{
        iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                  get_order(CMD_BUFFER_SIZE));

        return iommu->cmd_buf ? 0 : -ENOMEM;
}

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
        iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

        writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
        iommu->cmd_buf_head = 0;
        iommu->cmd_buf_tail = 0;

        iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}
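/*
 * Resetting head and tail to the same value (zero) marks the command ring
 * as empty, so any commands the IOMMU had not fetched yet are discarded.
 * The buffer is disabled around the update so the hardware does not fetch
 * while the pointers are being rewritten.
 */
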
58492e12 JR |
626 | /* |
627 | * This function writes the command buffer address to the hardware and | |
628 | * enables it. | |
629 | */ | |
630 | static void iommu_enable_command_buffer(struct amd_iommu *iommu) | |
631 | { | |
632 | u64 entry; | |
633 | ||
634 | BUG_ON(iommu->cmd_buf == NULL); | |
635 | ||
2543a786 | 636 | entry = iommu_virt_to_phys(iommu->cmd_buf); |
b36ca91e | 637 | entry |= MMIO_CMD_SIZE_512; |
58492e12 | 638 | |
b36ca91e | 639 | memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, |
58492e12 | 640 | &entry, sizeof(entry)); |
b36ca91e | 641 | |
93f1cc67 | 642 | amd_iommu_reset_cmd_buffer(iommu); |
b36ca91e JR |
643 | } |
644 | ||
78d313c6 BH |
645 | /* |
646 | * This function disables the command buffer | |
647 | */ | |
648 | static void iommu_disable_command_buffer(struct amd_iommu *iommu) | |
649 | { | |
650 | iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); | |
651 | } | |
652 | ||
b36ca91e JR |
653 | static void __init free_command_buffer(struct amd_iommu *iommu) |
654 | { | |
deba4bce | 655 | free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE)); |
b36ca91e JR |
656 | } |
657 | ||
335503e5 | 658 | /* allocates the memory where the IOMMU will log its events to */ |
f2c2db53 | 659 | static int __init alloc_event_buffer(struct amd_iommu *iommu) |
335503e5 | 660 | { |
f2c2db53 JR |
661 | iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, |
662 | get_order(EVT_BUFFER_SIZE)); | |
335503e5 | 663 | |
f2c2db53 | 664 | return iommu->evt_buf ? 0 : -ENOMEM; |
58492e12 JR |
665 | } |
666 | ||
667 | static void iommu_enable_event_buffer(struct amd_iommu *iommu) | |
668 | { | |
669 | u64 entry; | |
670 | ||
671 | BUG_ON(iommu->evt_buf == NULL); | |
672 | ||
2543a786 | 673 | entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK; |
58492e12 | 674 | |
335503e5 JR |
675 | memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET, |
676 | &entry, sizeof(entry)); | |
677 | ||
09067207 JR |
678 | /* set head and tail to zero manually */ |
679 | writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); | |
680 | writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); | |
681 | ||
58492e12 | 682 | iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN); |
335503e5 JR |
683 | } |
684 | ||
78d313c6 BH |
685 | /* |
686 | * This function disables the event log buffer | |
687 | */ | |
688 | static void iommu_disable_event_buffer(struct amd_iommu *iommu) | |
689 | { | |
690 | iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); | |
691 | } | |
692 | ||
335503e5 JR |
693 | static void __init free_event_buffer(struct amd_iommu *iommu) |
694 | { | |
695 | free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE)); | |
696 | } | |
697 | ||
1a29ac01 | 698 | /* allocates the memory where the IOMMU will log its events to */ |
f2c2db53 | 699 | static int __init alloc_ppr_log(struct amd_iommu *iommu) |
1a29ac01 | 700 | { |
f2c2db53 JR |
701 | iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, |
702 | get_order(PPR_LOG_SIZE)); | |
1a29ac01 | 703 | |
f2c2db53 | 704 | return iommu->ppr_log ? 0 : -ENOMEM; |
1a29ac01 JR |
705 | } |
706 | ||
707 | static void iommu_enable_ppr_log(struct amd_iommu *iommu) | |
708 | { | |
709 | u64 entry; | |
710 | ||
711 | if (iommu->ppr_log == NULL) | |
712 | return; | |
713 | ||
2543a786 | 714 | entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512; |
1a29ac01 JR |
715 | |
716 | memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET, | |
717 | &entry, sizeof(entry)); | |
718 | ||
719 | /* set head and tail to zero manually */ | |
720 | writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); | |
721 | writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); | |
722 | ||
723 | iommu_feature_enable(iommu, CONTROL_PPFLOG_EN); | |
724 | iommu_feature_enable(iommu, CONTROL_PPR_EN); | |
725 | } | |
726 | ||
727 | static void __init free_ppr_log(struct amd_iommu *iommu) | |
728 | { | |
729 | if (iommu->ppr_log == NULL) | |
730 | return; | |
731 | ||
732 | free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE)); | |
733 | } | |
734 | ||
8bda0cfb SS |
735 | static void free_ga_log(struct amd_iommu *iommu) |
736 | { | |
737 | #ifdef CONFIG_IRQ_REMAP | |
738 | if (iommu->ga_log) | |
739 | free_pages((unsigned long)iommu->ga_log, | |
740 | get_order(GA_LOG_SIZE)); | |
741 | if (iommu->ga_log_tail) | |
742 | free_pages((unsigned long)iommu->ga_log_tail, | |
743 | get_order(8)); | |
744 | #endif | |
745 | } | |
746 | ||
747 | static int iommu_ga_log_enable(struct amd_iommu *iommu) | |
748 | { | |
749 | #ifdef CONFIG_IRQ_REMAP | |
750 | u32 status, i; | |
751 | ||
752 | if (!iommu->ga_log) | |
753 | return -EINVAL; | |
754 | ||
755 | status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); | |
756 | ||
757 | /* Check if already running */ | |
758 | if (status & (MMIO_STATUS_GALOG_RUN_MASK)) | |
759 | return 0; | |
760 | ||
761 | iommu_feature_enable(iommu, CONTROL_GAINT_EN); | |
762 | iommu_feature_enable(iommu, CONTROL_GALOG_EN); | |
763 | ||
764 | for (i = 0; i < LOOP_TIMEOUT; ++i) { | |
765 | status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); | |
766 | if (status & (MMIO_STATUS_GALOG_RUN_MASK)) | |
767 | break; | |
768 | } | |
769 | ||
770 | if (i >= LOOP_TIMEOUT) | |
771 | return -EINVAL; | |
772 | #endif /* CONFIG_IRQ_REMAP */ | |
773 | return 0; | |
774 | } | |
775 | ||
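/*
 * After setting CONTROL_GALOG_EN the hardware is polled (up to LOOP_TIMEOUT
 * iterations) for the GALogRun status bit; if it never becomes active the
 * guest virtual APIC log is treated as unusable and -EINVAL is returned.
 */
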
#ifdef CONFIG_IRQ_REMAP
static int iommu_init_ga_log(struct amd_iommu *iommu)
{
        u64 entry;

        if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
                return 0;

        iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                        get_order(GA_LOG_SIZE));
        if (!iommu->ga_log)
                goto err_out;

        iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                        get_order(8));
        if (!iommu->ga_log_tail)
                goto err_out;

        entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
        memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
                    &entry, sizeof(entry));
        entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
                 (BIT_ULL(52)-1)) & ~7ULL;
        memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
                    &entry, sizeof(entry));
        writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);

        return 0;
err_out:
        free_ga_log(iommu);
        return -EINVAL;
}
#endif /* CONFIG_IRQ_REMAP */

static int iommu_init_ga(struct amd_iommu *iommu)
{
        int ret = 0;

#ifdef CONFIG_IRQ_REMAP
        /* Note: We have already checked GASup from IVRS table.
         *       Now, we need to make sure that GAMSup is set.
         */
        if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
            !iommu_feature(iommu, FEATURE_GAM_VAPIC))
                amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;

        ret = iommu_init_ga_log(iommu);
#endif /* CONFIG_IRQ_REMAP */

        return ret;
}

static void iommu_enable_xt(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
        /*
         * XT mode (32-bit APIC destination ID) requires
         * GA mode (128-bit IRTE support) as a prerequisite.
         */
        if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
            amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
                iommu_feature_enable(iommu, CONTROL_XT_EN);
#endif /* CONFIG_IRQ_REMAP */
}

static void iommu_enable_gt(struct amd_iommu *iommu)
{
        if (!iommu_feature(iommu, FEATURE_GT))
                return;

        iommu_feature_enable(iommu, CONTROL_GT_EN);
}

/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
        int i = (bit >> 6) & 0x03;
        int _bit = bit & 0x3f;

        amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
        int i = (bit >> 6) & 0x03;
        int _bit = bit & 0x3f;

        return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}
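/*
 * A device table entry is 256 bits wide, stored as four 64-bit words in
 * data[0..3]. The helpers above therefore index word (bit >> 6) and bit
 * position (bit & 0x3f) within that word; e.g. an abstract bit number 96
 * lands in data[1], bit 32.
 */
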
static bool copy_device_table(void)
{
        u64 int_ctl, int_tab_len, entry = 0, last_entry = 0;
        struct dev_table_entry *old_devtb = NULL;
        u32 lo, hi, devid, old_devtb_size;
        phys_addr_t old_devtb_phys;
        struct amd_iommu *iommu;
        u16 dom_id, dte_v, irq_v;
        gfp_t gfp_flag;
        u64 tmp;

        if (!amd_iommu_pre_enabled)
                return false;

        pr_warn("Translation is already enabled - trying to copy translation structures\n");
        for_each_iommu(iommu) {
                /* All IOMMUs should use the same device table with the same size */
                lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
                hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
                entry = (((u64) hi) << 32) + lo;
                if (last_entry && last_entry != entry) {
                        pr_err("IOMMU:%d should use the same dev table as others!\n",
                                iommu->index);
                        return false;
                }
                last_entry = entry;

                old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
                if (old_devtb_size != dev_table_size) {
                        pr_err("The device table size of IOMMU:%d is not expected!\n",
                                iommu->index);
                        return false;
                }
        }

        /*
         * When SME is enabled in the first kernel, the entry includes the
         * memory encryption mask (sme_me_mask), which must be cleared to
         * obtain the true physical address in the kdump kernel.
         */
        old_devtb_phys = __sme_clr(entry) & PAGE_MASK;

        if (old_devtb_phys >= 0x100000000ULL) {
                pr_err("The address of old device table is above 4G, not trustworthy!\n");
                return false;
        }
        old_devtb = (sme_active() && is_kdump_kernel())
                    ? (__force void *)ioremap_encrypted(old_devtb_phys,
                                                        dev_table_size)
                    : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);

        if (!old_devtb)
                return false;

        gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
        old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
                                get_order(dev_table_size));
        if (old_dev_tbl_cpy == NULL) {
                pr_err("Failed to allocate memory for copying old device table!\n");
                return false;
        }

        for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
                old_dev_tbl_cpy[devid] = old_devtb[devid];
                dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
                dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;

                if (dte_v && dom_id) {
                        old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
                        old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
                        __set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
                        /* If gcr3 table existed, mask it out */
                        if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
                                tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
                                tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
                                old_dev_tbl_cpy[devid].data[1] &= ~tmp;
                                tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
                                tmp |= DTE_FLAG_GV;
                                old_dev_tbl_cpy[devid].data[0] &= ~tmp;
                        }
                }

                irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
                int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
                int_tab_len = old_devtb[devid].data[2] & DTE_IRQ_TABLE_LEN_MASK;
                if (irq_v && (int_ctl || int_tab_len)) {
                        if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
                            (int_tab_len != DTE_IRQ_TABLE_LEN)) {
                                pr_err("Wrong old irq remapping flag: %#x\n", devid);
                                return false;
                        }

                        old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
                }
        }
        memunmap(old_devtb);

        return true;
}
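/*
 * Summary of the kdump hand-over above: the old kernel's device table is
 * mapped and copied so that devices keep valid DMA and IRQ translations
 * while the crash kernel boots. Only the valid/domain words and the
 * interrupt-remapping word are carried over; GCR3 (guest paging) pointers
 * are masked out since nothing here copies the tables they point to.
 */
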
void amd_iommu_apply_erratum_63(u16 devid)
{
        int sysmgt;

        sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
                 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

        if (sysmgt == 0x01)
                set_dev_entry_bit(devid, DEV_ENTRY_IW);
}

/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
        amd_iommu_rlookup_table[devid] = iommu;
}

/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
                                           u16 devid, u32 flags, u32 ext_flags)
{
        if (flags & ACPI_DEVFLAG_INITPASS)
                set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
        if (flags & ACPI_DEVFLAG_EXTINT)
                set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
        if (flags & ACPI_DEVFLAG_NMI)
                set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
        if (flags & ACPI_DEVFLAG_SYSMGT1)
                set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
        if (flags & ACPI_DEVFLAG_SYSMGT2)
                set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
        if (flags & ACPI_DEVFLAG_LINT0)
                set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
        if (flags & ACPI_DEVFLAG_LINT1)
                set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

        amd_iommu_apply_erratum_63(devid);

        set_iommu_for_device(iommu, devid);
}

static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
{
        struct devid_map *entry;
        struct list_head *list;

        if (type == IVHD_SPECIAL_IOAPIC)
                list = &ioapic_map;
        else if (type == IVHD_SPECIAL_HPET)
                list = &hpet_map;
        else
                return -EINVAL;

        list_for_each_entry(entry, list, list) {
                if (!(entry->id == id && entry->cmd_line))
                        continue;

                pr_info("Command-line override present for %s id %d - ignoring\n",
                        type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);

                *devid = entry->devid;

                return 0;
        }

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        entry->id       = id;
        entry->devid    = *devid;
        entry->cmd_line = cmd_line;

        list_add_tail(&entry->list, list);

        return 0;
}
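/*
 * Entries with cmd_line set were inserted through add_early_maps() from the
 * early command-line maps (populated by the ivrs_ioapic/ivrs_hpet kernel
 * parameter parsers elsewhere in this file); when an IVRS-provided mapping
 * later collides with one of them, the command-line devid wins and is
 * handed back through *devid.
 */
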
2a0cb4e2 WZ |
1049 | static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid, |
1050 | bool cmd_line) | |
1051 | { | |
1052 | struct acpihid_map_entry *entry; | |
1053 | struct list_head *list = &acpihid_map; | |
1054 | ||
1055 | list_for_each_entry(entry, list, list) { | |
1056 | if (strcmp(entry->hid, hid) || | |
1057 | (*uid && *entry->uid && strcmp(entry->uid, uid)) || | |
1058 | !entry->cmd_line) | |
1059 | continue; | |
1060 | ||
101fa037 | 1061 | pr_info("Command-line override for hid:%s uid:%s\n", |
2a0cb4e2 WZ |
1062 | hid, uid); |
1063 | *devid = entry->devid; | |
1064 | return 0; | |
1065 | } | |
1066 | ||
1067 | entry = kzalloc(sizeof(*entry), GFP_KERNEL); | |
1068 | if (!entry) | |
1069 | return -ENOMEM; | |
1070 | ||
1071 | memcpy(entry->uid, uid, strlen(uid)); | |
1072 | memcpy(entry->hid, hid, strlen(hid)); | |
1073 | entry->devid = *devid; | |
1074 | entry->cmd_line = cmd_line; | |
1075 | entry->root_devid = (entry->devid & (~0x7)); | |
1076 | ||
101fa037 | 1077 | pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n", |
2a0cb4e2 WZ |
1078 | entry->cmd_line ? "cmd" : "ivrs", |
1079 | entry->hid, entry->uid, entry->root_devid); | |
1080 | ||
1081 | list_add_tail(&entry->list, list); | |
1082 | return 0; | |
1083 | } | |
1084 | ||
235dacbc JR |
1085 | static int __init add_early_maps(void) |
1086 | { | |
1087 | int i, ret; | |
1088 | ||
1089 | for (i = 0; i < early_ioapic_map_size; ++i) { | |
1090 | ret = add_special_device(IVHD_SPECIAL_IOAPIC, | |
1091 | early_ioapic_map[i].id, | |
c50e3247 | 1092 | &early_ioapic_map[i].devid, |
235dacbc JR |
1093 | early_ioapic_map[i].cmd_line); |
1094 | if (ret) | |
1095 | return ret; | |
1096 | } | |
1097 | ||
1098 | for (i = 0; i < early_hpet_map_size; ++i) { | |
1099 | ret = add_special_device(IVHD_SPECIAL_HPET, | |
1100 | early_hpet_map[i].id, | |
c50e3247 | 1101 | &early_hpet_map[i].devid, |
235dacbc JR |
1102 | early_hpet_map[i].cmd_line); |
1103 | if (ret) | |
1104 | return ret; | |
1105 | } | |
1106 | ||
2a0cb4e2 WZ |
1107 | for (i = 0; i < early_acpihid_map_size; ++i) { |
1108 | ret = add_acpi_hid_device(early_acpihid_map[i].hid, | |
1109 | early_acpihid_map[i].uid, | |
1110 | &early_acpihid_map[i].devid, | |
1111 | early_acpihid_map[i].cmd_line); | |
1112 | if (ret) | |
1113 | return ret; | |
1114 | } | |
1115 | ||
235dacbc JR |
1116 | return 0; |
1117 | } | |
1118 | ||
b65233a9 | 1119 | /* |
df805abb | 1120 | * Reads the device exclusion range from ACPI and initializes the IOMMU with |
b65233a9 JR |
1121 | * it |
1122 | */ | |
3566b778 JR |
1123 | static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m) |
1124 | { | |
1125 | struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; | |
1126 | ||
1127 | if (!(m->flags & IVMD_FLAG_EXCL_RANGE)) | |
1128 | return; | |
1129 | ||
1130 | if (iommu) { | |
b65233a9 JR |
1131 | /* |
1132 | * We only can configure exclusion ranges per IOMMU, not | |
1133 | * per device. But we can enable the exclusion range per | |
1134 | * device. This is done here | |
1135 | */ | |
2c16c9fd | 1136 | set_dev_entry_bit(devid, DEV_ENTRY_EX); |
3566b778 JR |
1137 | iommu->exclusion_start = m->range_start; |
1138 | iommu->exclusion_length = m->range_length; | |
1139 | } | |
1140 | } | |
1141 | ||
b65233a9 JR |
1142 | /* |
1143 | * Takes a pointer to an AMD IOMMU entry in the ACPI table and | |
1144 | * initializes the hardware and our data structures with it. | |
1145 | */ | |
6efed63b | 1146 | static int __init init_iommu_from_acpi(struct amd_iommu *iommu, |
5d0c8e49 JR |
1147 | struct ivhd_header *h) |
1148 | { | |
1149 | u8 *p = (u8 *)h; | |
1150 | u8 *end = p, flags = 0; | |
0de66d5b JR |
1151 | u16 devid = 0, devid_start = 0, devid_to = 0; |
1152 | u32 dev_i, ext_flags = 0; | |
58a3bee5 | 1153 | bool alias = false; |
5d0c8e49 | 1154 | struct ivhd_entry *e; |
ac7ccf67 | 1155 | u32 ivhd_size; |
235dacbc JR |
1156 | int ret; |
1157 | ||
1158 | ||
1159 | ret = add_early_maps(); | |
1160 | if (ret) | |
1161 | return ret; | |
5d0c8e49 JR |
1162 | |
1163 | /* | |
e9bf5197 | 1164 | * First save the recommended feature enable bits from ACPI |
5d0c8e49 | 1165 | */ |
e9bf5197 | 1166 | iommu->acpi_flags = h->flags; |
5d0c8e49 JR |
1167 | |
1168 | /* | |
1169 | * Done. Now parse the device entries | |
1170 | */ | |
ac7ccf67 SS |
1171 | ivhd_size = get_ivhd_header_size(h); |
1172 | if (!ivhd_size) { | |
101fa037 | 1173 | pr_err("Unsupported IVHD type %#x\n", h->type); |
ac7ccf67 SS |
1174 | return -EINVAL; |
1175 | } | |
1176 | ||
1177 | p += ivhd_size; | |
1178 | ||
5d0c8e49 JR |
1179 | end += h->length; |
1180 | ||
42a698f4 | 1181 | |
5d0c8e49 JR |
1182 | while (p < end) { |
1183 | e = (struct ivhd_entry *)p; | |
1184 | switch (e->type) { | |
1185 | case IVHD_DEV_ALL: | |
42a698f4 | 1186 | |
226e889b | 1187 | DUMP_printk(" DEV_ALL\t\t\tflags: %02x\n", e->flags); |
42a698f4 | 1188 | |
226e889b JR |
1189 | for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i) |
1190 | set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0); | |
5d0c8e49 JR |
1191 | break; |
1192 | case IVHD_DEV_SELECT: | |
42a698f4 JR |
1193 | |
1194 | DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x " | |
1195 | "flags: %02x\n", | |
c5081cd7 | 1196 | PCI_BUS_NUM(e->devid), |
42a698f4 JR |
1197 | PCI_SLOT(e->devid), |
1198 | PCI_FUNC(e->devid), | |
1199 | e->flags); | |
1200 | ||
5d0c8e49 | 1201 | devid = e->devid; |
5ff4789d | 1202 | set_dev_entry_from_acpi(iommu, devid, e->flags, 0); |
5d0c8e49 JR |
1203 | break; |
1204 | case IVHD_DEV_SELECT_RANGE_START: | |
42a698f4 JR |
1205 | |
1206 | DUMP_printk(" DEV_SELECT_RANGE_START\t " | |
1207 | "devid: %02x:%02x.%x flags: %02x\n", | |
c5081cd7 | 1208 | PCI_BUS_NUM(e->devid), |
42a698f4 JR |
1209 | PCI_SLOT(e->devid), |
1210 | PCI_FUNC(e->devid), | |
1211 | e->flags); | |
1212 | ||
5d0c8e49 JR |
1213 | devid_start = e->devid; |
1214 | flags = e->flags; | |
1215 | ext_flags = 0; | |
58a3bee5 | 1216 | alias = false; |
5d0c8e49 JR |
1217 | break; |
1218 | case IVHD_DEV_ALIAS: | |
42a698f4 JR |
1219 | |
1220 | DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x " | |
1221 | "flags: %02x devid_to: %02x:%02x.%x\n", | |
c5081cd7 | 1222 | PCI_BUS_NUM(e->devid), |
42a698f4 JR |
1223 | PCI_SLOT(e->devid), |
1224 | PCI_FUNC(e->devid), | |
1225 | e->flags, | |
c5081cd7 | 1226 | PCI_BUS_NUM(e->ext >> 8), |
42a698f4 JR |
1227 | PCI_SLOT(e->ext >> 8), |
1228 | PCI_FUNC(e->ext >> 8)); | |
1229 | ||
5d0c8e49 JR |
1230 | devid = e->devid; |
1231 | devid_to = e->ext >> 8; | |
7a6a3a08 | 1232 | set_dev_entry_from_acpi(iommu, devid , e->flags, 0); |
7455aab1 | 1233 | set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0); |
5d0c8e49 JR |
1234 | amd_iommu_alias_table[devid] = devid_to; |
1235 | break; | |
1236 | case IVHD_DEV_ALIAS_RANGE: | |
42a698f4 JR |
1237 | |
1238 | DUMP_printk(" DEV_ALIAS_RANGE\t\t " | |
1239 | "devid: %02x:%02x.%x flags: %02x " | |
1240 | "devid_to: %02x:%02x.%x\n", | |
c5081cd7 | 1241 | PCI_BUS_NUM(e->devid), |
42a698f4 JR |
1242 | PCI_SLOT(e->devid), |
1243 | PCI_FUNC(e->devid), | |
1244 | e->flags, | |
c5081cd7 | 1245 | PCI_BUS_NUM(e->ext >> 8), |
42a698f4 JR |
1246 | PCI_SLOT(e->ext >> 8), |
1247 | PCI_FUNC(e->ext >> 8)); | |
1248 | ||
5d0c8e49 JR |
1249 | devid_start = e->devid; |
1250 | flags = e->flags; | |
1251 | devid_to = e->ext >> 8; | |
1252 | ext_flags = 0; | |
58a3bee5 | 1253 | alias = true; |
5d0c8e49 JR |
1254 | break; |
1255 | case IVHD_DEV_EXT_SELECT: | |
42a698f4 JR |
1256 | |
1257 | DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x " | |
1258 | "flags: %02x ext: %08x\n", | |
c5081cd7 | 1259 | PCI_BUS_NUM(e->devid), |
42a698f4 JR |
1260 | PCI_SLOT(e->devid), |
1261 | PCI_FUNC(e->devid), | |
1262 | e->flags, e->ext); | |
1263 | ||
5d0c8e49 | 1264 | devid = e->devid; |
5ff4789d JR |
1265 | set_dev_entry_from_acpi(iommu, devid, e->flags, |
1266 | e->ext); | |
5d0c8e49 JR |
1267 | break; |
1268 | case IVHD_DEV_EXT_SELECT_RANGE: | |
42a698f4 JR |
1269 | |
1270 | DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: " | |
1271 | "%02x:%02x.%x flags: %02x ext: %08x\n", | |
c5081cd7 | 1272 | PCI_BUS_NUM(e->devid), |
42a698f4 JR |
1273 | PCI_SLOT(e->devid), |
1274 | PCI_FUNC(e->devid), | |
1275 | e->flags, e->ext); | |
1276 | ||
5d0c8e49 JR |
1277 | devid_start = e->devid; |
1278 | flags = e->flags; | |
1279 | ext_flags = e->ext; | |
58a3bee5 | 1280 | alias = false; |
5d0c8e49 JR |
1281 | break; |
1282 | case IVHD_DEV_RANGE_END: | |
42a698f4 JR |
1283 | |
1284 | DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n", | |
c5081cd7 | 1285 | PCI_BUS_NUM(e->devid), |
42a698f4 JR |
1286 | PCI_SLOT(e->devid), |
1287 | PCI_FUNC(e->devid)); | |
1288 | ||
5d0c8e49 JR |
1289 | devid = e->devid; |
1290 | for (dev_i = devid_start; dev_i <= devid; ++dev_i) { | |
7a6a3a08 | 1291 | if (alias) { |
5d0c8e49 | 1292 | amd_iommu_alias_table[dev_i] = devid_to; |
7a6a3a08 JR |
1293 | set_dev_entry_from_acpi(iommu, |
1294 | devid_to, flags, ext_flags); | |
1295 | } | |
1296 | set_dev_entry_from_acpi(iommu, dev_i, | |
1297 | flags, ext_flags); | |
5d0c8e49 JR |
1298 | } |
1299 | break; | |
6efed63b JR |
1300 | case IVHD_DEV_SPECIAL: { |
1301 | u8 handle, type; | |
1302 | const char *var; | |
1303 | u16 devid; | |
1304 | int ret; | |
1305 | ||
1306 | handle = e->ext & 0xff; | |
1307 | devid = (e->ext >> 8) & 0xffff; | |
1308 | type = (e->ext >> 24) & 0xff; | |
1309 | ||
1310 | if (type == IVHD_SPECIAL_IOAPIC) | |
1311 | var = "IOAPIC"; | |
1312 | else if (type == IVHD_SPECIAL_HPET) | |
1313 | var = "HPET"; | |
1314 | else | |
1315 | var = "UNKNOWN"; | |
1316 | ||
1317 | DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n", | |
1318 | var, (int)handle, | |
c5081cd7 | 1319 | PCI_BUS_NUM(devid), |
6efed63b JR |
1320 | PCI_SLOT(devid), |
1321 | PCI_FUNC(devid)); | |
1322 | ||
c50e3247 | 1323 | ret = add_special_device(type, handle, &devid, false); |
6efed63b JR |
1324 | if (ret) |
1325 | return ret; | |
c50e3247 JR |
1326 | |
1327 | /* | |
1328 | * add_special_device might update the devid in case a | |
1329 | * command-line override is present. So call | |
1330 | * set_dev_entry_from_acpi after add_special_device. | |
1331 | */ | |
1332 | set_dev_entry_from_acpi(iommu, devid, e->flags, 0); | |
1333 | ||
6efed63b JR |
1334 | break; |
1335 | } | |
2a0cb4e2 WZ |
1336 | case IVHD_DEV_ACPI_HID: { |
1337 | u16 devid; | |
1338 | u8 hid[ACPIHID_HID_LEN] = {0}; | |
1339 | u8 uid[ACPIHID_UID_LEN] = {0}; | |
1340 | int ret; | |
1341 | ||
1342 | if (h->type != 0x40) { | |
1343 | pr_err(FW_BUG "Invalid IVHD device type %#x\n", | |
1344 | e->type); | |
1345 | break; | |
1346 | } | |
1347 | ||
1348 | memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1); | |
1349 | hid[ACPIHID_HID_LEN - 1] = '\0'; | |
1350 | ||
1351 | if (!(*hid)) { | |
1352 | pr_err(FW_BUG "Invalid HID.\n"); | |
1353 | break; | |
1354 | } | |
1355 | ||
1356 | switch (e->uidf) { | |
1357 | case UID_NOT_PRESENT: | |
1358 | ||
1359 | if (e->uidl != 0) | |
1360 | pr_warn(FW_BUG "Invalid UID length.\n"); | |
1361 | ||
1362 | break; | |
1363 | case UID_IS_INTEGER: | |
1364 | ||
1365 | sprintf(uid, "%d", e->uid); | |
1366 | ||
1367 | break; | |
1368 | case UID_IS_CHARACTER: | |
1369 | ||
1370 | memcpy(uid, (u8 *)(&e->uid), ACPIHID_UID_LEN - 1); | |
1371 | uid[ACPIHID_UID_LEN - 1] = '\0'; | |
1372 | ||
1373 | break; | |
1374 | default: | |
1375 | break; | |
1376 | } | |
1377 | ||
6082ee72 | 1378 | devid = e->devid; |
2a0cb4e2 WZ |
1379 | DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n", |
1380 | hid, uid, | |
1381 | PCI_BUS_NUM(devid), | |
1382 | PCI_SLOT(devid), | |
1383 | PCI_FUNC(devid)); | |
1384 | ||
2a0cb4e2 WZ |
1385 | flags = e->flags; |
1386 | ||
1387 | ret = add_acpi_hid_device(hid, uid, &devid, false); | |
1388 | if (ret) | |
1389 | return ret; | |
1390 | ||
1391 | /* | |
1392 | * add_special_device might update the devid in case a | |
1393 | * command-line override is present. So call | |
1394 | * set_dev_entry_from_acpi after add_special_device. | |
1395 | */ | |
1396 | set_dev_entry_from_acpi(iommu, devid, e->flags, 0); | |
1397 | ||
1398 | break; | |
1399 | } | |
5d0c8e49 JR |
1400 | default: |
1401 | break; | |
1402 | } | |
1403 | ||
b514e555 | 1404 | p += ivhd_entry_length(p); |
5d0c8e49 | 1405 | } |
6efed63b JR |
1406 | |
1407 | return 0; | |
5d0c8e49 JR |
1408 | } |
1409 | ||
e47d402d JR |
1410 | static void __init free_iommu_one(struct amd_iommu *iommu) |
1411 | { | |
1412 | free_command_buffer(iommu); | |
335503e5 | 1413 | free_event_buffer(iommu); |
1a29ac01 | 1414 | free_ppr_log(iommu); |
8bda0cfb | 1415 | free_ga_log(iommu); |
e47d402d JR |
1416 | iommu_unmap_mmio_space(iommu); |
1417 | } | |
1418 | ||
1419 | static void __init free_iommu_all(void) | |
1420 | { | |
1421 | struct amd_iommu *iommu, *next; | |
1422 | ||
3bd22172 | 1423 | for_each_iommu_safe(iommu, next) { |
e47d402d JR |
1424 | list_del(&iommu->list); |
1425 | free_iommu_one(iommu); | |
1426 | kfree(iommu); | |
1427 | } | |
1428 | } | |
1429 | ||
318fe782 SS |
1430 | /* |
1431 | * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations) | |
1432 | * Workaround: | |
1433 | * BIOS should disable L2B micellaneous clock gating by setting | |
1434 | * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b | |
1435 | */ | |
e2f1a3bd | 1436 | static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu) |
318fe782 SS |
1437 | { |
1438 | u32 value; | |
1439 | ||
1440 | if ((boot_cpu_data.x86 != 0x15) || | |
1441 | (boot_cpu_data.x86_model < 0x10) || | |
1442 | (boot_cpu_data.x86_model > 0x1f)) | |
1443 | return; | |
1444 | ||
1445 | pci_write_config_dword(iommu->dev, 0xf0, 0x90); | |
1446 | pci_read_config_dword(iommu->dev, 0xf4, &value); | |
1447 | ||
1448 | if (value & BIT(2)) | |
1449 | return; | |
1450 | ||
1451 | /* Select NB indirect register 0x90 and enable writing */ | |
1452 | pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8)); | |
1453 | ||
1454 | pci_write_config_dword(iommu->dev, 0xf4, value | 0x4); | |
5f226da1 | 1455 | pci_info(iommu->dev, "Applying erratum 746 workaround\n"); |
318fe782 SS |
1456 | |
1457 | /* Clear the enable writing bit */ | |
1458 | pci_write_config_dword(iommu->dev, 0xf0, 0x90); | |
1459 | } | |
1460 | ||
358875fd JC |
1461 | /* |
1462 | * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission) | |
1463 | * Workaround: | |
1464 | * BIOS should enable ATS write permission check by setting | |
1465 | * L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b | |
1466 | */ | |
1467 | static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu) | |
1468 | { | |
1469 | u32 value; | |
1470 | ||
1471 | if ((boot_cpu_data.x86 != 0x15) || | |
1472 | (boot_cpu_data.x86_model < 0x30) || | |
1473 | (boot_cpu_data.x86_model > 0x3f)) | |
1474 | return; | |
1475 | ||
1476 | /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */ | |
1477 | value = iommu_read_l2(iommu, 0x47); | |
1478 | ||
1479 | if (value & BIT(0)) | |
1480 | return; | |
1481 | ||
1482 | /* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */ | |
1483 | iommu_write_l2(iommu, 0x47, value | BIT(0)); | |
1484 | ||
5f226da1 | 1485 | pci_info(iommu->dev, "Applying ATS write check workaround\n"); |
358875fd JC |
1486 | } |
1487 | ||
b65233a9 JR |
1488 | /* |
1489 | * This function clues the initialization function for one IOMMU | |
1490 | * together and also allocates the command buffer and programs the | |
1491 | * hardware. It does NOT enable the IOMMU. This is done afterwards. | |
1492 | */ | |
e47d402d JR |
1493 | static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) |
1494 | { | |
6efed63b JR |
1495 | int ret; |
1496 | ||
27790398 | 1497 | raw_spin_lock_init(&iommu->lock); |
bb52777e JR |
1498 | |
1499 | /* Add IOMMU to internal data structures */ | |
e47d402d | 1500 | list_add_tail(&iommu->list, &amd_iommu_list); |
6b9376e3 | 1501 | iommu->index = amd_iommus_present++; |
bb52777e JR |
1502 | |
1503 | if (unlikely(iommu->index >= MAX_IOMMUS)) { | |
101fa037 | 1504 | WARN(1, "System has more IOMMUs than supported by this driver\n"); |
bb52777e JR |
1505 | return -ENOSYS; |
1506 | } | |
1507 | ||
1508 | /* Index is fine - add IOMMU to the array */ | |
1509 | amd_iommus[iommu->index] = iommu; | |
e47d402d JR |
1510 | |
1511 | /* | |
1512 | * Copy data from ACPI table entry to the iommu struct | |
1513 | */ | |
23c742db | 1514 | iommu->devid = h->devid; |
e47d402d | 1515 | iommu->cap_ptr = h->cap_ptr; |
ee893c24 | 1516 | iommu->pci_seg = h->pci_seg; |
e47d402d | 1517 | iommu->mmio_phys = h->mmio_phys; |
30861ddc | 1518 | |
7d7d38af SS |
1519 | switch (h->type) { |
1520 | case 0x10: | |
1521 | /* Check if IVHD EFR contains proper max banks/counters */ | |
1522 | if ((h->efr_attr != 0) && | |
1523 | ((h->efr_attr & (0xF << 13)) != 0) && | |
1524 | ((h->efr_attr & (0x3F << 17)) != 0)) | |
1525 | iommu->mmio_phys_end = MMIO_REG_END_OFFSET; | |
1526 | else | |
1527 | iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; | |
3928aa3f SS |
1528 | if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0)) |
1529 | amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; | |
90fcffd9 SS |
1530 | if (((h->efr_attr & (0x1 << IOMMU_FEAT_XTSUP_SHIFT)) == 0)) |
1531 | amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE; | |
7d7d38af SS |
1532 | break; |
1533 | case 0x11: | |
1534 | case 0x40: | |
1535 | if (h->efr_reg & (1 << 9)) | |
1536 | iommu->mmio_phys_end = MMIO_REG_END_OFFSET; | |
1537 | else | |
1538 | iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; | |
3928aa3f SS |
1539 | if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) |
1540 | amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; | |
90fcffd9 SS |
1541 | if (((h->efr_reg & (0x1 << IOMMU_EFR_XTSUP_SHIFT)) == 0)) |
1542 | amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE; | |
7d7d38af SS |
1543 | break; |
1544 | default: | |
1545 | return -EINVAL; | |
30861ddc SK |
1546 | } |
1547 | ||
1548 | iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys, | |
1549 | iommu->mmio_phys_end); | |
e47d402d JR |
1550 | if (!iommu->mmio_base) |
1551 | return -ENOMEM; | |
1552 | ||
f2c2db53 | 1553 | if (alloc_command_buffer(iommu)) |
e47d402d JR |
1554 | return -ENOMEM; |
1555 | ||
f2c2db53 | 1556 | if (alloc_event_buffer(iommu)) |
335503e5 JR |
1557 | return -ENOMEM; |
1558 | ||
a80dc3e0 JR |
1559 | iommu->int_enabled = false; |
1560 | ||
4c232a70 | 1561 | init_translation_status(iommu); |
3ac3e5ee BH |
1562 | if (translation_pre_enabled(iommu) && !is_kdump_kernel()) { |
1563 | iommu_disable(iommu); | |
1564 | clear_translation_pre_enabled(iommu); | |
1565 | pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n", | |
1566 | iommu->index); | |
1567 | } | |
1568 | if (amd_iommu_pre_enabled) | |
1569 | amd_iommu_pre_enabled = translation_pre_enabled(iommu); | |
4c232a70 | 1570 | |
6efed63b JR |
1571 | ret = init_iommu_from_acpi(iommu, h); |
1572 | if (ret) | |
1573 | return ret; | |
f6fec00a | 1574 | |
7c71d306 JL |
1575 | ret = amd_iommu_create_irq_domain(iommu); |
1576 | if (ret) | |
1577 | return ret; | |
1578 | ||
f6fec00a JR |
1579 | /* |
1580 | * Make sure IOMMU is not considered to translate itself. The IVRS | |
1581 | * table tells us so, but this is a lie! | |
1582 | */ | |
1583 | amd_iommu_rlookup_table[iommu->devid] = NULL; | |
1584 | ||
23c742db | 1585 | return 0; |
e47d402d JR |
1586 | } |
1587 | ||
8c7142f5 SS |
1588 | /** |
1589 | * get_highest_supported_ivhd_type - Look up the appropriate IVHD type | |
1590 | * @ivrs: Pointer to the IVRS header | |
1591 | * | |
1592 | * This function searches through all IVHD entries and returns the highest IVHD type the driver supports | |
1593 | */ | |
1594 | static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs) | |
1595 | { | |
1596 | u8 *base = (u8 *)ivrs; | |
1597 | struct ivhd_header *ivhd = (struct ivhd_header *) | |
1598 | (base + IVRS_HEADER_LENGTH); | |
1599 | u8 last_type = ivhd->type; | |
1600 | u16 devid = ivhd->devid; | |
1601 | ||
1602 | while (((u8 *)ivhd - base < ivrs->length) && | |
1603 | (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) { | |
1604 | u8 *p = (u8 *) ivhd; | |
1605 | ||
1606 | if (ivhd->devid == devid) | |
1607 | last_type = ivhd->type; | |
1608 | ivhd = (struct ivhd_header *)(p + ivhd->length); | |
1609 | } | |
1610 | ||
1611 | return last_type; | |
1612 | } | |
1613 | ||
b65233a9 JR |
1614 | /* |
1615 | * Iterates over all IOMMU entries in the ACPI table, allocates the | |
1616 | * IOMMU structure and initializes it with init_iommu_one() | |
1617 | */ | |
e47d402d JR |
1618 | static int __init init_iommu_all(struct acpi_table_header *table) |
1619 | { | |
1620 | u8 *p = (u8 *)table, *end = (u8 *)table; | |
1621 | struct ivhd_header *h; | |
1622 | struct amd_iommu *iommu; | |
1623 | int ret; | |
1624 | ||
e47d402d JR |
1625 | end += table->length; |
1626 | p += IVRS_HEADER_LENGTH; | |
1627 | ||
1628 | while (p < end) { | |
1629 | h = (struct ivhd_header *)p; | |
8c7142f5 | 1630 | if (*p == amd_iommu_target_ivhd_type) { |
9c72041f | 1631 | |
ae908c22 | 1632 | DUMP_printk("device: %02x:%02x.%01x cap: %04x " |
9c72041f | 1633 | "seg: %d flags: %01x info %04x\n", |
c5081cd7 | 1634 | PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid), |
9c72041f JR |
1635 | PCI_FUNC(h->devid), h->cap_ptr, |
1636 | h->pci_seg, h->flags, h->info); | |
1637 | DUMP_printk(" mmio-addr: %016llx\n", | |
1638 | h->mmio_phys); | |
1639 | ||
e47d402d | 1640 | iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL); |
02f3b3f5 JR |
1641 | if (iommu == NULL) |
1642 | return -ENOMEM; | |
3551a708 | 1643 | |
e47d402d | 1644 | ret = init_iommu_one(iommu, h); |
02f3b3f5 JR |
1645 | if (ret) |
1646 | return ret; | |
e47d402d JR |
1647 | } |
1648 | p += h->length; | |
1649 | ||
1650 | } | |
1651 | WARN_ON(p != end); | |
1652 | ||
1653 | return 0; | |
1654 | } | |
1655 | ||
1650dfd1 SS |
1656 | static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, |
1657 | u8 fxn, u64 *value, bool is_write); | |
30861ddc SK |
1658 | |
1659 | static void init_iommu_perf_ctr(struct amd_iommu *iommu) | |
1660 | { | |
5f226da1 | 1661 | struct pci_dev *pdev = iommu->dev; |
30861ddc SK |
1662 | u64 val = 0xabcd, val2 = 0; |
1663 | ||
1664 | if (!iommu_feature(iommu, FEATURE_PC)) | |
1665 | return; | |
1666 | ||
1667 | amd_iommu_pc_present = true; | |
1668 | ||
1669 | /* Check if the performance counters can be written to */ | |
1650dfd1 SS |
1670 | if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) || |
1671 | (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) || | |
30861ddc | 1672 | (val != val2)) { |
5f226da1 | 1673 | pci_err(pdev, "Unable to write to IOMMU perf counter.\n"); |
30861ddc SK |
1674 | amd_iommu_pc_present = false; |
1675 | return; | |
1676 | } | |
1677 | ||
5f226da1 | 1678 | pci_info(pdev, "IOMMU performance counters supported\n"); |
30861ddc SK |
1679 | |
1680 | val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET); | |
1681 | iommu->max_banks = (u8) ((val >> 12) & 0x3f); | |
1682 | iommu->max_counters = (u8) ((val >> 7) & 0xf); | |
1683 | } | |
1684 | ||
066f2e98 AW |
1685 | static ssize_t amd_iommu_show_cap(struct device *dev, |
1686 | struct device_attribute *attr, | |
1687 | char *buf) | |
1688 | { | |
b7a42b9d | 1689 | struct amd_iommu *iommu = dev_to_amd_iommu(dev); |
066f2e98 AW |
1690 | return sprintf(buf, "%x\n", iommu->cap); |
1691 | } | |
1692 | static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL); | |
1693 | ||
1694 | static ssize_t amd_iommu_show_features(struct device *dev, | |
1695 | struct device_attribute *attr, | |
1696 | char *buf) | |
1697 | { | |
b7a42b9d | 1698 | struct amd_iommu *iommu = dev_to_amd_iommu(dev); |
066f2e98 AW |
1699 | return sprintf(buf, "%llx\n", iommu->features); |
1700 | } | |
1701 | static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL); | |
1702 | ||
1703 | static struct attribute *amd_iommu_attrs[] = { | |
1704 | &dev_attr_cap.attr, | |
1705 | &dev_attr_features.attr, | |
1706 | NULL, | |
1707 | }; | |
1708 | ||
1709 | static struct attribute_group amd_iommu_group = { | |
1710 | .name = "amd-iommu", | |
1711 | .attrs = amd_iommu_attrs, | |
1712 | }; | |
1713 | ||
1714 | static const struct attribute_group *amd_iommu_groups[] = { | |
1715 | &amd_iommu_group, | |
1716 | NULL, | |
1717 | }; | |
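With the attribute group named "amd-iommu" above and the "ivhd%d" device name passed to iommu_device_sysfs_add() in iommu_init_pci() below, these read-only attributes would typically appear as /sys/class/iommu/ivhd0/amd-iommu/cap and /sys/class/iommu/ivhd0/amd-iommu/features; the exact path is an assumption about where the iommu core places its class devices.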
30861ddc | 1718 | |
24d2c521 | 1719 | static int __init iommu_init_pci(struct amd_iommu *iommu) |
23c742db JR |
1720 | { |
1721 | int cap_ptr = iommu->cap_ptr; | |
1722 | u32 range, misc, low, high; | |
8bda0cfb | 1723 | int ret; |
23c742db | 1724 | |
d5bf0f4f SK |
1725 | iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid), |
1726 | iommu->devid & 0xff); | |
23c742db JR |
1727 | if (!iommu->dev) |
1728 | return -ENODEV; | |
1729 | ||
cbbc00be JL |
1730 | /* Prevent binding other PCI device drivers to IOMMU devices */ |
1731 | iommu->dev->match_driver = false; | |
1732 | ||
23c742db JR |
1733 | pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, |
1734 | &iommu->cap); | |
1735 | pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET, | |
1736 | &range); | |
1737 | pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET, | |
1738 | &misc); | |
1739 | ||
23c742db JR |
1740 | if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB))) |
1741 | amd_iommu_iotlb_sup = false; | |
1742 | ||
1743 | /* read extended feature bits */ | |
1744 | low = readl(iommu->mmio_base + MMIO_EXT_FEATURES); | |
1745 | high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4); | |
1746 | ||
1747 | iommu->features = ((u64)high << 32) | low; | |
1748 | ||
1749 | if (iommu_feature(iommu, FEATURE_GT)) { | |
1750 | int glxval; | |
a919a018 SS |
1751 | u32 max_pasid; |
1752 | u64 pasmax; | |
23c742db | 1753 | |
a919a018 SS |
1754 | pasmax = iommu->features & FEATURE_PASID_MASK; |
1755 | pasmax >>= FEATURE_PASID_SHIFT; | |
1756 | max_pasid = (1 << (pasmax + 1)) - 1; | |
23c742db | 1757 | |
a919a018 SS |
1758 | amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid); |
1759 | ||
1760 | BUG_ON(amd_iommu_max_pasid & ~PASID_MASK); | |
23c742db JR |
1761 | |
1762 | glxval = iommu->features & FEATURE_GLXVAL_MASK; | |
1763 | glxval >>= FEATURE_GLXVAL_SHIFT; | |
1764 | ||
1765 | if (amd_iommu_max_glx_val == -1) | |
1766 | amd_iommu_max_glx_val = glxval; | |
1767 | else | |
1768 | amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval); | |
1769 | } | |
1770 | ||
1771 | if (iommu_feature(iommu, FEATURE_GT) && | |
1772 | iommu_feature(iommu, FEATURE_PPR)) { | |
1773 | iommu->is_iommu_v2 = true; | |
1774 | amd_iommu_v2_present = true; | |
1775 | } | |
1776 | ||
f2c2db53 JR |
1777 | if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu)) |
1778 | return -ENOMEM; | |
23c742db | 1779 | |
8bda0cfb SS |
1780 | ret = iommu_init_ga(iommu); |
1781 | if (ret) | |
1782 | return ret; | |
3928aa3f | 1783 | |
23c742db JR |
1784 | if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) |
1785 | amd_iommu_np_cache = true; | |
1786 | ||
30861ddc SK |
1787 | init_iommu_perf_ctr(iommu); |
1788 | ||
23c742db JR |
1789 | if (is_rd890_iommu(iommu->dev)) { |
1790 | int i, j; | |
1791 | ||
d5bf0f4f SK |
1792 | iommu->root_pdev = |
1793 | pci_get_domain_bus_and_slot(0, iommu->dev->bus->number, | |
1794 | PCI_DEVFN(0, 0)); | |
23c742db JR |
1795 | |
1796 | /* | |
1797 | * Some rd890 systems may not be fully reconfigured by the | |
1798 | * BIOS, so it's necessary for us to store this information so | |
1799 | * it can be reprogrammed on resume | |
1800 | */ | |
1801 | pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4, | |
1802 | &iommu->stored_addr_lo); | |
1803 | pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8, | |
1804 | &iommu->stored_addr_hi); | |
1805 | ||
1806 | /* Low bit locks writes to configuration space */ | |
1807 | iommu->stored_addr_lo &= ~1; | |
1808 | ||
1809 | for (i = 0; i < 6; i++) | |
1810 | for (j = 0; j < 0x12; j++) | |
1811 | iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j); | |
1812 | ||
1813 | for (i = 0; i < 0x83; i++) | |
1814 | iommu->stored_l2[i] = iommu_read_l2(iommu, i); | |
1815 | } | |
1816 | ||
318fe782 | 1817 | amd_iommu_erratum_746_workaround(iommu); |
358875fd | 1818 | amd_iommu_ats_write_check_workaround(iommu); |
318fe782 | 1819 | |
39ab9555 JR |
1820 | iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev, |
1821 | amd_iommu_groups, "ivhd%d", iommu->index); | |
b0119e87 JR |
1822 | iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops); |
1823 | iommu_device_register(&iommu->iommu); | |
066f2e98 | 1824 | |
23c742db JR |
1825 | return pci_enable_device(iommu->dev); |
1826 | } | |
1827 | ||
4d121c32 JR |
1828 | static void print_iommu_info(void) |
1829 | { | |
1830 | static const char * const feat_str[] = { | |
1831 | "PreF", "PPR", "X2APIC", "NX", "GT", "[5]", | |
1832 | "IA", "GA", "HE", "PC" | |
1833 | }; | |
1834 | struct amd_iommu *iommu; | |
1835 | ||
1836 | for_each_iommu(iommu) { | |
5f226da1 | 1837 | struct pci_dev *pdev = iommu->dev; |
4d121c32 JR |
1838 | int i; |
1839 | ||
5f226da1 | 1840 | pci_info(pdev, "Found IOMMU cap 0x%hx\n", iommu->cap_ptr); |
4d121c32 JR |
1841 | |
1842 | if (iommu->cap & (1 << IOMMU_CAP_EFR)) { | |
5f226da1 BH |
1843 | pci_info(pdev, "Extended features (%#llx):\n", |
1844 | iommu->features); | |
2bd5ed00 | 1845 | for (i = 0; i < ARRAY_SIZE(feat_str); ++i) { |
4d121c32 JR |
1846 | if (iommu_feature(iommu, (1ULL << i))) |
1847 | pr_cont(" %s", feat_str[i]); | |
1848 | } | |
3928aa3f SS |
1849 | |
1850 | if (iommu->features & FEATURE_GAM_VAPIC) | |
1851 | pr_cont(" GA_vAPIC"); | |
1852 | ||
30861ddc | 1853 | pr_cont("\n"); |
500c25ed | 1854 | } |
4d121c32 | 1855 | } |
3928aa3f | 1856 | if (irq_remapping_enabled) { |
101fa037 | 1857 | pr_info("Interrupt remapping enabled\n"); |
3928aa3f | 1858 | if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) |
101fa037 | 1859 | pr_info("Virtual APIC enabled\n"); |
90fcffd9 | 1860 | if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE) |
101fa037 | 1861 | pr_info("X2APIC enabled\n"); |
3928aa3f | 1862 | } |
4d121c32 JR |
1863 | } |
1864 | ||
2c0ae172 | 1865 | static int __init amd_iommu_init_pci(void) |
23c742db JR |
1866 | { |
1867 | struct amd_iommu *iommu; | |
1868 | int ret = 0; | |
1869 | ||
1870 | for_each_iommu(iommu) { | |
1871 | ret = iommu_init_pci(iommu); | |
1872 | if (ret) | |
1873 | break; | |
1874 | } | |
1875 | ||
522e5cb7 JR |
1876 | /* |
1877 | * Order is important here to make sure any unity map requirements are | |
1878 | * fulfilled. The unity mappings are created and written to the device | |
1879 | * table during the amd_iommu_init_api() call. | |
1880 | * | |
1881 | * After that we call init_device_table_dma() to make sure any | |
1882 | * uninitialized DTE will block DMA, and in the end we flush the caches | |
1883 | * of all IOMMUs to make sure the changes to the device table are | |
1884 | * active. | |
1885 | */ | |
1886 | ret = amd_iommu_init_api(); | |
1887 | ||
aafd8ba0 JR |
1888 | init_device_table_dma(); |
1889 | ||
1890 | for_each_iommu(iommu) | |
1891 | iommu_flush_all_caches(iommu); | |
1892 | ||
3a18404c JR |
1893 | if (!ret) |
1894 | print_iommu_info(); | |
4d121c32 | 1895 | |
23c742db JR |
1896 | return ret; |
1897 | } | |
1898 | ||
a80dc3e0 JR |
1899 | /**************************************************************************** |
1900 | * | |
1901 | * The following functions initialize the MSI interrupts for all IOMMUs | |
df805abb | 1902 | * in the system. It's a bit challenging because there could be multiple |
a80dc3e0 JR |
1903 | * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per |
1904 | * pci_dev. | |
1905 | * | |
1906 | ****************************************************************************/ | |
1907 | ||
9f800de3 | 1908 | static int iommu_setup_msi(struct amd_iommu *iommu) |
a80dc3e0 JR |
1909 | { |
1910 | int r; | |
a80dc3e0 | 1911 | |
9ddd592a JR |
1912 | r = pci_enable_msi(iommu->dev); |
1913 | if (r) | |
1914 | return r; | |
a80dc3e0 | 1915 | |
72fe00f0 JR |
1916 | r = request_threaded_irq(iommu->dev->irq, |
1917 | amd_iommu_int_handler, | |
1918 | amd_iommu_int_thread, | |
1919 | 0, "AMD-Vi", | |
3f398bc7 | 1920 | iommu); |
a80dc3e0 JR |
1921 | |
1922 | if (r) { | |
1923 | pci_disable_msi(iommu->dev); | |
9ddd592a | 1924 | return r; |
a80dc3e0 JR |
1925 | } |
1926 | ||
fab6afa3 | 1927 | iommu->int_enabled = true; |
1a29ac01 | 1928 | |
a80dc3e0 JR |
1929 | return 0; |
1930 | } | |
1931 | ||
05f92db9 | 1932 | static int iommu_init_msi(struct amd_iommu *iommu) |
a80dc3e0 | 1933 | { |
9ddd592a JR |
1934 | int ret; |
1935 | ||
a80dc3e0 | 1936 | if (iommu->int_enabled) |
9ddd592a | 1937 | goto enable_faults; |
a80dc3e0 | 1938 | |
82fcfc67 | 1939 | if (iommu->dev->msi_cap) |
9ddd592a JR |
1940 | ret = iommu_setup_msi(iommu); |
1941 | else | |
1942 | ret = -ENODEV; | |
1943 | ||
1944 | if (ret) | |
1945 | return ret; | |
a80dc3e0 | 1946 | |
9ddd592a JR |
1947 | enable_faults: |
1948 | iommu_feature_enable(iommu, CONTROL_EVT_INT_EN); | |
a80dc3e0 | 1949 | |
9ddd592a JR |
1950 | if (iommu->ppr_log != NULL) |
1951 | iommu_feature_enable(iommu, CONTROL_PPFINT_EN); | |
1952 | ||
8bda0cfb SS |
1953 | iommu_ga_log_enable(iommu); |
1954 | ||
9ddd592a | 1955 | return 0; |
a80dc3e0 JR |
1956 | } |
1957 | ||
b65233a9 JR |
1958 | /**************************************************************************** |
1959 | * | |
1960 | * The next functions belong to the third pass of parsing the ACPI | |
1961 | * table. In this last pass the memory mapping requirements are | |
df805abb | 1962 | * gathered (like exclusion and unity mapping ranges). |
b65233a9 JR |
1963 | * |
1964 | ****************************************************************************/ | |
1965 | ||
be2a022c JR |
1966 | static void __init free_unity_maps(void) |
1967 | { | |
1968 | struct unity_map_entry *entry, *next; | |
1969 | ||
1970 | list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) { | |
1971 | list_del(&entry->list); | |
1972 | kfree(entry); | |
1973 | } | |
1974 | } | |
1975 | ||
b65233a9 | 1976 | /* called when we find an exclusion range definition in ACPI */ |
be2a022c JR |
1977 | static int __init init_exclusion_range(struct ivmd_header *m) |
1978 | { | |
1979 | int i; | |
1980 | ||
1981 | switch (m->type) { | |
1982 | case ACPI_IVMD_TYPE: | |
1983 | set_device_exclusion_range(m->devid, m); | |
1984 | break; | |
1985 | case ACPI_IVMD_TYPE_ALL: | |
3a61ec38 | 1986 | for (i = 0; i <= amd_iommu_last_bdf; ++i) |
be2a022c JR |
1987 | set_device_exclusion_range(i, m); |
1988 | break; | |
1989 | case ACPI_IVMD_TYPE_RANGE: | |
1990 | for (i = m->devid; i <= m->aux; ++i) | |
1991 | set_device_exclusion_range(i, m); | |
1992 | break; | |
1993 | default: | |
1994 | break; | |
1995 | } | |
1996 | ||
1997 | return 0; | |
1998 | } | |
1999 | ||
b65233a9 | 2000 | /* called for unity map ACPI definition */ |
be2a022c JR |
2001 | static int __init init_unity_map_range(struct ivmd_header *m) |
2002 | { | |
98f1ad25 | 2003 | struct unity_map_entry *e = NULL; |
02acc43a | 2004 | char *s; |
be2a022c JR |
2005 | |
2006 | e = kzalloc(sizeof(*e), GFP_KERNEL); | |
2007 | if (e == NULL) | |
2008 | return -ENOMEM; | |
2009 | ||
8aafaaf2 JR |
2010 | if (m->flags & IVMD_FLAG_EXCL_RANGE) |
2011 | init_exclusion_range(m); | |
2012 | ||
be2a022c JR |
2013 | switch (m->type) { |
2014 | default: | |
0bc252f4 JR |
2015 | kfree(e); |
2016 | return 0; | |
be2a022c | 2017 | case ACPI_IVMD_TYPE: |
02acc43a | 2018 | s = "IVMD_TYPE\t\t\t"; |
be2a022c JR |
2019 | e->devid_start = e->devid_end = m->devid; |
2020 | break; | |
2021 | case ACPI_IVMD_TYPE_ALL: | |
02acc43a | 2022 | s = "IVMD_TYPE_ALL\t\t"; |
be2a022c JR |
2023 | e->devid_start = 0; |
2024 | e->devid_end = amd_iommu_last_bdf; | |
2025 | break; | |
2026 | case ACPI_IVMD_TYPE_RANGE: | |
02acc43a | 2027 | s = "IVMD_TYPE_RANGE\t\t"; |
be2a022c JR |
2028 | e->devid_start = m->devid; |
2029 | e->devid_end = m->aux; | |
2030 | break; | |
2031 | } | |
2032 | e->address_start = PAGE_ALIGN(m->range_start); | |
2033 | e->address_end = e->address_start + PAGE_ALIGN(m->range_length); | |
2034 | e->prot = m->flags >> 1; | |
2035 | ||
02acc43a JR |
2036 | DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x" |
2037 | " range_start: %016llx range_end: %016llx flags: %x\n", s, | |
c5081cd7 SK |
2038 | PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start), |
2039 | PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end), | |
02acc43a JR |
2040 | PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end), |
2041 | e->address_start, e->address_end, m->flags); | |
2042 | ||
be2a022c JR |
2043 | list_add_tail(&e->list, &amd_iommu_unity_map); |
2044 | ||
2045 | return 0; | |
2046 | } | |
2047 | ||
b65233a9 | 2048 | /* iterates over all memory definitions we find in the ACPI table */ |
be2a022c JR |
2049 | static int __init init_memory_definitions(struct acpi_table_header *table) |
2050 | { | |
2051 | u8 *p = (u8 *)table, *end = (u8 *)table; | |
2052 | struct ivmd_header *m; | |
2053 | ||
be2a022c JR |
2054 | end += table->length; |
2055 | p += IVRS_HEADER_LENGTH; | |
2056 | ||
2057 | while (p < end) { | |
2058 | m = (struct ivmd_header *)p; | |
8aafaaf2 | 2059 | if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE)) |
be2a022c JR |
2060 | init_unity_map_range(m); |
2061 | ||
2062 | p += m->length; | |
2063 | } | |
2064 | ||
2065 | return 0; | |
2066 | } | |
2067 | ||
9f5f5fb3 | 2068 | /* |
3ac3e5ee | 2069 | * Init the device table so that no device is allowed DMA access |
9f5f5fb3 | 2070 | */ |
33f28c59 | 2071 | static void init_device_table_dma(void) |
9f5f5fb3 | 2072 | { |
0de66d5b | 2073 | u32 devid; |
9f5f5fb3 JR |
2074 | |
2075 | for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { | |
2076 | set_dev_entry_bit(devid, DEV_ENTRY_VALID); | |
2077 | set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION); | |
9f5f5fb3 JR |
2078 | } |
2079 | } | |
2080 | ||
d04e0ba3 JR |
2081 | static void __init uninit_device_table_dma(void) |
2082 | { | |
2083 | u32 devid; | |
2084 | ||
2085 | for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { | |
2086 | amd_iommu_dev_table[devid].data[0] = 0ULL; | |
2087 | amd_iommu_dev_table[devid].data[1] = 0ULL; | |
2088 | } | |
2089 | } | |
2090 | ||
33f28c59 JR |
2091 | static void init_device_table(void) |
2092 | { | |
2093 | u32 devid; | |
2094 | ||
2095 | if (!amd_iommu_irq_remap) | |
2096 | return; | |
2097 | ||
2098 | for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) | |
2099 | set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN); | |
2100 | } | |
2101 | ||
e9bf5197 JR |
2102 | static void iommu_init_flags(struct amd_iommu *iommu) |
2103 | { | |
2104 | iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ? | |
2105 | iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : | |
2106 | iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); | |
2107 | ||
2108 | iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ? | |
2109 | iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : | |
2110 | iommu_feature_disable(iommu, CONTROL_PASSPW_EN); | |
2111 | ||
2112 | iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ? | |
2113 | iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : | |
2114 | iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); | |
2115 | ||
2116 | iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ? | |
2117 | iommu_feature_enable(iommu, CONTROL_ISOC_EN) : | |
2118 | iommu_feature_disable(iommu, CONTROL_ISOC_EN); | |
2119 | ||
2120 | /* | |
2121 | * make IOMMU memory accesses cache coherent | |
2122 | */ | |
2123 | iommu_feature_enable(iommu, CONTROL_COHERENT_EN); | |
1456e9d2 JR |
2124 | |
2125 | /* Set IOTLB invalidation timeout to 1s */ | |
2126 | iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S); | |
e9bf5197 JR |
2127 | } |
2128 | ||
5bcd757f | 2129 | static void iommu_apply_resume_quirks(struct amd_iommu *iommu) |
4c894f47 | 2130 | { |
5bcd757f MG |
2131 | int i, j; |
2132 | u32 ioc_feature_control; | |
c1bf94ec | 2133 | struct pci_dev *pdev = iommu->root_pdev; |
5bcd757f MG |
2134 | |
2135 | /* RD890 BIOSes may not have completely reconfigured the iommu */ | |
c1bf94ec | 2136 | if (!is_rd890_iommu(iommu->dev) || !pdev) |
5bcd757f MG |
2137 | return; |
2138 | ||
2139 | /* | |
2140 | * First, we need to ensure that the iommu is enabled. This is | |
2141 | * controlled by a register in the northbridge | |
2142 | */ | |
5bcd757f MG |
2143 | |
2144 | /* Select Northbridge indirect register 0x75 and enable writing */ | |
2145 | pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7)); | |
2146 | pci_read_config_dword(pdev, 0x64, &ioc_feature_control); | |
2147 | ||
2148 | /* Enable the iommu */ | |
2149 | if (!(ioc_feature_control & 0x1)) | |
2150 | pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1); | |
2151 | ||
5bcd757f MG |
2152 | /* Restore the iommu BAR */ |
2153 | pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, | |
2154 | iommu->stored_addr_lo); | |
2155 | pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8, | |
2156 | iommu->stored_addr_hi); | |
2157 | ||
2158 | /* Restore the l1 indirect regs for each of the 6 l1s */ | |
2159 | for (i = 0; i < 6; i++) | |
2160 | for (j = 0; j < 0x12; j++) | |
2161 | iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]); | |
2162 | ||
2163 | /* Restore the l2 indirect regs */ | |
2164 | for (i = 0; i < 0x83; i++) | |
2165 | iommu_write_l2(iommu, i, iommu->stored_l2[i]); | |
2166 | ||
2167 | /* Lock PCI setup registers */ | |
2168 | pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, | |
2169 | iommu->stored_addr_lo | 1); | |
4c894f47 JR |
2170 | } |
2171 | ||
3928aa3f SS |
2172 | static void iommu_enable_ga(struct amd_iommu *iommu) |
2173 | { | |
2174 | #ifdef CONFIG_IRQ_REMAP | |
2175 | switch (amd_iommu_guest_ir) { | |
2176 | case AMD_IOMMU_GUEST_IR_VAPIC: | |
2177 | iommu_feature_enable(iommu, CONTROL_GAM_EN); | |
2178 | /* Fall through */ | |
2179 | case AMD_IOMMU_GUEST_IR_LEGACY_GA: | |
2180 | iommu_feature_enable(iommu, CONTROL_GA_EN); | |
77bdab46 | 2181 | iommu->irte_ops = &irte_128_ops; |
3928aa3f SS |
2182 | break; |
2183 | default: | |
77bdab46 | 2184 | iommu->irte_ops = &irte_32_ops; |
3928aa3f SS |
2185 | break; |
2186 | } | |
2187 | #endif | |
2188 | } | |
2189 | ||
78d313c6 BH |
2190 | static void early_enable_iommu(struct amd_iommu *iommu) |
2191 | { | |
2192 | iommu_disable(iommu); | |
2193 | iommu_init_flags(iommu); | |
2194 | iommu_set_device_table(iommu); | |
2195 | iommu_enable_command_buffer(iommu); | |
2196 | iommu_enable_event_buffer(iommu); | |
2197 | iommu_set_exclusion_range(iommu); | |
2198 | iommu_enable_ga(iommu); | |
90fcffd9 | 2199 | iommu_enable_xt(iommu); |
78d313c6 BH |
2200 | iommu_enable(iommu); |
2201 | iommu_flush_all_caches(iommu); | |
2202 | } | |
2203 | ||
b65233a9 JR |
2204 | /* |
2205 | * This function finally enables all IOMMUs found in the system after | |
3ac3e5ee BH |
2206 | * they have been initialized. |
2207 | * | |
2208 | * Or, if this is a kdump kernel and the IOMMUs are all pre-enabled, try to | |
2209 | * copy the old content of the device table entries. If that is not the case, | |
2210 | * or the copy fails, just continue as a normal kernel does. | |
b65233a9 | 2211 | */ |
11ee5ac4 | 2212 | static void early_enable_iommus(void) |
8736197b JR |
2213 | { |
2214 | struct amd_iommu *iommu; | |
2215 | ||
3ac3e5ee BH |
2216 | |
2217 | if (!copy_device_table()) { | |
2218 | /* | |
2219 | * If we get here because copying the device table from the old | |
2220 | * kernel (with all IOMMUs enabled) failed, print an error message and | |
2221 | * free the allocated old_dev_tbl_cpy. | |
2222 | */ | |
2223 | if (amd_iommu_pre_enabled) | |
2224 | pr_err("Failed to copy DEV table from previous kernel.\n"); | |
2225 | if (old_dev_tbl_cpy != NULL) | |
2226 | free_pages((unsigned long)old_dev_tbl_cpy, | |
2227 | get_order(dev_table_size)); | |
2228 | ||
2229 | for_each_iommu(iommu) { | |
2230 | clear_translation_pre_enabled(iommu); | |
2231 | early_enable_iommu(iommu); | |
2232 | } | |
2233 | } else { | |
2234 | pr_info("Copied DEV table from previous kernel.\n"); | |
2235 | free_pages((unsigned long)amd_iommu_dev_table, | |
2236 | get_order(dev_table_size)); | |
2237 | amd_iommu_dev_table = old_dev_tbl_cpy; | |
2238 | for_each_iommu(iommu) { | |
2239 | iommu_disable_command_buffer(iommu); | |
2240 | iommu_disable_event_buffer(iommu); | |
2241 | iommu_enable_command_buffer(iommu); | |
2242 | iommu_enable_event_buffer(iommu); | |
2243 | iommu_enable_ga(iommu); | |
90fcffd9 | 2244 | iommu_enable_xt(iommu); |
3ac3e5ee BH |
2245 | iommu_set_device_table(iommu); |
2246 | iommu_flush_all_caches(iommu); | |
2247 | } | |
8736197b | 2248 | } |
d98de49a SS |
2249 | |
2250 | #ifdef CONFIG_IRQ_REMAP | |
2251 | if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) | |
2252 | amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP); | |
2253 | #endif | |
8736197b JR |
2254 | } |
2255 | ||
11ee5ac4 JR |
2256 | static void enable_iommus_v2(void) |
2257 | { | |
2258 | struct amd_iommu *iommu; | |
2259 | ||
2260 | for_each_iommu(iommu) { | |
2261 | iommu_enable_ppr_log(iommu); | |
2262 | iommu_enable_gt(iommu); | |
2263 | } | |
2264 | } | |
2265 | ||
2266 | static void enable_iommus(void) | |
2267 | { | |
2268 | early_enable_iommus(); | |
2269 | ||
2270 | enable_iommus_v2(); | |
2271 | } | |
2272 | ||
92ac4320 JR |
2273 | static void disable_iommus(void) |
2274 | { | |
2275 | struct amd_iommu *iommu; | |
2276 | ||
2277 | for_each_iommu(iommu) | |
2278 | iommu_disable(iommu); | |
d98de49a SS |
2279 | |
2280 | #ifdef CONFIG_IRQ_REMAP | |
2281 | if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) | |
2282 | amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP); | |
2283 | #endif | |
92ac4320 JR |
2284 | } |
2285 | ||
7441e9cb JR |
2286 | /* |
2287 | * Suspend/Resume support | |
2288 | * disable suspend until real resume implemented | |
2289 | */ | |
2290 | ||
f3c6ea1b | 2291 | static void amd_iommu_resume(void) |
7441e9cb | 2292 | { |
5bcd757f MG |
2293 | struct amd_iommu *iommu; |
2294 | ||
2295 | for_each_iommu(iommu) | |
2296 | iommu_apply_resume_quirks(iommu); | |
2297 | ||
736501ee JR |
2298 | /* re-load the hardware */ |
2299 | enable_iommus(); | |
3d9761e7 JR |
2300 | |
2301 | amd_iommu_enable_interrupts(); | |
7441e9cb JR |
2302 | } |
2303 | ||
f3c6ea1b | 2304 | static int amd_iommu_suspend(void) |
7441e9cb | 2305 | { |
736501ee JR |
2306 | /* disable IOMMUs to go out of the way for BIOS */ |
2307 | disable_iommus(); | |
2308 | ||
2309 | return 0; | |
7441e9cb JR |
2310 | } |
2311 | ||
f3c6ea1b | 2312 | static struct syscore_ops amd_iommu_syscore_ops = { |
7441e9cb JR |
2313 | .suspend = amd_iommu_suspend, |
2314 | .resume = amd_iommu_resume, | |
2315 | }; | |
2316 | ||
90b3eb03 | 2317 | static void __init free_iommu_resources(void) |
8704a1ba | 2318 | { |
ebcfa284 | 2319 | kmemleak_free(irq_lookup_table); |
0ea2c422 JR |
2320 | free_pages((unsigned long)irq_lookup_table, |
2321 | get_order(rlookup_table_size)); | |
f6019271 | 2322 | irq_lookup_table = NULL; |
8704a1ba | 2323 | |
a591989a JL |
2324 | kmem_cache_destroy(amd_iommu_irq_cache); |
2325 | amd_iommu_irq_cache = NULL; | |
8704a1ba JR |
2326 | |
2327 | free_pages((unsigned long)amd_iommu_rlookup_table, | |
2328 | get_order(rlookup_table_size)); | |
f6019271 | 2329 | amd_iommu_rlookup_table = NULL; |
8704a1ba JR |
2330 | |
2331 | free_pages((unsigned long)amd_iommu_alias_table, | |
2332 | get_order(alias_table_size)); | |
f6019271 | 2333 | amd_iommu_alias_table = NULL; |
8704a1ba JR |
2334 | |
2335 | free_pages((unsigned long)amd_iommu_dev_table, | |
2336 | get_order(dev_table_size)); | |
f6019271 | 2337 | amd_iommu_dev_table = NULL; |
8704a1ba JR |
2338 | |
2339 | free_iommu_all(); | |
2340 | ||
8704a1ba JR |
2341 | #ifdef CONFIG_GART_IOMMU |
2342 | /* | |
2343 | * We failed to initialize the AMD IOMMU - try fallback to GART | |
2344 | * if possible. | |
2345 | */ | |
2346 | gart_iommu_init(); | |
2347 | ||
2348 | #endif | |
2349 | } | |
2350 | ||
c2ff5cf5 JR |
2351 | /* SB IOAPIC is always on this device in AMD systems */ |
2352 | #define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0)) | |
2353 | ||
eb1eb7ae JR |
2354 | static bool __init check_ioapic_information(void) |
2355 | { | |
dfbb6d47 | 2356 | const char *fw_bug = FW_BUG; |
c2ff5cf5 | 2357 | bool ret, has_sb_ioapic; |
eb1eb7ae JR |
2358 | int idx; |
2359 | ||
c2ff5cf5 JR |
2360 | has_sb_ioapic = false; |
2361 | ret = false; | |
eb1eb7ae | 2362 | |
dfbb6d47 JR |
2363 | /* |
2364 | * If we have map overrides on the kernel command line the | |
2365 | * messages in this function might not describe firmware bugs | |
2366 | * anymore - so be careful | |
2367 | */ | |
2368 | if (cmdline_maps) | |
2369 | fw_bug = ""; | |
2370 | ||
c2ff5cf5 JR |
2371 | for (idx = 0; idx < nr_ioapics; idx++) { |
2372 | int devid, id = mpc_ioapic_id(idx); | |
2373 | ||
2374 | devid = get_ioapic_devid(id); | |
2375 | if (devid < 0) { | |
101fa037 | 2376 | pr_err("%s: IOAPIC[%d] not in IVRS table\n", |
dfbb6d47 | 2377 | fw_bug, id); |
c2ff5cf5 JR |
2378 | ret = false; |
2379 | } else if (devid == IOAPIC_SB_DEVID) { | |
2380 | has_sb_ioapic = true; | |
2381 | ret = true; | |
eb1eb7ae JR |
2382 | } |
2383 | } | |
2384 | ||
c2ff5cf5 JR |
2385 | if (!has_sb_ioapic) { |
2386 | /* | |
2387 | * We expect the SB IOAPIC to be listed in the IVRS | |
2388 | * table. The system timer is connected to the SB IOAPIC | |
2389 | * and if we don't have it in the list the system will | |
2390 | * panic at boot time. This situation usually happens | |
2391 | * when the BIOS is buggy and provides us the wrong | |
2392 | * device id for the IOAPIC in the system. | |
2393 | */ | |
101fa037 | 2394 | pr_err("%s: No southbridge IOAPIC found\n", fw_bug); |
c2ff5cf5 JR |
2395 | } |
2396 | ||
2397 | if (!ret) | |
101fa037 | 2398 | pr_err("Disabling interrupt remapping\n"); |
c2ff5cf5 JR |
2399 | |
2400 | return ret; | |
eb1eb7ae JR |
2401 | } |
2402 | ||
d04e0ba3 JR |
2403 | static void __init free_dma_resources(void) |
2404 | { | |
d04e0ba3 JR |
2405 | free_pages((unsigned long)amd_iommu_pd_alloc_bitmap, |
2406 | get_order(MAX_DOMAIN_ID/8)); | |
f6019271 | 2407 | amd_iommu_pd_alloc_bitmap = NULL; |
d04e0ba3 JR |
2408 | |
2409 | free_unity_maps(); | |
2410 | } | |
2411 | ||
b65233a9 | 2412 | /* |
8704a1ba JR |
2413 | * This is the hardware init function for AMD IOMMU in the system. |
2414 | * This function is called either from amd_iommu_init or from the interrupt | |
2415 | * remapping setup code. | |
b65233a9 JR |
2416 | * |
2417 | * This function basically parses the ACPI table for AMD IOMMU (IVRS) | |
8c7142f5 | 2418 | * four times: |
b65233a9 | 2419 | * |
8c7142f5 SS |
2420 | * 1 pass) Discover the most comprehensive IVHD type to use. |
2421 | * | |
2422 | * 2 pass) Find the highest PCI device id the driver has to handle. | |
b65233a9 JR |
2423 | * Upon this information the size of the data structures is |
2424 | * determined that needs to be allocated. | |
2425 | * | |
8c7142f5 | 2426 | * 3 pass) Initialize the data structures just allocated with the |
b65233a9 JR |
2427 | * information in the ACPI table about available AMD IOMMUs |
2428 | * in the system. It also maps the PCI devices in the | |
2429 | * system to specific IOMMUs | |
2430 | * | |
8c7142f5 | 2431 | * 4 pass) After the basic data structures are allocated and |
b65233a9 JR |
2432 | * initialized we update them with information about memory |
2433 | * remapping requirements parsed out of the ACPI table in | |
2434 | * this last pass. | |
2435 | * | |
8704a1ba JR |
2436 | * After everything is set up the IOMMUs are enabled and the necessary |
2437 | * hotplug and suspend notifiers are registered. | |
b65233a9 | 2438 | */ |
643511b3 | 2439 | static int __init early_amd_iommu_init(void) |
fe74c9cf | 2440 | { |
02f3b3f5 | 2441 | struct acpi_table_header *ivrs_base; |
02f3b3f5 | 2442 | acpi_status status; |
3928aa3f | 2443 | int i, remap_cache_sz, ret = 0; |
fe74c9cf | 2444 | |
643511b3 | 2445 | if (!amd_iommu_detected) |
8704a1ba JR |
2446 | return -ENODEV; |
2447 | ||
6b11d1d6 | 2448 | status = acpi_get_table("IVRS", 0, &ivrs_base); |
02f3b3f5 JR |
2449 | if (status == AE_NOT_FOUND) |
2450 | return -ENODEV; | |
2451 | else if (ACPI_FAILURE(status)) { | |
2452 | const char *err = acpi_format_exception(status); | |
101fa037 | 2453 | pr_err("IVRS table error: %s\n", err); |
02f3b3f5 JR |
2454 | return -EINVAL; |
2455 | } | |
2456 | ||
8c7142f5 SS |
2457 | /* |
2458 | * Validate checksum here so we don't need to do it when | |
2459 | * we actually parse the table | |
2460 | */ | |
2461 | ret = check_ivrs_checksum(ivrs_base); | |
2462 | if (ret) | |
99e8ccd3 | 2463 | goto out; |
8c7142f5 SS |
2464 | |
2465 | amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base); | |
2466 | DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type); | |
2467 | ||
fe74c9cf JR |
2468 | /* |
2469 | * First parse ACPI tables to find the largest Bus/Dev/Func | |
2470 | * we need to handle. Upon this information the shared data | |
2471 | * structures for the IOMMUs in the system will be allocated | |
2472 | */ | |
2c0ae172 JR |
2473 | ret = find_last_devid_acpi(ivrs_base); |
2474 | if (ret) | |
3551a708 JR |
2475 | goto out; |
2476 | ||
c571484e JR |
2477 | dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE); |
2478 | alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE); | |
2479 | rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE); | |
fe74c9cf | 2480 | |
fe74c9cf | 2481 | /* Device table - directly used by all IOMMUs */ |
8704a1ba | 2482 | ret = -ENOMEM; |
b336781b BH |
2483 | amd_iommu_dev_table = (void *)__get_free_pages( |
2484 | GFP_KERNEL | __GFP_ZERO | GFP_DMA32, | |
fe74c9cf JR |
2485 | get_order(dev_table_size)); |
2486 | if (amd_iommu_dev_table == NULL) | |
2487 | goto out; | |
2488 | ||
2489 | /* | |
2490 | * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the | |
2491 | * IOMMU sees for that device | |
2492 | */ | |
2493 | amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL, | |
2494 | get_order(alias_table_size)); | |
2495 | if (amd_iommu_alias_table == NULL) | |
2c0ae172 | 2496 | goto out; |
fe74c9cf JR |
2497 | |
2498 | /* IOMMU rlookup table - find the IOMMU for a specific device */ | |
83fd5cc6 JR |
2499 | amd_iommu_rlookup_table = (void *)__get_free_pages( |
2500 | GFP_KERNEL | __GFP_ZERO, | |
fe74c9cf JR |
2501 | get_order(rlookup_table_size)); |
2502 | if (amd_iommu_rlookup_table == NULL) | |
2c0ae172 | 2503 | goto out; |
fe74c9cf | 2504 | |
5dc8bff0 JR |
2505 | amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages( |
2506 | GFP_KERNEL | __GFP_ZERO, | |
fe74c9cf JR |
2507 | get_order(MAX_DOMAIN_ID/8)); |
2508 | if (amd_iommu_pd_alloc_bitmap == NULL) | |
2c0ae172 | 2509 | goto out; |
fe74c9cf JR |
2510 | |
2511 | /* | |
5dc8bff0 | 2512 | * let all alias entries point to themselves |
fe74c9cf | 2513 | */ |
3a61ec38 | 2514 | for (i = 0; i <= amd_iommu_last_bdf; ++i) |
fe74c9cf JR |
2515 | amd_iommu_alias_table[i] = i; |
2516 | ||
fe74c9cf JR |
2517 | /* |
2518 | * never allocate domain 0 because it's used as the non-allocated and | |
2519 | * error value placeholder | |
2520 | */ | |
5c87f62d | 2521 | __set_bit(0, amd_iommu_pd_alloc_bitmap); |
fe74c9cf JR |
2522 | |
2523 | /* | |
2524 | * now that the data structures are allocated and basically initialized, | |
2525 | * start the real ACPI table scan | |
2526 | */ | |
02f3b3f5 JR |
2527 | ret = init_iommu_all(ivrs_base); |
2528 | if (ret) | |
2c0ae172 | 2529 | goto out; |
fe74c9cf | 2530 | |
11123741 | 2531 | /* Disable any previously enabled IOMMUs */ |
20b46dff BH |
2532 | if (!is_kdump_kernel() || amd_iommu_disabled) |
2533 | disable_iommus(); | |
11123741 | 2534 | |
eb1eb7ae JR |
2535 | if (amd_iommu_irq_remap) |
2536 | amd_iommu_irq_remap = check_ioapic_information(); | |
2537 | ||
05152a04 JR |
2538 | if (amd_iommu_irq_remap) { |
2539 | /* | |
2540 | * Interrupt remapping enabled, create kmem_cache for the | |
2541 | * remapping tables. | |
2542 | */ | |
83ed9c13 | 2543 | ret = -ENOMEM; |
3928aa3f SS |
2544 | if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir)) |
2545 | remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32); | |
2546 | else | |
2547 | remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2); | |
05152a04 | 2548 | amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache", |
3928aa3f SS |
2549 | remap_cache_sz, |
2550 | IRQ_TABLE_ALIGNMENT, | |
2551 | 0, NULL); | |
05152a04 JR |
2552 | if (!amd_iommu_irq_cache) |
2553 | goto out; | |
0ea2c422 JR |
2554 | |
2555 | irq_lookup_table = (void *)__get_free_pages( | |
2556 | GFP_KERNEL | __GFP_ZERO, | |
2557 | get_order(rlookup_table_size)); | |
ebcfa284 LS |
2558 | kmemleak_alloc(irq_lookup_table, rlookup_table_size, |
2559 | 1, GFP_KERNEL); | |
0ea2c422 JR |
2560 | if (!irq_lookup_table) |
2561 | goto out; | |
05152a04 JR |
2562 | } |
2563 | ||
02f3b3f5 JR |
2564 | ret = init_memory_definitions(ivrs_base); |
2565 | if (ret) | |
2c0ae172 | 2566 | goto out; |
3551a708 | 2567 | |
eb1eb7ae JR |
2568 | /* init the device table */ |
2569 | init_device_table(); | |
2570 | ||
8704a1ba | 2571 | out: |
02f3b3f5 | 2572 | /* Don't leak any ACPI memory */ |
6b11d1d6 | 2573 | acpi_put_table(ivrs_base); |
02f3b3f5 JR |
2574 | ivrs_base = NULL; |
2575 | ||
643511b3 JR |
2576 | return ret; |
2577 | } | |
2578 | ||
ae295142 | 2579 | static int amd_iommu_enable_interrupts(void) |
3d9761e7 JR |
2580 | { |
2581 | struct amd_iommu *iommu; | |
2582 | int ret = 0; | |
2583 | ||
2584 | for_each_iommu(iommu) { | |
2585 | ret = iommu_init_msi(iommu); | |
2586 | if (ret) | |
2587 | goto out; | |
2588 | } | |
2589 | ||
2590 | out: | |
2591 | return ret; | |
2592 | } | |
2593 | ||
02f3b3f5 JR |
2594 | static bool detect_ivrs(void) |
2595 | { | |
2596 | struct acpi_table_header *ivrs_base; | |
02f3b3f5 JR |
2597 | acpi_status status; |
2598 | ||
6b11d1d6 | 2599 | status = acpi_get_table("IVRS", 0, &ivrs_base); |
02f3b3f5 JR |
2600 | if (status == AE_NOT_FOUND) |
2601 | return false; | |
2602 | else if (ACPI_FAILURE(status)) { | |
2603 | const char *err = acpi_format_exception(status); | |
101fa037 | 2604 | pr_err("IVRS table error: %s\n", err); |
02f3b3f5 JR |
2605 | return false; |
2606 | } | |
2607 | ||
6b11d1d6 | 2608 | acpi_put_table(ivrs_base); |
02f3b3f5 | 2609 | |
1adb7d31 JR |
2610 | /* Make sure ACS will be enabled during PCI probe */ |
2611 | pci_request_acs(); | |
2612 | ||
02f3b3f5 JR |
2613 | return true; |
2614 | } | |
2615 | ||
2c0ae172 | 2616 | /**************************************************************************** |
8704a1ba | 2617 | * |
2c0ae172 JR |
2618 | * AMD IOMMU Initialization State Machine |
2619 | * | |
2620 | ****************************************************************************/ | |
2621 | ||
2622 | static int __init state_next(void) | |
8704a1ba JR |
2623 | { |
2624 | int ret = 0; | |
2625 | ||
2c0ae172 JR |
2626 | switch (init_state) { |
2627 | case IOMMU_START_STATE: | |
2628 | if (!detect_ivrs()) { | |
2629 | init_state = IOMMU_NOT_FOUND; | |
2630 | ret = -ENODEV; | |
2631 | } else { | |
2632 | init_state = IOMMU_IVRS_DETECTED; | |
2633 | } | |
2634 | break; | |
2635 | case IOMMU_IVRS_DETECTED: | |
2636 | ret = early_amd_iommu_init(); | |
2637 | init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED; | |
7ad820e4 | 2638 | if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) { |
101fa037 | 2639 | pr_info("AMD IOMMU disabled on kernel command-line\n"); |
7ad820e4 JR |
2640 | free_dma_resources(); |
2641 | free_iommu_resources(); | |
2642 | init_state = IOMMU_CMDLINE_DISABLED; | |
2643 | ret = -EINVAL; | |
2644 | } | |
2c0ae172 JR |
2645 | break; |
2646 | case IOMMU_ACPI_FINISHED: | |
2647 | early_enable_iommus(); | |
2c0ae172 JR |
2648 | x86_platform.iommu_shutdown = disable_iommus; |
2649 | init_state = IOMMU_ENABLED; | |
2650 | break; | |
2651 | case IOMMU_ENABLED: | |
74ddda71 | 2652 | register_syscore_ops(&amd_iommu_syscore_ops); |
2c0ae172 JR |
2653 | ret = amd_iommu_init_pci(); |
2654 | init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT; | |
2655 | enable_iommus_v2(); | |
2656 | break; | |
2657 | case IOMMU_PCI_INIT: | |
2658 | ret = amd_iommu_enable_interrupts(); | |
2659 | init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN; | |
2660 | break; | |
2661 | case IOMMU_INTERRUPTS_EN: | |
1e6a7b04 | 2662 | ret = amd_iommu_init_dma_ops(); |
2c0ae172 JR |
2663 | init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS; |
2664 | break; | |
2665 | case IOMMU_DMA_OPS: | |
2666 | init_state = IOMMU_INITIALIZED; | |
2667 | break; | |
2668 | case IOMMU_INITIALIZED: | |
2669 | /* Nothing to do */ | |
2670 | break; | |
2671 | case IOMMU_NOT_FOUND: | |
2672 | case IOMMU_INIT_ERROR: | |
1b1e942e | 2673 | case IOMMU_CMDLINE_DISABLED: |
2c0ae172 JR |
2674 | /* Error states => do nothing */ |
2675 | ret = -EINVAL; | |
2676 | break; | |
2677 | default: | |
2678 | /* Unknown state */ | |
2679 | BUG(); | |
2680 | } | |
3d9761e7 | 2681 | |
2c0ae172 JR |
2682 | return ret; |
2683 | } | |
7441e9cb | 2684 | |
2c0ae172 JR |
2685 | static int __init iommu_go_to_state(enum iommu_init_state state) |
2686 | { | |
151b0903 | 2687 | int ret = -EINVAL; |
f5325094 | 2688 | |
2c0ae172 | 2689 | while (init_state != state) { |
1b1e942e JR |
2690 | if (init_state == IOMMU_NOT_FOUND || |
2691 | init_state == IOMMU_INIT_ERROR || | |
2692 | init_state == IOMMU_CMDLINE_DISABLED) | |
2c0ae172 | 2693 | break; |
151b0903 | 2694 | ret = state_next(); |
2c0ae172 | 2695 | } |
f2f12b6f | 2696 | |
fe74c9cf | 2697 | return ret; |
2c0ae172 | 2698 | } |
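For reference, a successful initialization walks the states above in this order (error states omitted): IOMMU_START_STATE -> IOMMU_IVRS_DETECTED -> IOMMU_ACPI_FINISHED -> IOMMU_ENABLED -> IOMMU_PCI_INIT -> IOMMU_INTERRUPTS_EN -> IOMMU_DMA_OPS -> IOMMU_INITIALIZED, with amd_iommu_detect() driving the machine to IOMMU_IVRS_DETECTED and amd_iommu_init() (or, with interrupt remapping, amd_iommu_prepare()/amd_iommu_enable() first) driving it the rest of the way.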
fe74c9cf | 2699 | |
6b474b82 JR |
2700 | #ifdef CONFIG_IRQ_REMAP |
2701 | int __init amd_iommu_prepare(void) | |
2702 | { | |
3f4cb7c0 TG |
2703 | int ret; |
2704 | ||
7fa1c842 | 2705 | amd_iommu_irq_remap = true; |
84d07793 | 2706 | |
3f4cb7c0 TG |
2707 | ret = iommu_go_to_state(IOMMU_ACPI_FINISHED); |
2708 | if (ret) | |
2709 | return ret; | |
2710 | return amd_iommu_irq_remap ? 0 : -ENODEV; | |
6b474b82 | 2711 | } |
d7f07769 | 2712 | |
6b474b82 JR |
2713 | int __init amd_iommu_enable(void) |
2714 | { | |
2715 | int ret; | |
2716 | ||
2717 | ret = iommu_go_to_state(IOMMU_ENABLED); | |
2718 | if (ret) | |
2719 | return ret; | |
d7f07769 | 2720 | |
6b474b82 | 2721 | irq_remapping_enabled = 1; |
90fcffd9 | 2722 | return amd_iommu_xt_mode; |
6b474b82 JR |
2723 | } |
2724 | ||
2725 | void amd_iommu_disable(void) | |
2726 | { | |
2727 | amd_iommu_suspend(); | |
2728 | } | |
2729 | ||
2730 | int amd_iommu_reenable(int mode) | |
2731 | { | |
2732 | amd_iommu_resume(); | |
2733 | ||
2734 | return 0; | |
2735 | } | |
d7f07769 | 2736 | |
6b474b82 JR |
2737 | int __init amd_iommu_enable_faulting(void) |
2738 | { | |
2739 | /* We enable MSI later when PCI is initialized */ | |
2740 | return 0; | |
2741 | } | |
2742 | #endif | |
d7f07769 | 2743 | |
2c0ae172 JR |
2744 | /* |
2745 | * This is the core init function for AMD IOMMU hardware in the system. | |
2746 | * This function is called from the generic x86 DMA layer initialization | |
2747 | * code. | |
2748 | */ | |
2749 | static int __init amd_iommu_init(void) | |
2750 | { | |
7d0f5fd3 | 2751 | struct amd_iommu *iommu; |
2c0ae172 JR |
2752 | int ret; |
2753 | ||
2754 | ret = iommu_go_to_state(IOMMU_INITIALIZED); | |
2755 | if (ret) { | |
d04e0ba3 JR |
2756 | free_dma_resources(); |
2757 | if (!irq_remapping_enabled) { | |
2758 | disable_iommus(); | |
90b3eb03 | 2759 | free_iommu_resources(); |
d04e0ba3 | 2760 | } else { |
d04e0ba3 JR |
2761 | uninit_device_table_dma(); |
2762 | for_each_iommu(iommu) | |
2763 | iommu_flush_all_caches(iommu); | |
2764 | } | |
2c0ae172 JR |
2765 | } |
2766 | ||
7d0f5fd3 GH |
2767 | for_each_iommu(iommu) |
2768 | amd_iommu_debugfs_setup(iommu); | |
2769 | ||
2c0ae172 | 2770 | return ret; |
fe74c9cf JR |
2771 | } |
2772 | ||
2543a786 TL |
2773 | static bool amd_iommu_sme_check(void) |
2774 | { | |
2775 | if (!sme_active() || (boot_cpu_data.x86 != 0x17)) | |
2776 | return true; | |
2777 | ||
2778 | /* For Fam17h, a specific level of support is required */ | |
2779 | if (boot_cpu_data.microcode >= 0x08001205) | |
2780 | return true; | |
2781 | ||
2782 | if ((boot_cpu_data.microcode >= 0x08001126) && | |
2783 | (boot_cpu_data.microcode <= 0x080011ff)) | |
2784 | return true; | |
2785 | ||
101fa037 | 2786 | pr_notice("IOMMU not currently supported when SME is active\n"); |
2543a786 TL |
2787 | |
2788 | return false; | |
2789 | } | |
2790 | ||
b65233a9 JR |
2791 | /**************************************************************************** |
2792 | * | |
2793 | * Early detect code. This code runs at IOMMU detection time in the DMA | |
2794 | * layer. It just looks if there is an IVRS ACPI table to detect AMD | |
2795 | * IOMMUs | |
2796 | * | |
2797 | ****************************************************************************/ | |
480125ba | 2798 | int __init amd_iommu_detect(void) |
ae7877de | 2799 | { |
2c0ae172 | 2800 | int ret; |
02f3b3f5 | 2801 | |
75f1cdf1 | 2802 | if (no_iommu || (iommu_detected && !gart_iommu_aperture)) |
480125ba | 2803 | return -ENODEV; |
ae7877de | 2804 | |
2543a786 TL |
2805 | if (!amd_iommu_sme_check()) |
2806 | return -ENODEV; | |
2807 | ||
2c0ae172 JR |
2808 | ret = iommu_go_to_state(IOMMU_IVRS_DETECTED); |
2809 | if (ret) | |
2810 | return ret; | |
11bd04f6 | 2811 | |
02f3b3f5 JR |
2812 | amd_iommu_detected = true; |
2813 | iommu_detected = 1; | |
2814 | x86_init.iommu.iommu_init = amd_iommu_init; | |
2815 | ||
4781bc42 | 2816 | return 1; |
ae7877de JR |
2817 | } |
2818 | ||
b65233a9 JR |
2819 | /**************************************************************************** |
2820 | * | |
2821 | * Parsing functions for the AMD IOMMU specific kernel command line | |
2822 | * options. | |
2823 | * | |
2824 | ****************************************************************************/ | |
2825 | ||
fefda117 JR |
2826 | static int __init parse_amd_iommu_dump(char *str) |
2827 | { | |
2828 | amd_iommu_dump = true; | |
2829 | ||
2830 | return 1; | |
2831 | } | |
2832 | ||
3928aa3f SS |
2833 | static int __init parse_amd_iommu_intr(char *str) |
2834 | { | |
2835 | for (; *str; ++str) { | |
2836 | if (strncmp(str, "legacy", 6) == 0) { | |
2837 | amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; | |
2838 | break; | |
2839 | } | |
2840 | if (strncmp(str, "vapic", 5) == 0) { | |
2841 | amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC; | |
2842 | break; | |
2843 | } | |
2844 | } | |
2845 | return 1; | |
2846 | } | |
2847 | ||
918ad6c5 JR |
2848 | static int __init parse_amd_iommu_options(char *str) |
2849 | { | |
2850 | for (; *str; ++str) { | |
695b5676 | 2851 | if (strncmp(str, "fullflush", 9) == 0) |
afa9fdc2 | 2852 | amd_iommu_unmap_flush = true; |
a5235725 JR |
2853 | if (strncmp(str, "off", 3) == 0) |
2854 | amd_iommu_disabled = true; | |
5abcdba4 JR |
2855 | if (strncmp(str, "force_isolation", 15) == 0) |
2856 | amd_iommu_force_isolation = true; | |
918ad6c5 JR |
2857 | } |
2858 | ||
2859 | return 1; | |
2860 | } | |
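Based on the string matches above, these options are passed on the kernel command line roughly as follows (illustrative values only; the authoritative list lives in Documentation/admin-guide/kernel-parameters.txt):

	amd_iommu=fullflush         - flush the IO/TLB on every unmap
	amd_iommu=off               - disable the AMD IOMMU driver entirely
	amd_iommu=force_isolation   - force every device into its own domain
	amd_iommu_intr=legacy|vapic - select legacy or guest-vAPIC interrupt remapping mode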
2861 | ||
440e8998 JR |
2862 | static int __init parse_ivrs_ioapic(char *str) |
2863 | { | |
2864 | unsigned int bus, dev, fn; | |
2865 | int ret, id, i; | |
2866 | u16 devid; | |
2867 | ||
2868 | ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn); | |
2869 | ||
2870 | if (ret != 4) { | |
101fa037 | 2871 | pr_err("Invalid command line: ivrs_ioapic%s\n", str); |
440e8998 JR |
2872 | return 1; |
2873 | } | |
2874 | ||
2875 | if (early_ioapic_map_size == EARLY_MAP_SIZE) { | |
101fa037 | 2876 | pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n", |
440e8998 JR |
2877 | str); |
2878 | return 1; | |
2879 | } | |
2880 | ||
2881 | devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7); | |
2882 | ||
dfbb6d47 | 2883 | cmdline_maps = true; |
440e8998 JR |
2884 | i = early_ioapic_map_size++; |
2885 | early_ioapic_map[i].id = id; | |
2886 | early_ioapic_map[i].devid = devid; | |
2887 | early_ioapic_map[i].cmd_line = true; | |
2888 | ||
2889 | return 1; | |
2890 | } | |
2891 | ||
2892 | static int __init parse_ivrs_hpet(char *str) | |
2893 | { | |
2894 | unsigned int bus, dev, fn; | |
2895 | int ret, id, i; | |
2896 | u16 devid; | |
2897 | ||
2898 | ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn); | |
2899 | ||
2900 | if (ret != 4) { | |
101fa037 | 2901 | pr_err("Invalid command line: ivrs_hpet%s\n", str); |
440e8998 JR |
2902 | return 1; |
2903 | } | |
2904 | ||
2905 | if (early_hpet_map_size == EARLY_MAP_SIZE) { | |
101fa037 | 2906 | pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n", |
440e8998 JR |
2907 | str); |
2908 | return 1; | |
2909 | } | |
2910 | ||
2911 | devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7); | |
2912 | ||
dfbb6d47 | 2913 | cmdline_maps = true; |
440e8998 JR |
2914 | i = early_hpet_map_size++; |
2915 | early_hpet_map[i].id = id; | |
2916 | early_hpet_map[i].devid = devid; | |
2917 | early_hpet_map[i].cmd_line = true; | |
2918 | ||
2919 | return 1; | |
2920 | } | |
2921 | ||
ca3bf5d4 SS |
2922 | static int __init parse_ivrs_acpihid(char *str) |
2923 | { | |
2924 | u32 bus, dev, fn; | |
2925 | char *hid, *uid, *p; | |
2926 | char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0}; | |
2927 | int ret, i; | |
2928 | ||
2929 | ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid); | |
2930 | if (ret != 4) { | |
101fa037 | 2931 | pr_err("Invalid command line: ivrs_acpihid(%s)\n", str); |
ca3bf5d4 SS |
2932 | return 1; |
2933 | } | |
2934 | ||
2935 | p = acpiid; | |
2936 | hid = strsep(&p, ":"); | |
2937 | uid = p; | |
2938 | ||
2939 | if (!hid || !(*hid) || !uid) { | |
101fa037 | 2940 | pr_err("Invalid command line: hid or uid\n"); |
ca3bf5d4 SS |
2941 | return 1; |
2942 | } | |
2943 | ||
2944 | i = early_acpihid_map_size++; | |
2945 | memcpy(early_acpihid_map[i].hid, hid, strlen(hid)); | |
2946 | memcpy(early_acpihid_map[i].uid, uid, strlen(uid)); | |
2947 | early_acpihid_map[i].devid = | |
2948 | ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7); | |
2949 | early_acpihid_map[i].cmd_line = true; | |
2950 | ||
2951 | return 1; | |
2952 | } | |
2953 | ||
440e8998 JR |
2954 | __setup("amd_iommu_dump", parse_amd_iommu_dump); |
2955 | __setup("amd_iommu=", parse_amd_iommu_options); | |
3928aa3f | 2956 | __setup("amd_iommu_intr=", parse_amd_iommu_intr); |
440e8998 JR |
2957 | __setup("ivrs_ioapic", parse_ivrs_ioapic); |
2958 | __setup("ivrs_hpet", parse_ivrs_hpet); | |
ca3bf5d4 | 2959 | __setup("ivrs_acpihid", parse_ivrs_acpihid); |
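The ivrs_* overrides follow the sscanf() formats used by the parsers above; illustrative, made-up values:

	ivrs_ioapic[10]=00:14.0          - IOAPIC id 10 sits behind PCI device 00:14.0
	ivrs_hpet[0]=00:14.0             - HPET number 0 sits behind PCI device 00:14.0
	ivrs_acpihid[00:14.5]=AMD0020:0  - ACPI device with HID AMD0020, UID 0, behind PCI 00:14.5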
22e6daf4 KRW |
2960 | |
2961 | IOMMU_INIT_FINISH(amd_iommu_detect, | |
2962 | gart_iommu_hole_init, | |
98f1ad25 JR |
2963 | NULL, |
2964 | NULL); | |
400a28a0 JR |
2965 | |
2966 | bool amd_iommu_v2_supported(void) | |
2967 | { | |
2968 | return amd_iommu_v2_present; | |
2969 | } | |
2970 | EXPORT_SYMBOL(amd_iommu_v2_supported); | |
30861ddc | 2971 | |
f5863a00 SS |
2972 | struct amd_iommu *get_amd_iommu(unsigned int idx) |
2973 | { | |
2974 | unsigned int i = 0; | |
2975 | struct amd_iommu *iommu; | |
2976 | ||
2977 | for_each_iommu(iommu) | |
2978 | if (i++ == idx) | |
2979 | return iommu; | |
2980 | return NULL; | |
2981 | } | |
2982 | EXPORT_SYMBOL(get_amd_iommu); | |
2983 | ||
30861ddc SK |
2984 | /**************************************************************************** |
2985 | * | |
2986 | * IOMMU EFR Performance Counter support functionality. This code allows | |
2987 | * access to the IOMMU PC functionality. | |
2988 | * | |
2989 | ****************************************************************************/ | |
2990 | ||
f5863a00 | 2991 | u8 amd_iommu_pc_get_max_banks(unsigned int idx) |
30861ddc | 2992 | { |
f5863a00 | 2993 | struct amd_iommu *iommu = get_amd_iommu(idx); |
30861ddc | 2994 | |
30861ddc | 2995 | if (iommu) |
f5863a00 | 2996 | return iommu->max_banks; |
30861ddc | 2997 | |
f5863a00 | 2998 | return 0; |
30861ddc SK |
2999 | } |
3000 | EXPORT_SYMBOL(amd_iommu_pc_get_max_banks); | |
3001 | ||
3002 | bool amd_iommu_pc_supported(void) | |
3003 | { | |
3004 | return amd_iommu_pc_present; | |
3005 | } | |
3006 | EXPORT_SYMBOL(amd_iommu_pc_supported); | |
3007 | ||
f5863a00 | 3008 | u8 amd_iommu_pc_get_max_counters(unsigned int idx) |
30861ddc | 3009 | { |
f5863a00 | 3010 | struct amd_iommu *iommu = get_amd_iommu(idx); |
30861ddc | 3011 | |
30861ddc | 3012 | if (iommu) |
f5863a00 | 3013 | return iommu->max_counters; |
30861ddc | 3014 | |
f5863a00 | 3015 | return 0; |
30861ddc SK |
3016 | } |
3017 | EXPORT_SYMBOL(amd_iommu_pc_get_max_counters); | |
3018 | ||
1650dfd1 SS |
3019 | static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, |
3020 | u8 fxn, u64 *value, bool is_write) | |
30861ddc | 3021 | { |
30861ddc SK |
3022 | u32 offset; |
3023 | u32 max_offset_lim; | |
3024 | ||
1650dfd1 SS |
3025 | /* Make sure the IOMMU PC resource is available */ |
3026 | if (!amd_iommu_pc_present) | |
3027 | return -ENODEV; | |
3028 | ||
30861ddc | 3029 | /* Check for valid iommu and pc register indexing */ |
1650dfd1 | 3030 | if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7))) |
30861ddc SK |
3031 | return -ENODEV; |
3032 | ||
0a6d80c7 | 3033 | offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn); |
30861ddc SK |
3034 | |
3035 | /* Limit the offset to the hw defined mmio region aperture */ | |
0a6d80c7 | 3036 | max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) | |
30861ddc SK |
3037 | (iommu->max_counters << 8) | 0x28); |
3038 | if ((offset < MMIO_CNTR_REG_OFFSET) || | |
3039 | (offset > max_offset_lim)) | |
3040 | return -EINVAL; | |
3041 | ||
3042 | if (is_write) { | |
0a6d80c7 SS |
3043 | u64 val = *value & GENMASK_ULL(47, 0); |
3044 | ||
3045 | writel((u32)val, iommu->mmio_base + offset); | |
3046 | writel((val >> 32), iommu->mmio_base + offset + 4); | |
30861ddc SK |
3047 | } else { |
3048 | *value = readl(iommu->mmio_base + offset + 4); | |
3049 | *value <<= 32; | |
0a6d80c7 SS |
3050 | *value |= readl(iommu->mmio_base + offset); |
3051 | *value &= GENMASK_ULL(47, 0); | |
30861ddc SK |
3052 | } |
3053 | ||
3054 | return 0; | |
3055 | } | |
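As a worked illustration of the offset arithmetic above (values chosen arbitrarily): bank 0, counter 0, function 0 - the counter register itself, which is what init_iommu_perf_ctr() probes - yields

	offset = ((0x40 | 0) << 12) | (0 << 8) | 0 = 0x40000

so each counter bank occupies a 4 KiB window starting at 0x40000 and each counter an 0x100-byte slot within its bank.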
38e45d02 | 3056 | |
1650dfd1 | 3057 | int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value) |
38e45d02 | 3058 | { |
1650dfd1 SS |
3059 | if (!iommu) |
3060 | return -EINVAL; | |
38e45d02 | 3061 | |
1650dfd1 SS |
3062 | return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false); |
3063 | } | |
3064 | EXPORT_SYMBOL(amd_iommu_pc_get_reg); | |
3065 | ||
3066 | int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value) | |
3067 | { | |
3068 | if (!iommu) | |
3069 | return -EINVAL; | |
38e45d02 | 3070 | |
1650dfd1 | 3071 | return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true); |
38e45d02 | 3072 | } |
1650dfd1 | 3073 | EXPORT_SYMBOL(amd_iommu_pc_set_reg); |
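A minimal sketch of how a consumer of the interface exported above (for example a perf driver) might read a counter; headers and module boilerplate are omitted, and the choice of bank 0 / counter 0 / function 0 simply mirrors the probe done in init_iommu_perf_ctr():

	static void example_read_iommu_counter(void)
	{
		struct amd_iommu *iommu = get_amd_iommu(0);	/* first IOMMU instance */
		u64 count;

		if (!amd_iommu_pc_supported() || !iommu)
			return;

		if (!amd_iommu_pc_get_max_banks(0) || !amd_iommu_pc_get_max_counters(0))
			return;

		/* Read bank 0, counter 0, register 0: the 48-bit counter value */
		if (!amd_iommu_pc_get_reg(iommu, 0, 0, 0, &count))
			pr_info("IOMMU PC bank 0 / counter 0: %llu\n", count);
	}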