/*
 * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/sysdev.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <asm/pci-direct.h>
#include <asm/amd_iommu_proto.h>
#include <asm/amd_iommu_types.h>
#include <asm/amd_iommu.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/x86_init.h>

/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE                  0x10
#define ACPI_IVMD_TYPE_ALL              0x20
#define ACPI_IVMD_TYPE                  0x21
#define ACPI_IVMD_TYPE_RANGE            0x22

#define IVHD_DEV_ALL                    0x01
#define IVHD_DEV_SELECT                 0x02
#define IVHD_DEV_SELECT_RANGE_START     0x03
#define IVHD_DEV_RANGE_END              0x04
#define IVHD_DEV_ALIAS                  0x42
#define IVHD_DEV_ALIAS_RANGE            0x43
#define IVHD_DEV_EXT_SELECT             0x46
#define IVHD_DEV_EXT_SELECT_RANGE       0x47

#define IVHD_FLAG_HT_TUN_EN_MASK        0x01
#define IVHD_FLAG_PASSPW_EN_MASK        0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
#define IVHD_FLAG_ISOC_EN_MASK          0x08

#define IVMD_FLAG_EXCL_RANGE            0x08
#define IVMD_FLAG_UNITY_MAP             0x01

#define ACPI_DEVFLAG_INITPASS           0x01
#define ACPI_DEVFLAG_EXTINT             0x02
#define ACPI_DEVFLAG_NMI                0x04
#define ACPI_DEVFLAG_SYSMGT1            0x10
#define ACPI_DEVFLAG_SYSMGT2            0x20
#define ACPI_DEVFLAG_LINT0              0x40
#define ACPI_DEVFLAG_LINT1              0x80
#define ACPI_DEVFLAG_ATSDIS             0x10000000

/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entrys.
 */
struct ivhd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 cap_ptr;
	u64 mmio_phys;
	u16 pci_seg;
	u16 info;
	u32 reserved;
} __attribute__((packed));

/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
struct ivhd_entry {
	u8 type;
	u16 devid;
	u8 flags;
	u32 ext;
} __attribute__((packed));

/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
struct ivmd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 aux;
	u64 resv;
	u64 range_start;
	u64 range_length;
} __attribute__((packed));

bool amd_iommu_dump;

static int __initdata amd_iommu_detected;

u16 amd_iommu_last_bdf;			/* largest PCI device id we have
					   to handle */
LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
					   we find in ACPI */
bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
					   system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];
int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;

/*
 * Set to true if ACPI table parsing and hardware initialization went properly
 */
static bool amd_iommu_initialized;

/*
 * List of protection domains - used during resume
 */
LIST_HEAD(amd_iommu_pd_list);
spinlock_t amd_iommu_pd_lock;

/*
 * Pointer to the device table which is shared by all AMD IOMMUs. It is
 * indexed by the PCI device id or the HT unit id and contains information
 * about the domain the device belongs to as well as the page table root
 * pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;	/* size of the device table */
static u32 alias_table_size;	/* size of the alias table */
static u32 rlookup_table_size;	/* size of the rlookup table */

static inline void update_last_devid(u16 devid)
{
	if (devid > amd_iommu_last_bdf)
		amd_iommu_last_bdf = devid;
}
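/*
 * All per-device tables are indexed by PCI device id, so they need room for
 * amd_iommu_last_bdf + 1 entries of the given size, rounded up to a whole
 * power-of-two number of pages.
 */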
static inline unsigned long tbl_size(int entry_size)
{
	unsigned shift = PAGE_SHIFT +
		get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

	return 1UL << shift;
}

/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required by the driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated.
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
	u64 start = iommu->exclusion_start & PAGE_MASK;
	u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
	u64 entry;

	if (!iommu->exclusion_start)
		return;

	entry = start | MMIO_EXCL_ENABLE_MASK;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
			&entry, sizeof(entry));

	entry = limit;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
			&entry, sizeof(entry));
}

/* Programs the physical address of the device table into the IOMMU hardware */
static void __init iommu_set_device_table(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->mmio_base == NULL);

	entry = virt_to_phys(amd_iommu_dev_table);
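	/* the size field of the register holds the table size in 4k pages, minus one */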
	entry |= (dev_table_size >> 12) - 1;
	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
			&entry, sizeof(entry));
}

/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1 << bit);
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~(1 << bit);
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
	printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx\n",
	       dev_name(&iommu->dev->dev), iommu->cap_ptr);

	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
	/* Disable command buffer */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	/* Disable event logging and event interrupts */
	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

	/* Disable IOMMU hardware itself */
	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}

/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 * __init iommu_map_mmio_space(u64 address)
{
	u8 *ret;

	if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu"))
		return NULL;

	ret = ioremap_nocache(address, MMIO_REGION_LENGTH);
	if (ret != NULL)
		return ret;

	release_mem_region(address, MMIO_REGION_LENGTH);

	return NULL;
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
	if (iommu->mmio_base)
		iounmap(iommu->mmio_base);
	release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH);
}

/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Based on this information the size of the shared
 * data structures is determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry
 */
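/* the entry size is encoded in the two most significant bits of the type byte */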
static inline int ivhd_entry_length(u8 *ivhd)
{
	return 0x04 << (*ivhd >> 6);
}

/*
 * This function reads the last device id the IOMMU has to handle from the PCI
 * capability header for this IOMMU
 */
static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr)
{
	u32 cap;

	cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
	update_last_devid(calc_devid(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));

	return 0;
}

/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function checks whether a higher device id is defined in the ACPI table
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
	u8 *p = (void *)h, *end = (void *)h;
	struct ivhd_entry *dev;

	p += sizeof(*h);
	end += h->length;

	find_last_devid_on_pci(PCI_BUS(h->devid),
			PCI_SLOT(h->devid),
			PCI_FUNC(h->devid),
			h->cap_ptr);

	while (p < end) {
		dev = (struct ivhd_entry *)p;
		switch (dev->type) {
		case IVHD_DEV_SELECT:
		case IVHD_DEV_RANGE_END:
		case IVHD_DEV_ALIAS:
		case IVHD_DEV_EXT_SELECT:
			/* all the above subfield types refer to device ids */
			update_last_devid(dev->devid);
			break;
		default:
			break;
		}
		p += ivhd_entry_length(p);
	}

	WARN_ON(p != end);

	return 0;
}

/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
	int i;
	u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;

	/*
	 * Validate checksum here so we don't need to do it when
	 * we actually parse the table
	 */
	for (i = 0; i < table->length; ++i)
		checksum += p[i];
	if (checksum != 0)
		/* ACPI table corrupt */
		return -ENODEV;

	p += IVRS_HEADER_LENGTH;

	end += table->length;
	while (p < end) {
		h = (struct ivhd_header *)p;
		switch (h->type) {
		case ACPI_IVHD_TYPE:
			find_last_devid_from_ivhd(h);
			break;
		default:
			break;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}

/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup tables and also
 * basically initialize the hardware.
 *
 ****************************************************************************/

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously.
 */
static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
{
	u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
			get_order(CMD_BUFFER_SIZE));

	if (cmd_buf == NULL)
		return NULL;

	iommu->cmd_buf_size = CMD_BUFFER_SIZE;

	return cmd_buf;
}

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->cmd_buf == NULL);

	entry = (u64)virt_to_phys(iommu->cmd_buf);
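	/* MMIO_CMD_SIZE_512 encodes the command buffer size (512 entries) into the base register */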
	entry |= MMIO_CMD_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));

	amd_iommu_reset_cmd_buffer(iommu);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->cmd_buf,
		   get_order(iommu->cmd_buf_size));
}

/* allocates the memory where the IOMMU will log its events to */
static u8 * __init alloc_event_buffer(struct amd_iommu *iommu)
{
	iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						get_order(EVT_BUFFER_SIZE));

	if (iommu->evt_buf == NULL)
		return NULL;

	iommu->evt_buf_size = EVT_BUFFER_SIZE;

	return iommu->evt_buf;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->evt_buf == NULL);

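	/* EVT_LEN_MASK encodes the size of the event log into the base register */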
	entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}

/* sets a specific bit in the device table entry. */
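/*
 * A device table entry is stored as an array of 32-bit words, so 'bit >> 5'
 * selects the word and 'bit & 0x1f' the bit inside that word.
 */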
static void set_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 5) & 0x07;
	int _bit = bit & 0x1f;

	amd_iommu_dev_table[devid].data[i] |= (1 << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 5) & 0x07;
	int _bit = bit & 0x1f;

	return (amd_iommu_dev_table[devid].data[i] & (1 << _bit)) >> _bit;
}

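/*
 * Erratum 63 workaround: if a device's SysMgt field reads back as 01b, the
 * IW bit in its device table entry must be set as well.
 */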
void amd_iommu_apply_erratum_63(u16 devid)
{
	int sysmgt;

	sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
		 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

	if (sysmgt == 0x01)
		set_dev_entry_bit(devid, DEV_ENTRY_IW);
}

/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
	amd_iommu_rlookup_table[devid] = iommu;
}

/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
					   u16 devid, u32 flags, u32 ext_flags)
{
	if (flags & ACPI_DEVFLAG_INITPASS)
		set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
	if (flags & ACPI_DEVFLAG_EXTINT)
		set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
	if (flags & ACPI_DEVFLAG_NMI)
		set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
	if (flags & ACPI_DEVFLAG_SYSMGT1)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
	if (flags & ACPI_DEVFLAG_SYSMGT2)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
	if (flags & ACPI_DEVFLAG_LINT0)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
	if (flags & ACPI_DEVFLAG_LINT1)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

	amd_iommu_apply_erratum_63(devid);

	set_iommu_for_device(iommu, devid);
}

/*
 * Reads the device exclusion range from ACPI and initializes the IOMMU with
 * it
 */
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

	if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
		return;

	if (iommu) {
		/*
		 * We can only configure exclusion ranges per IOMMU, not
		 * per device. But we can enable the exclusion range per
		 * device. This is done here.
		 */
		set_dev_entry_bit(m->devid, DEV_ENTRY_EX);
		iommu->exclusion_start = m->range_start;
		iommu->exclusion_length = m->range_length;
	}
}

/*
 * This function reads some important data from the IOMMU PCI space and
 * initializes the driver data structure with it. It reads the hardware
 * capabilities and the first/last device entries
 */
static void __init init_iommu_from_pci(struct amd_iommu *iommu)
{
	int cap_ptr = iommu->cap_ptr;
	u32 range, misc;

	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
			      &iommu->cap);
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
			      &range);
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
			      &misc);

	iommu->first_device = calc_devid(MMIO_GET_BUS(range),
					 MMIO_GET_FD(range));
	iommu->last_device = calc_devid(MMIO_GET_BUS(range),
					MMIO_GET_LD(range));
	iommu->evt_msi_num = MMIO_MSI_NUM(misc);
}

/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
					struct ivhd_header *h)
{
	u8 *p = (u8 *)h;
	u8 *end = p, flags = 0;
	u16 dev_i, devid = 0, devid_start = 0, devid_to = 0;
	u32 ext_flags = 0;
	bool alias = false;
	struct ivhd_entry *e;

	/*
	 * First set the recommended feature enable bits from ACPI
	 * into the IOMMU control registers
	 */
	h->flags & IVHD_FLAG_HT_TUN_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

	h->flags & IVHD_FLAG_PASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

	h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

	h->flags & IVHD_FLAG_ISOC_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
		iommu_feature_disable(iommu, CONTROL_ISOC_EN);

	/*
	 * make IOMMU memory accesses cache coherent
	 */
	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

	/*
	 * Done. Now parse the device entries
	 */
	p += sizeof(struct ivhd_header);
	end += h->length;

	while (p < end) {
		e = (struct ivhd_entry *)p;
		switch (e->type) {
		case IVHD_DEV_ALL:

			DUMP_printk("  DEV_ALL\t\t\t first devid: %02x:%02x.%x"
				    " last device %02x:%02x.%x flags: %02x\n",
				    PCI_BUS(iommu->first_device),
				    PCI_SLOT(iommu->first_device),
				    PCI_FUNC(iommu->first_device),
				    PCI_BUS(iommu->last_device),
				    PCI_SLOT(iommu->last_device),
				    PCI_FUNC(iommu->last_device),
				    e->flags);

			for (dev_i = iommu->first_device;
					dev_i <= iommu->last_device; ++dev_i)
				set_dev_entry_from_acpi(iommu, dev_i,
							e->flags, 0);
			break;
		case IVHD_DEV_SELECT:

			DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			break;
		case IVHD_DEV_SELECT_RANGE_START:

			DUMP_printk("  DEV_SELECT_RANGE_START\t "
				    "devid: %02x:%02x.%x flags: %02x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = 0;
			alias = false;
			break;
		case IVHD_DEV_ALIAS:

			DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x devid_to: %02x:%02x.%x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid = e->devid;
			devid_to = e->ext >> 8;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
			amd_iommu_alias_table[devid] = devid_to;
			break;
		case IVHD_DEV_ALIAS_RANGE:

			DUMP_printk("  DEV_ALIAS_RANGE\t\t "
				    "devid: %02x:%02x.%x flags: %02x "
				    "devid_to: %02x:%02x.%x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid_start = e->devid;
			flags = e->flags;
			devid_to = e->ext >> 8;
			ext_flags = 0;
			alias = true;
			break;
		case IVHD_DEV_EXT_SELECT:

			DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
				    "flags: %02x ext: %08x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags,
						e->ext);
			break;
		case IVHD_DEV_EXT_SELECT_RANGE:

			DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
				    "%02x:%02x.%x flags: %02x ext: %08x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = e->ext;
			alias = false;
			break;
		case IVHD_DEV_RANGE_END:

			DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid));

			devid = e->devid;
			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
				if (alias) {
					amd_iommu_alias_table[dev_i] = devid_to;
					set_dev_entry_from_acpi(iommu,
						devid_to, flags, ext_flags);
				}
				set_dev_entry_from_acpi(iommu, dev_i,
							flags, ext_flags);
			}
			break;
		default:
			break;
		}

		p += ivhd_entry_length(p);
	}
}

/* Initializes the device->iommu mapping for the driver */
static int __init init_iommu_devices(struct amd_iommu *iommu)
{
	u16 i;

	for (i = iommu->first_device; i <= iommu->last_device; ++i)
		set_iommu_for_device(iommu, i);

	return 0;
}

static void __init free_iommu_one(struct amd_iommu *iommu)
{
	free_command_buffer(iommu);
	free_event_buffer(iommu);
	iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
	struct amd_iommu *iommu, *next;

	for_each_iommu_safe(iommu, next) {
		list_del(&iommu->list);
		free_iommu_one(iommu);
		kfree(iommu);
	}
}

/*
 * This function glues the initialization steps for one IOMMU together and
 * also allocates the command buffer and programs the hardware. It does NOT
 * enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
	spin_lock_init(&iommu->lock);

	/* Add IOMMU to internal data structures */
	list_add_tail(&iommu->list, &amd_iommu_list);
	iommu->index = amd_iommus_present++;

	if (unlikely(iommu->index >= MAX_IOMMUS)) {
		WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
		return -ENOSYS;
	}

	/* Index is fine - add IOMMU to the array */
	amd_iommus[iommu->index] = iommu;

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
	iommu->dev = pci_get_bus_and_slot(PCI_BUS(h->devid), h->devid & 0xff);
	if (!iommu->dev)
		return 1;

	iommu->cap_ptr = h->cap_ptr;
	iommu->pci_seg = h->pci_seg;
	iommu->mmio_phys = h->mmio_phys;
	iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys);
	if (!iommu->mmio_base)
		return -ENOMEM;

	iommu->cmd_buf = alloc_command_buffer(iommu);
	if (!iommu->cmd_buf)
		return -ENOMEM;

	iommu->evt_buf = alloc_event_buffer(iommu);
	if (!iommu->evt_buf)
		return -ENOMEM;

	iommu->int_enabled = false;

	init_iommu_from_pci(iommu);
	init_iommu_from_acpi(iommu, h);
	init_iommu_devices(iommu);

	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
		amd_iommu_np_cache = true;

	return pci_enable_device(iommu->dev);
}

/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;
	struct amd_iommu *iommu;
	int ret;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		h = (struct ivhd_header *)p;
		switch (*p) {
		case ACPI_IVHD_TYPE:

			DUMP_printk("device: %02x:%02x.%01x cap: %04x "
				    "seg: %d flags: %01x info %04x\n",
				    PCI_BUS(h->devid), PCI_SLOT(h->devid),
				    PCI_FUNC(h->devid), h->cap_ptr,
				    h->pci_seg, h->flags, h->info);
			DUMP_printk("       mmio-addr: %016llx\n",
				    h->mmio_phys);

			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
			if (iommu == NULL)
				return -ENOMEM;
			ret = init_iommu_one(iommu, h);
			if (ret)
				return ret;
			break;
		default:
			break;
		}
		p += h->length;

	}
	WARN_ON(p != end);

	amd_iommu_initialized = true;

	return 0;
}

/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
 * in the system. It's a bit challenging because there could be multiple
 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
 * pci_dev.
 *
 ****************************************************************************/

static int iommu_setup_msi(struct amd_iommu *iommu)
{
	int r;

	if (pci_enable_msi(iommu->dev))
		return 1;

	r = request_irq(iommu->dev->irq, amd_iommu_int_handler,
			IRQF_SAMPLE_RANDOM,
			"AMD-Vi",
			NULL);

	if (r) {
		pci_disable_msi(iommu->dev);
		return 1;
	}

	iommu->int_enabled = true;
	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

	return 0;
}

static int iommu_init_msi(struct amd_iommu *iommu)
{
	if (iommu->int_enabled)
		return 0;

	if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
		return iommu_setup_msi(iommu);

	return 1;
}

/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/

static void __init free_unity_maps(void)
{
	struct unity_map_entry *entry, *next;

	list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}

/* called when we find an exclusion range definition in ACPI */
static int __init init_exclusion_range(struct ivmd_header *m)
{
	int i;

	switch (m->type) {
	case ACPI_IVMD_TYPE:
		set_device_exclusion_range(m->devid, m);
		break;
	case ACPI_IVMD_TYPE_ALL:
		for (i = 0; i <= amd_iommu_last_bdf; ++i)
			set_device_exclusion_range(i, m);
		break;
	case ACPI_IVMD_TYPE_RANGE:
		for (i = m->devid; i <= m->aux; ++i)
			set_device_exclusion_range(i, m);
		break;
	default:
		break;
	}

	return 0;
}

/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m)
{
	struct unity_map_entry *e = NULL;
	char *s;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	switch (m->type) {
	default:
		kfree(e);
		return 0;
	case ACPI_IVMD_TYPE:
		s = "IVMD_TYPE\t\t\t";
		e->devid_start = e->devid_end = m->devid;
		break;
	case ACPI_IVMD_TYPE_ALL:
		s = "IVMD_TYPE_ALL\t\t";
		e->devid_start = 0;
		e->devid_end = amd_iommu_last_bdf;
		break;
	case ACPI_IVMD_TYPE_RANGE:
		s = "IVMD_TYPE_RANGE\t\t";
		e->devid_start = m->devid;
		e->devid_end = m->aux;
		break;
	}
	e->address_start = PAGE_ALIGN(m->range_start);
	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
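	/* the protection (read/write) bits follow the unity-map bit in the IVMD flags, hence the shift */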
	e->prot = m->flags >> 1;

	DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
		    " range_start: %016llx range_end: %016llx flags: %x\n", s,
		    PCI_BUS(e->devid_start), PCI_SLOT(e->devid_start),
		    PCI_FUNC(e->devid_start), PCI_BUS(e->devid_end),
		    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
		    e->address_start, e->address_end, m->flags);

	list_add_tail(&e->list, &amd_iommu_unity_map);

	return 0;
}

/* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivmd_header *m;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		m = (struct ivmd_header *)p;
		if (m->flags & IVMD_FLAG_EXCL_RANGE)
			init_exclusion_range(m);
		else if (m->flags & IVMD_FLAG_UNITY_MAP)
			init_unity_map_range(m);

		p += m->length;
	}

	return 0;
}

/*
 * Init the device table to not allow DMA access for devices and to
 * suppress all page faults
 */
static void init_device_table(void)
{
	u16 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		set_dev_entry_bit(devid, DEV_ENTRY_VALID);
		set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
	}
}

/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized
 */
static void enable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_disable(iommu);
		iommu_set_device_table(iommu);
		iommu_enable_command_buffer(iommu);
		iommu_enable_event_buffer(iommu);
		iommu_set_exclusion_range(iommu);
		iommu_init_msi(iommu);
		iommu_enable(iommu);
	}
}

static void disable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_disable(iommu);
}

/*
 * Suspend/Resume support
 * disable suspend until real resume is implemented
 */

static int amd_iommu_resume(struct sys_device *dev)
{
	/* re-load the hardware */
	enable_iommus();

	/*
	 * we have to flush after the IOMMUs are enabled because a
	 * disabled IOMMU will never execute the commands we send
	 */
	amd_iommu_flush_all_devices();
	amd_iommu_flush_all_domains();

	return 0;
}

static int amd_iommu_suspend(struct sys_device *dev, pm_message_t state)
{
	/* disable IOMMUs to go out of the way for BIOS */
	disable_iommus();

	return 0;
}

static struct sysdev_class amd_iommu_sysdev_class = {
	.name = "amd_iommu",
	.suspend = amd_iommu_suspend,
	.resume = amd_iommu_resume,
};

static struct sys_device device_amd_iommu = {
	.id = 0,
	.cls = &amd_iommu_sysdev_class,
};

/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 *
 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
 * three times:
 *
 * 1st pass) Find the highest PCI device id the driver has to handle.
 *           Based on this information the sizes of the data structures
 *           that need to be allocated are determined.
 *
 * 2nd pass) Initialize the data structures just allocated with the
 *           information in the ACPI table about available AMD IOMMUs
 *           in the system. It also maps the PCI devices in the
 *           system to specific IOMMUs
 *
 * 3rd pass) After the basic data structures are allocated and
 *           initialized we update them with information about memory
 *           remapping requirements parsed out of the ACPI table in
 *           this last pass.
 *
 * After that the hardware is initialized and ready to go. In the last
 * step we do some Linux specific things like registering the driver in
 * the dma_ops interface and initializing the suspend/resume support
 * functions. Finally it prints some information about AMD IOMMUs and
 * the driver state and enables the hardware.
 */
static int __init amd_iommu_init(void)
{
	int i, ret = 0;

	/*
	 * First parse ACPI tables to find the largest Bus/Dev/Func
	 * we need to handle. Based on this information the shared data
	 * structures for the IOMMUs in the system will be allocated
	 */
	if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
		return -ENODEV;

	dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);

	ret = -ENOMEM;

	/* Device table - directly used by all IOMMUs */
	amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
				      get_order(dev_table_size));
	if (amd_iommu_dev_table == NULL)
		goto out;

	/*
	 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
	 * IOMMUs see for that device
	 */
	amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
			get_order(alias_table_size));
	if (amd_iommu_alias_table == NULL)
		goto free;

	/* IOMMU rlookup table - find the IOMMU for a specific device */
	amd_iommu_rlookup_table = (void *)__get_free_pages(
			GFP_KERNEL | __GFP_ZERO,
			get_order(rlookup_table_size));
	if (amd_iommu_rlookup_table == NULL)
		goto free;

	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
					    GFP_KERNEL | __GFP_ZERO,
					    get_order(MAX_DOMAIN_ID/8));
	if (amd_iommu_pd_alloc_bitmap == NULL)
		goto free;

	/* init the device table */
	init_device_table();

	/*
	 * let all alias entries point to themselves
	 */
	for (i = 0; i <= amd_iommu_last_bdf; ++i)
		amd_iommu_alias_table[i] = i;

	/*
	 * never allocate domain 0 because it's used as the non-allocated and
	 * error value placeholder
	 */
	amd_iommu_pd_alloc_bitmap[0] = 1;

	spin_lock_init(&amd_iommu_pd_lock);

	/*
	 * now that the data structures are allocated and basically
	 * initialized, start the real ACPI table scan
	 */
	ret = -ENODEV;
	if (acpi_table_parse("IVRS", init_iommu_all) != 0)
		goto free;

	if (!amd_iommu_initialized)
		goto free;

	if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
		goto free;

	ret = sysdev_class_register(&amd_iommu_sysdev_class);
	if (ret)
		goto free;

	ret = sysdev_register(&device_amd_iommu);
	if (ret)
		goto free;

	ret = amd_iommu_init_devices();
	if (ret)
		goto free;

	if (iommu_pass_through)
		ret = amd_iommu_init_passthrough();
	else
		ret = amd_iommu_init_dma_ops();

	if (ret)
		goto free;

	amd_iommu_init_api();

	amd_iommu_init_notifier();

	enable_iommus();

	if (iommu_pass_through)
		goto out;

	if (amd_iommu_unmap_flush)
		printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n");
	else
		printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");

	x86_platform.iommu_shutdown = disable_iommus;
out:
	return ret;

free:

	amd_iommu_uninit_devices();

	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
		   get_order(MAX_DOMAIN_ID/8));

	free_pages((unsigned long)amd_iommu_rlookup_table,
		   get_order(rlookup_table_size));

	free_pages((unsigned long)amd_iommu_alias_table,
		   get_order(alias_table_size));

	free_pages((unsigned long)amd_iommu_dev_table,
		   get_order(dev_table_size));

	free_iommu_all();

	free_unity_maps();

	goto out;
}

/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It just looks if there is an IVRS ACPI table to detect AMD
 * IOMMUs
 *
 ****************************************************************************/
static int __init early_amd_iommu_detect(struct acpi_table_header *table)
{
	return 0;
}

void __init amd_iommu_detect(void)
{
	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
		return;

	if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) {
		iommu_detected = 1;
		amd_iommu_detected = 1;
		x86_init.iommu.iommu_init = amd_iommu_init;

		/* Make sure ACS will be enabled */
		pci_request_acs();
	}
}

/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options.
 *
 ****************************************************************************/

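/* "amd_iommu_dump" on the kernel command line makes the ACPI parser print the parsed IVRS contents */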
static int __init parse_amd_iommu_dump(char *str)
{
	amd_iommu_dump = true;

	return 1;
}

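/* "amd_iommu=fullflush" switches the driver to flushing the IO/TLB on every unmap */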
static int __init parse_amd_iommu_options(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "fullflush", 9) == 0)
			amd_iommu_unmap_flush = true;
	}

	return 1;
}

__setup("amd_iommu_dump", parse_amd_iommu_dump);
__setup("amd_iommu=", parse_amd_iommu_options);