/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/sysdev.h>
#include <asm/pci-direct.h>
#include <asm/amd_iommu_types.h>
#include <asm/amd_iommu.h>
#include <asm/iommu.h>

/*
 * definitions for the ACPI scanning code
 */
#define PCI_BUS(x) (((x) >> 8) & 0xff)
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE                  0x10
#define ACPI_IVMD_TYPE_ALL              0x20
#define ACPI_IVMD_TYPE                  0x21
#define ACPI_IVMD_TYPE_RANGE            0x22

#define IVHD_DEV_ALL                    0x01
#define IVHD_DEV_SELECT                 0x02
#define IVHD_DEV_SELECT_RANGE_START     0x03
#define IVHD_DEV_RANGE_END              0x04
#define IVHD_DEV_ALIAS                  0x42
#define IVHD_DEV_ALIAS_RANGE            0x43
#define IVHD_DEV_EXT_SELECT             0x46
#define IVHD_DEV_EXT_SELECT_RANGE       0x47

#define IVHD_FLAG_HT_TUN_EN             0x00
#define IVHD_FLAG_PASSPW_EN             0x01
#define IVHD_FLAG_RESPASSPW_EN          0x02
#define IVHD_FLAG_ISOC_EN               0x03

#define IVMD_FLAG_EXCL_RANGE            0x08
#define IVMD_FLAG_UNITY_MAP             0x01

#define ACPI_DEVFLAG_INITPASS           0x01
#define ACPI_DEVFLAG_EXTINT             0x02
#define ACPI_DEVFLAG_NMI                0x04
#define ACPI_DEVFLAG_SYSMGT1            0x10
#define ACPI_DEVFLAG_SYSMGT2            0x20
#define ACPI_DEVFLAG_LINT0              0x40
#define ACPI_DEVFLAG_LINT1              0x80
#define ACPI_DEVFLAG_ATSDIS             0x10000000

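/*
 * A worked example of the device id layout assumed by PCI_BUS() above
 * (together with the generic PCI_SLOT()/PCI_FUNC() macros): the 16-bit
 * devid 0x0821 decodes to bus 0x08, device 0x04, function 0x01.
 */
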
/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entries.
 */
struct ivhd_header {
        u8 type;
        u8 flags;
        u16 length;
        u16 devid;
        u16 cap_ptr;
        u64 mmio_phys;
        u16 pci_seg;
        u16 info;
        u32 reserved;
} __attribute__((packed));

/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
struct ivhd_entry {
        u8 type;
        u16 devid;
        u8 flags;
        u32 ext;
} __attribute__((packed));

/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
struct ivmd_header {
        u8 type;
        u8 flags;
        u16 length;
        u16 devid;
        u16 aux;
        u64 resv;
        u64 range_start;
        u64 range_length;
} __attribute__((packed));

static int __initdata amd_iommu_detected;

u16 amd_iommu_last_bdf;                 /* largest PCI device id we have
                                           to handle */
LIST_HEAD(amd_iommu_unity_map);         /* a list of required unity mappings
                                           we find in ACPI */
unsigned amd_iommu_aperture_order = 26; /* size of aperture in power of 2 */
int amd_iommu_isolate;                  /* if 1, device isolation is enabled */

LIST_HEAD(amd_iommu_list);              /* list of all AMD IOMMUs in the
                                           system */

/*
 * Pointer to the device table which is shared by all AMD IOMMUs.
 * It is indexed by the PCI device id or the HT unit id and contains
 * information about the domain the device belongs to as well as the
 * page table root pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;

/*
 * The pd table (protection domain table) is used to find the protection domain
 * data structure a device belongs to. Indexed with the PCI device id too.
 */
struct protection_domain **amd_iommu_pd_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;      /* size of the device table */
static u32 alias_table_size;    /* size of the alias table */
static u32 rlookup_table_size;  /* size of the rlookup table */

static inline void update_last_devid(u16 devid)
{
        if (devid > amd_iommu_last_bdf)
                amd_iommu_last_bdf = devid;
}

static inline unsigned long tbl_size(int entry_size)
{
        unsigned shift = PAGE_SHIFT +
                get_order(amd_iommu_last_bdf * entry_size);

        return 1UL << shift;
}
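
/*
 * Sizing sketch (the 32-byte device table entry size is an assumption
 * from the driver headers, not visible here): with amd_iommu_last_bdf
 * equal to 0xffff, 0xffff * 32 is just under 2 MB, get_order() rounds
 * that up to order 9, and tbl_size() returns a power-of-two 2 MB table.
 */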

/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required by this driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated
 */
static void __init iommu_set_exclusion_range(struct amd_iommu *iommu)
{
        u64 start = iommu->exclusion_start & PAGE_MASK;
        u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
        u64 entry;

        if (!iommu->exclusion_start)
                return;

        entry = start | MMIO_EXCL_ENABLE_MASK;
        memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
                    &entry, sizeof(entry));

        entry = limit;
        memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
                    &entry, sizeof(entry));
}

/* Programs the physical address of the device table into the IOMMU hardware */
static void __init iommu_set_device_table(struct amd_iommu *iommu)
{
        u32 entry;

        BUG_ON(iommu->mmio_base == NULL);

        entry = virt_to_phys(amd_iommu_dev_table);
        entry |= (dev_table_size >> 12) - 1;
        memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
                    &entry, sizeof(entry));
}
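
/*
 * A note on the size encoding above (an assumption about the register
 * layout, consistent with the arithmetic): the low bits of the device
 * table base register carry the table size in 4 KB units minus one, so
 * a 2 MB table is encoded as (2 MB >> 12) - 1 = 0x1ff.
 */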

/* Generic functions to enable/disable certain features of the IOMMU. */
static void __init iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
        u32 ctrl;

        ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        ctrl |= (1 << bit);
        writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
        u32 ctrl;

        ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        ctrl &= ~(1 << bit);
        writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}
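
/*
 * Usage sketch: the control register is treated as a read-modify-write
 * bitfield, so e.g. iommu_feature_enable(iommu, CONTROL_COHERENT_EN)
 * reads the 32-bit control word, ORs in the named bit and writes it
 * back. The sequence is not locked; callers here run during early init.
 */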

/* Function to enable the hardware */
void __init iommu_enable(struct amd_iommu *iommu)
{
        printk(KERN_INFO "AMD IOMMU: Enabling IOMMU at ");
        print_devid(iommu->devid, 0);
        printk(" cap 0x%hx\n", iommu->cap_ptr);

        iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 * __init iommu_map_mmio_space(u64 address)
{
        u8 *ret;

        if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu"))
                return NULL;

        ret = ioremap_nocache(address, MMIO_REGION_LENGTH);
        if (ret != NULL)
                return ret;

        release_mem_region(address, MMIO_REGION_LENGTH);

        return NULL;
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
        if (iommu->mmio_base)
                iounmap(iommu->mmio_base);
        release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH);
}

/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Based on this information the size of the shared
 * data structures is determined later.
 *
 ****************************************************************************/

/*
 * This function reads the last device id the IOMMU has to handle from the PCI
 * capability header for this IOMMU
 */
static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr)
{
        u32 cap;

        cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
        update_last_devid(calc_devid(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));

        return 0;
}

/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function checks whether a higher device id is defined in the ACPI table
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
        u8 *p = (void *)h, *end = (void *)h;
        struct ivhd_entry *dev;

        p += sizeof(*h);
        end += h->length;

        find_last_devid_on_pci(PCI_BUS(h->devid),
                        PCI_SLOT(h->devid),
                        PCI_FUNC(h->devid),
                        h->cap_ptr);

        while (p < end) {
                dev = (struct ivhd_entry *)p;
                switch (dev->type) {
                case IVHD_DEV_SELECT:
                case IVHD_DEV_RANGE_END:
                case IVHD_DEV_ALIAS:
                case IVHD_DEV_EXT_SELECT:
                        /* all the above subfield types refer to device ids */
                        update_last_devid(dev->devid);
                        break;
                default:
                        break;
                }
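                /*
                 * Bits 7:6 of an IVHD entry's type field encode the entry
                 * size: 0x04 << 0 = 4 bytes for the types below 0x40 and
                 * 0x04 << 1 = 8 bytes for the extended 0x4x types above.
                 */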
                p += 0x04 << (*p >> 6);
        }

        WARN_ON(p != end);

        return 0;
}

/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
        int i;
        u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table;
        struct ivhd_header *h;

        /*
         * Validate checksum here so we don't need to do it when
         * we actually parse the table
         */
        for (i = 0; i < table->length; ++i)
                checksum += p[i];
        if (checksum != 0)
                /* ACPI table corrupt */
                return -ENODEV;

        p += IVRS_HEADER_LENGTH;

        end += table->length;
        while (p < end) {
                h = (struct ivhd_header *)p;
                switch (h->type) {
                case ACPI_IVHD_TYPE:
                        find_last_devid_from_ivhd(h);
                        break;
                default:
                        break;
                }
                p += h->length;
        }
        WARN_ON(p != end);

        return 0;
}

/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup table and also
 * basically initialize the hardware.
 *
 ****************************************************************************/

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously
 */
static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
{
        u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                        get_order(CMD_BUFFER_SIZE));
        u64 entry;

        if (cmd_buf == NULL)
                return NULL;

        iommu->cmd_buf_size = CMD_BUFFER_SIZE;

        entry = (u64)virt_to_phys(cmd_buf);
        entry |= MMIO_CMD_SIZE_512;
        memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
                    &entry, sizeof(entry));

        iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);

        return cmd_buf;
}
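
/*
 * A sizing note (the 8 KB CMD_BUFFER_SIZE and the 16-byte command entry
 * size are assumptions from the driver headers): the buffer then holds
 * 512 commands, which is what the MMIO_CMD_SIZE_512 encoding in the base
 * register advertises to the hardware.
 */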

static void __init free_command_buffer(struct amd_iommu *iommu)
{
        free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log its events */
static u8 * __init alloc_event_buffer(struct amd_iommu *iommu)
{
        u64 entry;

        iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                        get_order(EVT_BUFFER_SIZE));

        if (iommu->evt_buf == NULL)
                return NULL;

        entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
        memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
                    &entry, sizeof(entry));

        iommu->evt_buf_size = EVT_BUFFER_SIZE;

        return iommu->evt_buf;
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
        free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}

/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
        int i = (bit >> 5) & 0x07;
        int _bit = bit & 0x1f;

        amd_iommu_dev_table[devid].data[i] |= (1 << _bit);
}
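
/*
 * Index arithmetic, by example: the device table entry is treated as an
 * array of 32-bit words, so a bit number of 0x62 selects word
 * (0x62 >> 5) & 0x07 = 3 and bit 0x62 & 0x1f = 2 within that word.
 */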

/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
        amd_iommu_rlookup_table[devid] = iommu;
}

/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
                                           u16 devid, u32 flags, u32 ext_flags)
{
        if (flags & ACPI_DEVFLAG_INITPASS)
                set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
        if (flags & ACPI_DEVFLAG_EXTINT)
                set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
        if (flags & ACPI_DEVFLAG_NMI)
                set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
        if (flags & ACPI_DEVFLAG_SYSMGT1)
                set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
        if (flags & ACPI_DEVFLAG_SYSMGT2)
                set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
        if (flags & ACPI_DEVFLAG_LINT0)
                set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
        if (flags & ACPI_DEVFLAG_LINT1)
                set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

        set_iommu_for_device(iommu, devid);
}

/*
 * Reads the device exclusion range from ACPI and initializes the IOMMU with
 * it
 */
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
        struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

        if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
                return;

        if (iommu) {
                /*
                 * We can only configure exclusion ranges per IOMMU, not
                 * per device. But we can enable the exclusion range per
                 * device. This is done here
                 */
                set_dev_entry_bit(m->devid, DEV_ENTRY_EX);
                iommu->exclusion_start = m->range_start;
                iommu->exclusion_length = m->range_length;
        }
}

/*
 * This function reads some important data from the IOMMU PCI space and
 * initializes the driver data structure with it. It reads the hardware
 * capabilities and the first/last device entries
 */
static void __init init_iommu_from_pci(struct amd_iommu *iommu)
{
        int bus = PCI_BUS(iommu->devid);
        int dev = PCI_SLOT(iommu->devid);
        int fn  = PCI_FUNC(iommu->devid);
        int cap_ptr = iommu->cap_ptr;
        u32 range;

        iommu->cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_CAP_HDR_OFFSET);

        range = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
        iommu->first_device = calc_devid(MMIO_GET_BUS(range),
                                         MMIO_GET_FD(range));
        iommu->last_device = calc_devid(MMIO_GET_BUS(range),
                                        MMIO_GET_LD(range));
}

/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
                                        struct ivhd_header *h)
{
        u8 *p = (u8 *)h;
        u8 *end = p, flags = 0;
        u16 dev_i, devid = 0, devid_start = 0, devid_to = 0;
        u32 ext_flags = 0;
        bool alias = false;
        struct ivhd_entry *e;

        /*
         * First set the recommended feature enable bits from ACPI
         * into the IOMMU control registers
         */
        h->flags & IVHD_FLAG_HT_TUN_EN ?
                iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
                iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

        h->flags & IVHD_FLAG_PASSPW_EN ?
                iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
                iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

        h->flags & IVHD_FLAG_RESPASSPW_EN ?
                iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
                iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

        h->flags & IVHD_FLAG_ISOC_EN ?
                iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
                iommu_feature_disable(iommu, CONTROL_ISOC_EN);

        /*
         * make IOMMU memory accesses cache coherent
         */
        iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

        /*
         * Done. Now parse the device entries
         */
        p += sizeof(struct ivhd_header);
        end += h->length;

        while (p < end) {
                e = (struct ivhd_entry *)p;
                switch (e->type) {
                case IVHD_DEV_ALL:
                        for (dev_i = iommu->first_device;
                                        dev_i <= iommu->last_device; ++dev_i)
                                set_dev_entry_from_acpi(iommu, dev_i,
                                                        e->flags, 0);
                        break;
                case IVHD_DEV_SELECT:
                        devid = e->devid;
                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
                        break;
                case IVHD_DEV_SELECT_RANGE_START:
                        devid_start = e->devid;
                        flags = e->flags;
                        ext_flags = 0;
                        alias = false;
                        break;
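                /*
                 * For the alias entries below, the 16-bit target devid
                 * is carried in bits 23:8 of the entry's extended field.
                 */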
                case IVHD_DEV_ALIAS:
                        devid = e->devid;
                        devid_to = e->ext >> 8;
                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
                        amd_iommu_alias_table[devid] = devid_to;
                        break;
                case IVHD_DEV_ALIAS_RANGE:
                        devid_start = e->devid;
                        flags = e->flags;
                        devid_to = e->ext >> 8;
                        ext_flags = 0;
                        alias = true;
                        break;
                case IVHD_DEV_EXT_SELECT:
                        devid = e->devid;
                        set_dev_entry_from_acpi(iommu, devid, e->flags,
                                                e->ext);
                        break;
                case IVHD_DEV_EXT_SELECT_RANGE:
                        devid_start = e->devid;
                        flags = e->flags;
                        ext_flags = e->ext;
                        alias = false;
                        break;
                case IVHD_DEV_RANGE_END:
                        devid = e->devid;
                        for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
                                if (alias)
                                        amd_iommu_alias_table[dev_i] = devid_to;
                                set_dev_entry_from_acpi(iommu,
                                                amd_iommu_alias_table[dev_i],
                                                flags, ext_flags);
                        }
                        break;
                default:
                        break;
                }

                p += 0x04 << (e->type >> 6);
        }
}

/* Initializes the device->iommu mapping for the driver */
static int __init init_iommu_devices(struct amd_iommu *iommu)
{
        u16 i;

        for (i = iommu->first_device; i <= iommu->last_device; ++i)
                set_iommu_for_device(iommu, i);

        return 0;
}

static void __init free_iommu_one(struct amd_iommu *iommu)
{
        free_command_buffer(iommu);
        free_event_buffer(iommu);
        iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
        struct amd_iommu *iommu, *next;

        list_for_each_entry_safe(iommu, next, &amd_iommu_list, list) {
                list_del(&iommu->list);
                free_iommu_one(iommu);
                kfree(iommu);
        }
}

/*
 * This function glues the initialization function for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
        spin_lock_init(&iommu->lock);
        list_add_tail(&iommu->list, &amd_iommu_list);

        /*
         * Copy data from ACPI table entry to the iommu struct
         */
        iommu->devid = h->devid;
        iommu->cap_ptr = h->cap_ptr;
        iommu->mmio_phys = h->mmio_phys;
        iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys);
        if (!iommu->mmio_base)
                return -ENOMEM;

        iommu_set_device_table(iommu);
        iommu->cmd_buf = alloc_command_buffer(iommu);
        if (!iommu->cmd_buf)
                return -ENOMEM;

        iommu->evt_buf = alloc_event_buffer(iommu);
        if (!iommu->evt_buf)
                return -ENOMEM;

        init_iommu_from_pci(iommu);
        init_iommu_from_acpi(iommu, h);
        init_iommu_devices(iommu);

        return 0;
}

/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
        u8 *p = (u8 *)table, *end = (u8 *)table;
        struct ivhd_header *h;
        struct amd_iommu *iommu;
        int ret;

        end += table->length;
        p += IVRS_HEADER_LENGTH;

        while (p < end) {
                h = (struct ivhd_header *)p;
                switch (*p) {
                case ACPI_IVHD_TYPE:
                        iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
                        if (iommu == NULL)
                                return -ENOMEM;
                        ret = init_iommu_one(iommu, h);
                        if (ret)
                                return ret;
                        break;
                default:
                        break;
                }
                p += h->length;
        }
        WARN_ON(p != end);

        return 0;
}

/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/

static void __init free_unity_maps(void)
{
        struct unity_map_entry *entry, *next;

        list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
                list_del(&entry->list);
                kfree(entry);
        }
}

/* called when we find an exclusion range definition in ACPI */
static int __init init_exclusion_range(struct ivmd_header *m)
{
        int i;

        switch (m->type) {
        case ACPI_IVMD_TYPE:
                set_device_exclusion_range(m->devid, m);
                break;
        case ACPI_IVMD_TYPE_ALL:
                for (i = 0; i <= amd_iommu_last_bdf; ++i)
                        set_device_exclusion_range(i, m);
                break;
        case ACPI_IVMD_TYPE_RANGE:
                for (i = m->devid; i <= m->aux; ++i)
                        set_device_exclusion_range(i, m);
                break;
        default:
                break;
        }

        return 0;
}

/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m)
{
        struct unity_map_entry *e;

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (e == NULL)
                return -ENOMEM;

        switch (m->type) {
        default:
        case ACPI_IVMD_TYPE:
                e->devid_start = e->devid_end = m->devid;
                break;
        case ACPI_IVMD_TYPE_ALL:
                e->devid_start = 0;
                e->devid_end = amd_iommu_last_bdf;
                break;
        case ACPI_IVMD_TYPE_RANGE:
                e->devid_start = m->devid;
                e->devid_end = m->aux;
                break;
        }
        e->address_start = PAGE_ALIGN(m->range_start);
        e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
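        /*
         * Bit 0 of the IVMD flags is the unity-map marker itself, so the
         * shift below keeps only the permission bits. That these line up
         * with the driver's IOMMU_PROT_* read/write bits is an assumption
         * about the header definitions, not something visible in this file.
         */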
        e->prot = m->flags >> 1;

        list_add_tail(&e->list, &amd_iommu_unity_map);

        return 0;
}

/* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
{
        u8 *p = (u8 *)table, *end = (u8 *)table;
        struct ivmd_header *m;

        end += table->length;
        p += IVRS_HEADER_LENGTH;

        while (p < end) {
                m = (struct ivmd_header *)p;
                if (m->flags & IVMD_FLAG_EXCL_RANGE)
                        init_exclusion_range(m);
                else if (m->flags & IVMD_FLAG_UNITY_MAP)
                        init_unity_map_range(m);

                p += m->length;
        }

        return 0;
}

/*
 * Init the device table to not allow DMA access for devices and
 * suppress all page faults
 */
static void init_device_table(void)
{
        u16 devid;

        for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
                set_dev_entry_bit(devid, DEV_ENTRY_VALID);
                set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
                set_dev_entry_bit(devid, DEV_ENTRY_NO_PAGE_FAULT);
        }
}

/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized
 */
static void __init enable_iommus(void)
{
        struct amd_iommu *iommu;

        list_for_each_entry(iommu, &amd_iommu_list, list) {
                iommu_set_exclusion_range(iommu);
                iommu_enable(iommu);
        }
}

/*
 * Suspend/Resume support
 * disable suspend until a real resume is implemented
 */

static int amd_iommu_resume(struct sys_device *dev)
{
        return 0;
}

static int amd_iommu_suspend(struct sys_device *dev, pm_message_t state)
{
        return -EINVAL;
}

static struct sysdev_class amd_iommu_sysdev_class = {
        .name = "amd_iommu",
        .suspend = amd_iommu_suspend,
        .resume = amd_iommu_resume,
};

static struct sys_device device_amd_iommu = {
        .id = 0,
        .cls = &amd_iommu_sysdev_class,
};

/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 *
 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
 * three times:
 *
 * 1st pass) Find the highest PCI device id the driver has to handle.
 *           Based on this information the sizes of the data structures
 *           which need to be allocated are determined.
 *
 * 2nd pass) Initialize the data structures just allocated with the
 *           information in the ACPI table about available AMD IOMMUs
 *           in the system. It also maps the PCI devices in the
 *           system to specific IOMMUs
 *
 * 3rd pass) After the basic data structures are allocated and
 *           initialized we update them with information about memory
 *           remapping requirements parsed out of the ACPI table in
 *           this last pass.
 *
 * After that the hardware is initialized and ready to go. In the last
 * step we do some Linux specific things like registering the driver in
 * the dma_ops interface and initializing the suspend/resume support
 * functions. Finally it prints some information about AMD IOMMUs and
 * the driver state and enables the hardware.
 */
int __init amd_iommu_init(void)
{
        int i, ret = 0;

        if (no_iommu) {
                printk(KERN_INFO "AMD IOMMU disabled by kernel command line\n");
                return 0;
        }

        if (!amd_iommu_detected)
                return -ENODEV;

        /*
         * First parse ACPI tables to find the largest Bus/Dev/Func
         * we need to handle. Based on this information the shared data
         * structures for the IOMMUs in the system will be allocated
         */
        if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
                return -ENODEV;

        dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
        alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
        rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);

        ret = -ENOMEM;

        /* Device table - directly used by all IOMMUs */
        amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                      get_order(dev_table_size));
        if (amd_iommu_dev_table == NULL)
                goto out;

        /*
         * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
         * IOMMU sees for that device
         */
        amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
                                        get_order(alias_table_size));
        if (amd_iommu_alias_table == NULL)
                goto free;

        /* IOMMU rlookup table - find the IOMMU for a specific device */
        amd_iommu_rlookup_table = (void *)__get_free_pages(GFP_KERNEL,
                                          get_order(rlookup_table_size));
        if (amd_iommu_rlookup_table == NULL)
                goto free;

        /*
         * Protection Domain table - maps devices to protection domains
         * This table has the same size as the rlookup_table
         */
        amd_iommu_pd_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                     get_order(rlookup_table_size));
        if (amd_iommu_pd_table == NULL)
                goto free;

        amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
                                            GFP_KERNEL | __GFP_ZERO,
                                            get_order(MAX_DOMAIN_ID/8));
        if (amd_iommu_pd_alloc_bitmap == NULL)
                goto free;

        /* init the device table */
        init_device_table();

        /*
         * let all alias entries point to themselves
         */
        for (i = 0; i <= amd_iommu_last_bdf; ++i)
                amd_iommu_alias_table[i] = i;

        /*
         * never allocate domain 0 because it's used as the non-allocated and
         * error value placeholder
         */
        amd_iommu_pd_alloc_bitmap[0] = 1;

        /*
         * now the data structures are allocated and basically initialized
         * start the real acpi table scan
         */
        ret = -ENODEV;
        if (acpi_table_parse("IVRS", init_iommu_all) != 0)
                goto free;

        if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
                goto free;

        ret = sysdev_class_register(&amd_iommu_sysdev_class);
        if (ret)
                goto free;

        ret = sysdev_register(&device_amd_iommu);
        if (ret)
                goto free;

        ret = amd_iommu_init_dma_ops();
        if (ret)
                goto free;

        enable_iommus();

        printk(KERN_INFO "AMD IOMMU: aperture size is %d MB\n",
                        (1 << (amd_iommu_aperture_order-20)));

        printk(KERN_INFO "AMD IOMMU: device isolation ");
        if (amd_iommu_isolate)
                printk("enabled\n");
        else
                printk("disabled\n");

        if (iommu_fullflush)
                printk(KERN_INFO "AMD IOMMU: IO/TLB flush on unmap enabled\n");
        else
                printk(KERN_INFO "AMD IOMMU: Lazy IO/TLB flushing enabled\n");

out:
        return ret;

free:
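        /*
         * The hard-coded order 1 below matches get_order(MAX_DOMAIN_ID/8)
         * for the allocation above, assuming 4 KB pages and a 16-bit
         * protection domain id space (8 KB, i.e. two pages).
         */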
        free_pages((unsigned long)amd_iommu_pd_alloc_bitmap, 1);

        free_pages((unsigned long)amd_iommu_pd_table,
                   get_order(rlookup_table_size));

        free_pages((unsigned long)amd_iommu_rlookup_table,
                   get_order(rlookup_table_size));

        free_pages((unsigned long)amd_iommu_alias_table,
                   get_order(alias_table_size));

        free_pages((unsigned long)amd_iommu_dev_table,
                   get_order(dev_table_size));

        free_iommu_all();

        free_unity_maps();

        goto out;
}

/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It just checks whether there is an IVRS ACPI table to detect AMD
 * IOMMUs
 *
 ****************************************************************************/
static int __init early_amd_iommu_detect(struct acpi_table_header *table)
{
        return 0;
}

void __init amd_iommu_detect(void)
{
        if (swiotlb || no_iommu || (iommu_detected && !gart_iommu_aperture))
                return;

        if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) {
                iommu_detected = 1;
                amd_iommu_detected = 1;
#ifdef CONFIG_GART_IOMMU
                gart_iommu_aperture_disabled = 1;
                gart_iommu_aperture = 0;
#endif
        }
}

/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options.
 *
 ****************************************************************************/

static int __init parse_amd_iommu_options(char *str)
{
        for (; *str; ++str) {
                if (strncmp(str, "isolate", 7) == 0)
                        amd_iommu_isolate = 1;
        }

        return 1;
}

static int __init parse_amd_iommu_size_options(char *str)
{
        unsigned order = PAGE_SHIFT + get_order(memparse(str, &str));

        if ((order > 24) && (order < 31))
                amd_iommu_aperture_order = order;

        return 1;
}
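
/*
 * Worked example for the size option (assuming 4 KB pages): booting with
 * "amd_iommu_size=64M" makes memparse() return 0x4000000, get_order()
 * yields 14, and the resulting order of 12 + 14 = 26 sets a 64 MB
 * aperture; values outside the 32 MB to 1 GB window are silently ignored.
 */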

__setup("amd_iommu=", parse_amd_iommu_options);
__setup("amd_iommu_size=", parse_amd_iommu_size_options);