/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/sysdev.h>
#include <asm/pci-direct.h>
#include <asm/amd_iommu_types.h>
#include <asm/amd_iommu.h>
#include <asm/gart.h>	/* gart_iommu_aperture* used in amd_iommu_detect() */
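
/*
 * This file implements the initialization of the AMD IOMMU driver. It
 * parses the ACPI IVRS table, allocates the data structures shared by
 * all IOMMUs and programs the hardware found in the system.
 */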
/*
 * definitions for the ACPI scanning code
 */
#define UPDATE_LAST_BDF(x) do {\
	if ((x) > amd_iommu_last_bdf) \
		amd_iommu_last_bdf = (x); \
	} while (0);
#define DEVID(bus, devfn) (((bus) << 8) | (devfn))
#define PCI_BUS(x) (((x) >> 8) & 0xff)
#define IVRS_HEADER_LENGTH 48
#define TBL_SIZE(x) (1 << (PAGE_SHIFT + get_order(amd_iommu_last_bdf * (x))))
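
/* IVRS block type IDs this driver handles (IVHD: hardware definition
 * blocks, IVMD: memory definition blocks) */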
#define ACPI_IVHD_TYPE                  0x10
#define ACPI_IVMD_TYPE_ALL              0x20
#define ACPI_IVMD_TYPE                  0x21
#define ACPI_IVMD_TYPE_RANGE            0x22
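
/* types of device entries inside an IVHD block */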
#define IVHD_DEV_ALL                    0x01
#define IVHD_DEV_SELECT                 0x02
#define IVHD_DEV_SELECT_RANGE_START     0x03
#define IVHD_DEV_RANGE_END              0x04
#define IVHD_DEV_ALIAS                  0x42
#define IVHD_DEV_ALIAS_RANGE            0x43
#define IVHD_DEV_EXT_SELECT             0x46
#define IVHD_DEV_EXT_SELECT_RANGE       0x47
/*
 * These are tested as bitmasks against the IVHD header flags field
 * (bits 0-3 per the IVRS specification), so they must be non-zero
 * masks, not bit indices.
 */
#define IVHD_FLAG_HT_TUN_EN             0x01
#define IVHD_FLAG_PASSPW_EN             0x02
#define IVHD_FLAG_RESPASSPW_EN          0x04
#define IVHD_FLAG_ISOC_EN               0x08
#define IVMD_FLAG_EXCL_RANGE            0x08
#define IVMD_FLAG_UNITY_MAP             0x01
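
/* flags in IVHD device entries; translated into device table bits by
 * set_dev_entry_from_acpi() */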
#define ACPI_DEVFLAG_INITPASS           0x01
#define ACPI_DEVFLAG_EXTINT             0x02
#define ACPI_DEVFLAG_NMI                0x04
#define ACPI_DEVFLAG_SYSMGT1            0x10
#define ACPI_DEVFLAG_SYSMGT2            0x20
#define ACPI_DEVFLAG_LINT0              0x40
#define ACPI_DEVFLAG_LINT1              0x80
#define ACPI_DEVFLAG_ATSDIS             0x10000000
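
/*
 * Layout of the sub-table headers in the IVRS table. The structure
 * bodies below are reconstructed from the field accesses in this file
 * and the AMD IOMMU (IVRS) specification; the pci_seg/info/reserved
 * fields in particular are assumptions that only serve to keep the
 * structures correctly sized.
 */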
struct ivhd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 cap_ptr;
	u64 mmio_phys;
	u16 pci_seg;
	u16 info;
	u32 reserved;
} __attribute__((packed));

struct ivhd_entry {
	u8 type;
	u16 devid;
	u8 flags;
	u32 ext;
} __attribute__((packed));

struct ivmd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 aux;
	u64 resv;
	u64 range_start;
	u64 range_length;
} __attribute__((packed));
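
/*
 * Global driver state. The device, alias, rlookup and protection domain
 * tables below are shared with the actual address translation code.
 */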
static int __initdata amd_iommu_detected;

u16 amd_iommu_last_bdf;
struct list_head amd_iommu_unity_map;
unsigned amd_iommu_aperture_order = 26;
int amd_iommu_isolate;

struct list_head amd_iommu_list;
struct dev_table_entry *amd_iommu_dev_table;
u16 *amd_iommu_alias_table;
struct amd_iommu **amd_iommu_rlookup_table;
struct protection_domain **amd_iommu_pd_table;
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;
static u32 alias_table_size;
static u32 rlookup_table_size;
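
/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required for that driver.
 *
 ****************************************************************************/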
static void __init iommu_set_exclusion_range(struct amd_iommu *iommu)
{
	u64 start = iommu->exclusion_start & PAGE_MASK;
	u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
	u64 entry;

	if (!iommu->exclusion_start)
		return;

	entry = start | MMIO_EXCL_ENABLE_MASK;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
			&entry, sizeof(entry));

	entry = limit;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
			&entry, sizeof(entry));
}
static void __init iommu_set_device_table(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->mmio_base == NULL);

	entry = virt_to_phys(amd_iommu_dev_table);
	entry |= (dev_table_size >> 12) - 1;
	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
			&entry, sizeof(entry));
}
static void __init iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1 << bit);
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}
static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~(1 << bit);
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}
void __init iommu_enable(struct amd_iommu *iommu)
{
	printk(KERN_INFO "AMD IOMMU: Enabling IOMMU at ");
	print_devid(iommu->devid, 0);
	printk(" cap 0x%hx\n", iommu->cap_ptr);

	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}
static u8 * __init iommu_map_mmio_space(u64 address)
{
	u8 *ret;

	if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu"))
		return NULL;

	ret = ioremap_nocache(address, MMIO_REGION_LENGTH);
	if (ret != NULL)
		return ret;

	release_mem_region(address, MMIO_REGION_LENGTH);

	return NULL;
}
static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
	if (iommu->mmio_base)
		iounmap(iommu->mmio_base);
	release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH);
}
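
/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we find out the highest device id this code has
 * to handle so that the sizes of the shared tables can be computed.
 *
 ****************************************************************************/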
static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr)
{
	u32 cap;

	cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
	UPDATE_LAST_BDF(DEVID(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));

	return 0;
}
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
	u8 *p = (void *)h, *end = (void *)h;
	struct ivhd_entry *dev;

	p += sizeof(*h);
	end += h->length;

	find_last_devid_on_pci(PCI_BUS(h->devid),
			PCI_SLOT(h->devid),
			PCI_FUNC(h->devid),
			h->cap_ptr);

	while (p < end) {
		dev = (struct ivhd_entry *)p;
		switch (dev->type) {
		case IVHD_DEV_SELECT:
		case IVHD_DEV_RANGE_END:
		case IVHD_DEV_ALIAS:
		case IVHD_DEV_EXT_SELECT:
			UPDATE_LAST_BDF(dev->devid);
			break;
		default:
			break;
		}
		/* entry length is encoded in the upper two type bits */
		p += 0x04 << (*p >> 6);
	}

	return 0;
}
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
	int i;
	u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;

	/*
	 * Validate checksum here so we don't need to do it when
	 * we actually parse the table
	 */
	for (i = 0; i < table->length; ++i)
		checksum += p[i];
	if (checksum != 0)
		/* ACPI table corrupt */
		return -ENODEV;

	p += IVRS_HEADER_LENGTH;

	end += table->length;
	while (p < end) {
		h = (struct ivhd_header *)p;
		switch (h->type) {
		case ACPI_IVHD_TYPE:
			find_last_devid_from_ivhd(h);
			break;
		default:
			break;
		}
		p += h->length;
	}

	return 0;
}
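
/*
 * The command buffer is a ring buffer used by the driver to send
 * commands to the IOMMU hardware. It is allocated below and its
 * physical address and size are programmed into the hardware's MMIO
 * register set.
 */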
static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
{
	u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL,
			get_order(CMD_BUFFER_SIZE));
	u64 entry;

	if (cmd_buf == NULL)
		return NULL;

	iommu->cmd_buf_size = CMD_BUFFER_SIZE;

	memset(cmd_buf, 0, CMD_BUFFER_SIZE);

	entry = (u64)virt_to_phys(cmd_buf);
	entry |= MMIO_CMD_SIZE_512;
	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
			&entry, sizeof(entry));

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);

	return cmd_buf;
}
static void __init free_command_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->cmd_buf,
			get_order(CMD_BUFFER_SIZE));
}
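
/*
 * Helpers for the shared device table. A device table entry is 256 bits
 * wide, stored as an array of 32-bit words; set_dev_entry_bit() sets a
 * single bit given its position inside the entry.
 */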
static void set_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 5) & 0x07;
	int _bit = bit & 0x1f;

	amd_iommu_dev_table[devid].data[i] |= (1 << _bit);
}
static void __init set_dev_entry_from_acpi(u16 devid, u32 flags, u32 ext_flags)
{
	if (flags & ACPI_DEVFLAG_INITPASS)
		set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
	if (flags & ACPI_DEVFLAG_EXTINT)
		set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
	if (flags & ACPI_DEVFLAG_NMI)
		set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
	if (flags & ACPI_DEVFLAG_SYSMGT1)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
	if (flags & ACPI_DEVFLAG_SYSMGT2)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
	if (flags & ACPI_DEVFLAG_LINT0)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
	if (flags & ACPI_DEVFLAG_LINT1)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);
}
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
	amd_iommu_rlookup_table[devid] = iommu;
}
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

	if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
		return;

	if (iommu) {
		set_dev_entry_bit(m->devid, DEV_ENTRY_EX);
		iommu->exclusion_start = m->range_start;
		iommu->exclusion_length = m->range_length;
	}
}
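
/*
 * Reads the capability header and range register of an IOMMU from PCI
 * config space to determine the range of devices it translates for.
 */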
static void __init init_iommu_from_pci(struct amd_iommu *iommu)
{
	int bus = PCI_BUS(iommu->devid);
	int dev = PCI_SLOT(iommu->devid);
	int fn  = PCI_FUNC(iommu->devid);
	int cap_ptr = iommu->cap_ptr;
	u32 range;

	iommu->cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_CAP_HDR_OFFSET);

	range = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
	iommu->first_device = DEVID(MMIO_GET_BUS(range), MMIO_GET_FD(range));
	iommu->last_device = DEVID(MMIO_GET_BUS(range), MMIO_GET_LD(range));
}
static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
					struct ivhd_header *h)
{
	u8 *p = (u8 *)h;
	u8 *end = p, flags = 0;
	u16 dev_i, devid = 0, devid_start = 0, devid_to = 0;
	u32 ext_flags = 0;
	bool alias = false;
	struct ivhd_entry *e;

	/*
	 * First set the recommended feature enable bits from ACPI
	 * into the IOMMU control registers
	 */
	h->flags & IVHD_FLAG_HT_TUN_EN ?
		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

	h->flags & IVHD_FLAG_PASSPW_EN ?
		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

	h->flags & IVHD_FLAG_RESPASSPW_EN ?
		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

	h->flags & IVHD_FLAG_ISOC_EN ?
		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
		iommu_feature_disable(iommu, CONTROL_ISOC_EN);

	/*
	 * make IOMMU memory accesses cache coherent
	 */
	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

	/*
	 * Done. Now parse the device entries
	 */
	p += sizeof(struct ivhd_header);
	end += h->length;

	while (p < end) {
		e = (struct ivhd_entry *)p;
		switch (e->type) {
		case IVHD_DEV_ALL:
			for (dev_i = iommu->first_device;
					dev_i <= iommu->last_device; ++dev_i)
				set_dev_entry_from_acpi(dev_i, e->flags, 0);
			break;
		case IVHD_DEV_SELECT:
			devid = e->devid;
			set_dev_entry_from_acpi(devid, e->flags, 0);
			break;
		case IVHD_DEV_SELECT_RANGE_START:
			devid_start = e->devid;
			flags = e->flags;
			ext_flags = 0;
			alias = false;
			break;
		case IVHD_DEV_ALIAS:
			devid = e->devid;
			devid_to = e->ext >> 8;
			set_dev_entry_from_acpi(devid, e->flags, 0);
			amd_iommu_alias_table[devid] = devid_to;
			break;
		case IVHD_DEV_ALIAS_RANGE:
			devid_start = e->devid;
			flags = e->flags;
			devid_to = e->ext >> 8;
			ext_flags = 0;
			alias = true;
			break;
		case IVHD_DEV_EXT_SELECT:
			devid = e->devid;
			set_dev_entry_from_acpi(devid, e->flags, e->ext);
			break;
		case IVHD_DEV_EXT_SELECT_RANGE:
			devid_start = e->devid;
			flags = e->flags;
			ext_flags = e->ext;
			alias = false;
			break;
		case IVHD_DEV_RANGE_END:
			devid = e->devid;
			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
				if (alias)
					amd_iommu_alias_table[dev_i] = devid_to;
				set_dev_entry_from_acpi(
						amd_iommu_alias_table[dev_i],
						flags, ext_flags);
			}
			break;
		default:
			break;
		}

		p += 0x04 << (e->type >> 6);
	}
}
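
/*
 * Point the rlookup table entries of all devices handled by this IOMMU
 * to the IOMMU itself.
 */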
static int __init init_iommu_devices(struct amd_iommu *iommu)
{
	u16 i;

	for (i = iommu->first_device; i <= iommu->last_device; ++i)
		set_iommu_for_device(iommu, i);

	return 0;
}
static void __init free_iommu_one(struct amd_iommu *iommu)
{
	free_command_buffer(iommu);
	iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
	struct amd_iommu *iommu, *next;

	list_for_each_entry_safe(iommu, next, &amd_iommu_list, list) {
		list_del(&iommu->list);
		free_iommu_one(iommu);
		kfree(iommu);
	}
}
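
/*
 * Initializes one IOMMU instance: maps its MMIO space, programs the
 * device table and command buffer and applies the settings found in PCI
 * config space and in the corresponding IVHD block.
 */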
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
	spin_lock_init(&iommu->lock);
	list_add_tail(&iommu->list, &amd_iommu_list);

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
	iommu->devid = h->devid;
	iommu->cap_ptr = h->cap_ptr;
	iommu->mmio_phys = h->mmio_phys;
	iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys);
	if (!iommu->mmio_base)
		return -ENOMEM;

	iommu_set_device_table(iommu);
	iommu->cmd_buf = alloc_command_buffer(iommu);
	if (!iommu->cmd_buf)
		return -ENOMEM;

	init_iommu_from_pci(iommu);
	init_iommu_from_acpi(iommu, h);
	init_iommu_devices(iommu);

	return 0;
}
static int __init init_iommu_all(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;
	struct amd_iommu *iommu;
	int ret;

	INIT_LIST_HEAD(&amd_iommu_list);

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		h = (struct ivhd_header *)p;
		switch (h->type) {
		case ACPI_IVHD_TYPE:
			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
			if (iommu == NULL)
				return -ENOMEM;
			ret = init_iommu_one(iommu, h);
			if (ret)
				return ret;
			break;
		default:
			break;
		}
		p += h->length;
	}

	return 0;
}
static void __init free_unity_maps(void)
{
	struct unity_map_entry *entry, *next;

	list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}
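
/****************************************************************************
 *
 * The functions below handle the IVMD blocks of the IVRS table. IVMD
 * blocks describe exclusion ranges and unity mapped memory ranges for a
 * specific device, a range of devices or all devices.
 *
 ****************************************************************************/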
static int __init init_exclusion_range(struct ivmd_header *m)
{
	int i;

	switch (m->type) {
	case ACPI_IVMD_TYPE:
		set_device_exclusion_range(m->devid, m);
		break;
	case ACPI_IVMD_TYPE_ALL:
		for (i = 0; i < amd_iommu_last_bdf; ++i)
			set_device_exclusion_range(i, m);
		break;
	case ACPI_IVMD_TYPE_RANGE:
		for (i = m->devid; i <= m->aux; ++i)
			set_device_exclusion_range(i, m);
		break;
	default:
		break;
	}

	return 0;
}
static int __init init_unity_map_range(struct ivmd_header *m)
{
	struct unity_map_entry *e = NULL;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	switch (m->type) {
	default:
	case ACPI_IVMD_TYPE:
		e->devid_start = e->devid_end = m->devid;
		break;
	case ACPI_IVMD_TYPE_ALL:
		e->devid_start = 0;
		e->devid_end = amd_iommu_last_bdf;
		break;
	case ACPI_IVMD_TYPE_RANGE:
		e->devid_start = m->devid;
		e->devid_end = m->aux;
		break;
	}

	e->address_start = PAGE_ALIGN(m->range_start);
	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
	e->prot = m->flags >> 1;

	list_add_tail(&e->list, &amd_iommu_unity_map);

	return 0;
}
static int __init init_memory_definitions(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivmd_header *m;

	INIT_LIST_HEAD(&amd_iommu_unity_map);

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		m = (struct ivmd_header *)p;
		if (m->flags & IVMD_FLAG_EXCL_RANGE)
			init_exclusion_range(m);
		else if (m->flags & IVMD_FLAG_UNITY_MAP)
			init_unity_map_range(m);

		p += m->length;
	}

	return 0;
}
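
/*
 * Programs the exclusion ranges of all IOMMUs in the system and finally
 * enables the translation hardware.
 */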
static void __init enable_iommus(void)
{
	struct amd_iommu *iommu;

	list_for_each_entry(iommu, &amd_iommu_list, list) {
		iommu_set_exclusion_range(iommu);
		iommu_enable(iommu);
	}
}
/*
 * Suspend/Resume support
 * disable suspend until a real resume is implemented
 */

static int amd_iommu_resume(struct sys_device *dev)
{
	return 0;
}

static int amd_iommu_suspend(struct sys_device *dev, pm_message_t state)
{
	/* disallow suspend as long as resume is not implemented */
	return -EINVAL;
}

static struct sysdev_class amd_iommu_sysdev_class = {
	.name = "amd_iommu",
	.suspend = amd_iommu_suspend,
	.resume = amd_iommu_resume,
};

static struct sys_device device_amd_iommu = {
	.id = 0,
	.cls = &amd_iommu_sysdev_class,
};
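
/*
 * The core init function for AMD IOMMU hardware: find the largest device
 * id from ACPI, allocate the shared tables, do the real table scan,
 * initialize the DMA ops and register the sysdev. On failure all
 * allocated resources are freed again.
 */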
int __init amd_iommu_init(void)
{
	int i, ret = 0;

	if (no_iommu) {
		printk(KERN_INFO "AMD IOMMU disabled by kernel command line\n");
		return 0;
	}

	if (!amd_iommu_detected)
		return -ENODEV;

	/*
	 * First parse ACPI tables to find the largest Bus/Dev/Func
	 * we need to handle. Upon this information the shared data
	 * structures for the IOMMUs in the system will be allocated
	 */
	if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
		return -ENODEV;

	dev_table_size     = TBL_SIZE(DEV_TABLE_ENTRY_SIZE);
	alias_table_size   = TBL_SIZE(ALIAS_TABLE_ENTRY_SIZE);
	rlookup_table_size = TBL_SIZE(RLOOKUP_TABLE_ENTRY_SIZE);

	ret = -ENOMEM;

	/* Device table - directly used by all IOMMUs */
	amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL,
				      get_order(dev_table_size));
	if (amd_iommu_dev_table == NULL)
		goto out;

	/*
	 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
	 * IOMMU sees for that device
	 */
	amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
			get_order(alias_table_size));
	if (amd_iommu_alias_table == NULL)
		goto free;

	/* IOMMU rlookup table - find the IOMMU for a specific device */
	amd_iommu_rlookup_table = (void *)__get_free_pages(GFP_KERNEL,
			get_order(rlookup_table_size));
	if (amd_iommu_rlookup_table == NULL)
		goto free;

	/*
	 * Protection Domain table - maps devices to protection domains
	 * This table has the same size as the rlookup_table
	 */
	amd_iommu_pd_table = (void *)__get_free_pages(GFP_KERNEL,
			get_order(rlookup_table_size));
	if (amd_iommu_pd_table == NULL)
		goto free;

	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(GFP_KERNEL,
			get_order(MAX_DOMAIN_ID/8));
	if (amd_iommu_pd_alloc_bitmap == NULL)
		goto free;

	/*
	 * memory is allocated now; initialize the device table with all
	 * zeroes and let all alias entries point to themselves
	 */
	memset(amd_iommu_dev_table, 0, dev_table_size);
	for (i = 0; i < amd_iommu_last_bdf; ++i)
		amd_iommu_alias_table[i] = i;

	memset(amd_iommu_pd_table, 0, rlookup_table_size);
	memset(amd_iommu_pd_alloc_bitmap, 0, MAX_DOMAIN_ID / 8);

	/*
	 * never allocate domain 0 because it's used as the non-allocated
	 * and error value placeholder
	 */
	amd_iommu_pd_alloc_bitmap[0] = 1;

	/*
	 * now the data structures are allocated and basically initialized
	 * start the real acpi table scan
	 */
	ret = -ENODEV;
	if (acpi_table_parse("IVRS", init_iommu_all) != 0)
		goto free;

	if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
		goto free;

	ret = amd_iommu_init_dma_ops();
	if (ret)
		goto free;

	ret = sysdev_class_register(&amd_iommu_sysdev_class);
	if (ret)
		goto free;

	ret = sysdev_register(&device_amd_iommu);
	if (ret)
		goto free;

	enable_iommus();

	printk(KERN_INFO "AMD IOMMU: aperture size is %d MB\n",
			(1 << (amd_iommu_aperture_order-20)));

	printk(KERN_INFO "AMD IOMMU: device isolation ");
	if (amd_iommu_isolate)
		printk("enabled\n");
	else
		printk("disabled\n");

out:
	return ret;

free:
	if (amd_iommu_pd_alloc_bitmap)
		free_pages((unsigned long)amd_iommu_pd_alloc_bitmap, 1);

	if (amd_iommu_pd_table)
		free_pages((unsigned long)amd_iommu_pd_table,
				get_order(rlookup_table_size));

	if (amd_iommu_rlookup_table)
		free_pages((unsigned long)amd_iommu_rlookup_table,
				get_order(rlookup_table_size));

	if (amd_iommu_alias_table)
		free_pages((unsigned long)amd_iommu_alias_table,
				get_order(alias_table_size));

	if (amd_iommu_dev_table)
		free_pages((unsigned long)amd_iommu_dev_table,
				get_order(dev_table_size));

	free_iommu_all();

	free_unity_maps();

	goto out;
}
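
/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It just looks if there is an IVRS ACPI table to detect AMD
 * IOMMUs.
 *
 ****************************************************************************/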
static int __init early_amd_iommu_detect(struct acpi_table_header *table)
{
	return 0;
}

void __init amd_iommu_detect(void)
{
	if (swiotlb || no_iommu || iommu_detected)
		return;

	if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) {
		iommu_detected = 1;
		amd_iommu_detected = 1;
#ifdef CONFIG_GART_IOMMU
		gart_iommu_aperture_disabled = 1;
		gart_iommu_aperture = 0;
#endif
	}
}
static int __init parse_amd_iommu_options(char *str)
{
	for (; *str; ++str) {
		if (strcmp(str, "isolate") == 0)
			amd_iommu_isolate = 1;
	}

	return 1;
}
static int __init parse_amd_iommu_size_options(char *str)
{
	for (; *str; ++str) {
		if (strcmp(str, "32M") == 0)
			amd_iommu_aperture_order = 25;
		if (strcmp(str, "64M") == 0)
			amd_iommu_aperture_order = 26;
		if (strcmp(str, "128M") == 0)
			amd_iommu_aperture_order = 27;
		if (strcmp(str, "256M") == 0)
			amd_iommu_aperture_order = 28;
		if (strcmp(str, "512M") == 0)
			amd_iommu_aperture_order = 29;
		if (strcmp(str, "1G") == 0)
			amd_iommu_aperture_order = 30;
	}

	return 1;
}
__setup("amd_iommu=", parse_amd_iommu_options);
__setup("amd_iommu_size=", parse_amd_iommu_size_options);