/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */
#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/sysdev.h>
#include <linux/dmi.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))
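/*
 * Worked example: with 4KiB VT-d pages (VTD_PAGE_SHIFT == 12), a domain
 * with gaw == 48 gives __DOMAIN_MAX_PFN(48) == 2^36 - 1 and
 * __DOMAIN_MAX_ADDR(48) == 2^48 - 1.  On a 32-bit kernel DOMAIN_MAX_PFN()
 * clamps the former to ULONG_MAX so PFN arithmetic can stay in
 * 'unsigned long'.
 */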
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}

static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static int rwbf_quirk;
/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))

static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}

static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}

static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ? phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}
/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}

static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(pte, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}
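/*
 * Note on the helpers above: the low two bits of a DMA PTE are the
 * permission bits (bit 0 == DMA_PTE_READ, bit 1 == DMA_PTE_WRITE),
 * which is why dma_pte_present() treats "(val & 3) != 0" as present
 * and dma_set_pte_prot() only touches those two bits.
 */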
/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine, more than one devices
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)
struct dmar_domain {
	int	id;			/* domain id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses*/

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature*/
	int		iommu_count;	/* reference count of iommu */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};
/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};
static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);
#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_DMAR_DEFAULT_ON*/

static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;
static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}
static inline void *alloc_pgtable_page(void)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_domain_cache);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void * alloc_devinfo_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_devinfo_cache);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_iova_cache);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}
static inline int width_to_agaw(int width);

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, use a default agaw, and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}
/* This function only returns the single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}
static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
}
static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}
/* Gets context entry for a given bus and devfn */
static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)alloc_pgtable_page();
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}
static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	ret = 0;
	if (context)
		ret = context_present(&context[devfn]);
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}
static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn], \
			sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}
static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry)
		goto out;

	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}
/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}
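/*
 * Worked example: DEFAULT_DOMAIN_ADDRESS_WIDTH (48) gives
 * width_to_agaw(48) == 2 and agaw_to_level(2) == 4, i.e. a 4-level
 * page table.  At level 2, level_size(2) == 512 DMA pfns (2MiB with
 * 4KiB VT-d pages) and align_to_level(pfn, 2) rounds pfn up to the
 * next 512-pfn boundary.
 */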
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);
	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
	parent = domain->pgd;

	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (level == 1)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page();
			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = (virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			} else {
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	return pte;
}
/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte))
			break;
		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}
/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, 2);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn++;
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}
/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	unsigned long tmp;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start_pfn, level);

		/* If we can't even clear one PTE at this level, we're done */
		if (tmp + level_size(level) - 1 > last_pfn)
			return;

		do {
			first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
			if (!pte) {
				tmp = align_to_level(tmp + 1, level + 1);
				continue;
			}
			do {
				if (dma_pte_present(pte)) {
					free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
					dma_clear_pte(pte);
				}
				pte++;
				tmp += level_size(level);
			} while (!first_pte_in_page(pte) &&
				 tmp + level_size(level) - 1 <= last_pfn);

			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);

		} while (tmp && tmp + level_size(level) - 1 <= last_pfn);
		level++;
	}
	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page();
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}
static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}
static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	/*
	 * Fallback to domain selective flush if no PSI support or the size is
	 * too big.
	 * PSI requires page size to be 2 ^ x, and the base address is naturally
	 * aligned to the size
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
						DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, domain ID 0 is reserved for non-present to present
	 * mapping flush. Device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || did)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
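/*
 * Example: flushing 5 pages gives mask == ilog2(roundup_pow_of_two(5))
 * == 3, i.e. an 8-page invalidation whose base address must be 8-page
 * aligned; if 3 exceeds cap_max_amask_val() the code above falls back
 * to a domain-selective flush instead.
 */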
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}
static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("Number of Domains supported <%ld>\n", ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		return -ENOMEM;
	}

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domainid 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}
static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	if ((iommu->domains) && (iommu->domain_ids)) {
		i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
		for (; i < cap_ndoms(iommu->cap); ) {
			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);

			spin_lock_irqsave(&domain->iommu_lock, flags);
			if (--domain->iommu_count == 0) {
				if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
					vm_domain_exit(domain);
				else
					domain_exit(domain);
			}
			spin_unlock_irqrestore(&domain->iommu_lock, flags);

			i = find_next_bit(iommu->domain_ids,
				cap_ndoms(iommu->cap), i+1);
		}
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		set_irq_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}
static struct dmar_domain *alloc_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = 0;

	return domain;
}
static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, &domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;
	int found = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	num = find_first_bit(iommu->domain_ids, ndomains);
	for (; num < ndomains; ) {
		if (iommu->domains[num] == domain) {
			found = 1;
			break;
		}
		num = find_next_bit(iommu->domain_ids,
				    cap_ndoms(iommu->cap), num+1);
	}

	if (found) {
		clear_bit(num, iommu->domain_ids);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		iommu->domains[num] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}
static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static void dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova)
		printk(KERN_ERR "Reserve IOAPIC range failed\n");

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova)
				printk(KERN_ERR "Reserve iova failed\n");
		}
	}
}
static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}
static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_count = 1;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}
static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	for_each_active_iommu(iommu, drhd)
		if (test_bit(iommu->seq_id, &domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);

	free_domain_mem(domain);
}
static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	iommu = device_to_iommu(segment, bus, devfn);
	if (!iommu)
		return -ENODEV;

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
	    domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		num = find_first_bit(iommu->domain_ids, ndomains);
		for (; num < ndomains; ) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
			num = find_next_bit(iommu->domain_ids,
					    cap_ndoms(iommu->cap), num+1);
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip top levels of page tables for
		 * iommu which has less agaw than default.
		 */
		for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
			pgd = phys_to_virt(dma_pte_addr(pgd));
			if (!dma_pte_present(pgd)) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				return -ENOMEM;
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
		domain->iommu_count++;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}
static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
		       int translation)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
					 pdev->bus->number, pdev->devfn,
					 translation);
	if (ret)
		return ret;

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain,
						 pci_domain_nr(parent->bus),
						 parent->bus->number,
						 parent->devfn, translation);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->subordinate),
					tmp->subordinate->number, 0,
					translation);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
						  pci_domain_nr(tmp->bus),
						  tmp->bus->number,
						  tmp->devfn,
						  translation);
}
static int domain_context_mapped(struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
	if (!ret)
		return ret;
	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
					    parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie)
		return device_context_mapped(iommu, tmp->subordinate->number,
					     0);
	else
		return device_context_mapped(iommu, tmp->bus->number,
					     tmp->devfn);
}
/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
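/*
 * Example: host_addr == 0x1234 and size == 0x1000 spans two 4KiB MM
 * pages, so aligned_nrpages() returns PAGE_ALIGN(0x234 + 0x1000) >>
 * VTD_PAGE_SHIFT == 2 (a count in VT-d pages, rounded up to MM page
 * granularity).
 */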
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned long sg_res;

	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (sg)
		sg_res = 0;
	else {
		sg_res = nr_pages + 1;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

	while (nr_pages--) {
		uint64_t tmp;

		if (!sg_res) {
			sg_res = aligned_nrpages(sg->offset, sg->length);
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
		}
		if (!pte) {
			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
			if (!pte)
				return -ENOMEM;
		}
		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
		if (tmp) {
			static int dumps = 5;
			printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
			       iov_pfn, tmp, (unsigned long long)pteval);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}
		pte++;
		if (!nr_pages || first_pte_in_page(pte)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}
		iov_pfn++;
		pteval += VTD_PAGE_SIZE;
		sg_res--;
		if (!sg_res)
			sg = sg_next(sg);
	}
	return 0;
}
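/*
 * For a plain pfn mapping (sg == NULL) sg_res is primed to nr_pages + 1
 * above, so the scatterlist branch in the loop never runs and pteval
 * simply advances by VTD_PAGE_SIZE per iteration; for scatterlist
 * mappings each segment re-seeds pteval from page_to_phys(sg_page(sg)).
 */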
static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}
static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}
static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	unsigned long flags;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
			struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		free_devinfo_mem(info);

		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
/*
 * find_domain
 * Note: we use struct pci_dev->dev.archdata.iommu to store the info
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = pdev->dev.archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}
/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
{
	struct dmar_domain *domain, *found = NULL;
	struct intel_iommu *iommu;
	struct dmar_drhd_unit *drhd;
	struct device_domain_info *info, *tmp;
	struct pci_dev *dev_tmp;
	unsigned long flags;
	int bus = 0, devfn = 0;
	int segment;
	int ret;

	domain = find_domain(pdev);
	if (domain)
		return domain;

	segment = pci_domain_nr(pdev->bus);

	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
	if (dev_tmp) {
		if (dev_tmp->is_pcie) {
			bus = dev_tmp->subordinate->number;
			devfn = 0;
		} else {
			bus = dev_tmp->bus->number;
			devfn = dev_tmp->devfn;
		}
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(info, &device_domain_list, global) {
			if (info->segment == segment &&
			    info->bus == bus && info->devfn == devfn) {
				found = info->domain;
				break;
			}
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
		/* pcie-pci bridge already has a domain, uses it */
		if (found) {
			domain = found;
			goto found_domain;
		}
	}

	domain = alloc_domain();
	if (!domain)
		goto error;

	/* Allocate new domain for the device */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (!drhd) {
		printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
			pci_name(pdev));
		return NULL;
	}
	iommu = drhd->iommu;

	ret = iommu_attach_domain(domain, iommu);
	if (ret) {
		domain_exit(domain);
		goto error;
	}

	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		goto error;
	}

	/* register pcie-to-pci device */
	if (dev_tmp) {
		info = alloc_devinfo_mem();
		if (!info) {
			domain_exit(domain);
			goto error;
		}
		info->segment = segment;
		info->bus = bus;
		info->devfn = devfn;
		info->dev = NULL;
		info->domain = domain;
		/* This domain is shared by devices under p2p bridge */
		domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;

		/* pcie-to-pci bridge already has a domain, uses it */
		found = NULL;
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(tmp, &device_domain_list, global) {
			if (tmp->segment == segment &&
			    tmp->bus == bus && tmp->devfn == devfn) {
				found = tmp->domain;
				break;
			}
		}
		if (found) {
			spin_unlock_irqrestore(&device_domain_lock, flags);
			free_devinfo_mem(info);
			domain_exit(domain);
			domain = found;
		} else {
			list_add(&info->link, &domain->devices);
			list_add(&info->global, &device_domain_list);
			spin_unlock_irqrestore(&device_domain_lock, flags);
		}
	}

found_domain:
	info = alloc_devinfo_mem();
	if (!info)
		goto error;
	info->segment = segment;
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;
	spin_lock_irqsave(&device_domain_lock, flags);
	/* somebody is fast */
	found = find_domain(pdev);
	if (found != NULL) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		if (found != domain) {
			domain_exit(domain);
			domain = found;
		}
		free_devinfo_mem(info);
		return domain;
	}
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);
	return domain;
error:
	/* recheck it here, maybe others set it */
	return find_domain(pdev);
}
static int iommu_identity_mapping;

static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
		 start, end, domain->id);
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
				  DMA_PTE_READ|DMA_PTE_WRITE);
}
static int iommu_prepare_identity_map(struct pci_dev *pdev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/* For _hardware_ passthrough, don't bother. But for software
	   passthrough, we do it anyway -- it may indicate a memory
	   range which is reserved in E820, so which didn't get set
	   up to start with in si_domain */
	if (domain == si_domain && hw_pass_through) {
		printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
		       pci_name(pdev), start, end);
		return 0;
	}

	printk(KERN_INFO
	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
	       pci_name(pdev), start, end);

	if (end >> agaw_to_width(domain->agaw)) {
		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     agaw_to_width(domain->agaw),
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

 error:
	domain_exit(domain);
	return ret;
}
static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct pci_dev *pdev)
{
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(pdev, rmrr->base_address,
					  rmrr->end_address + 1);
}

#ifdef CONFIG_DMAR_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");
}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_DMAR_FLOPPY_WA */
static int md_domain_init(struct dmar_domain *domain, int guest_width);

static int __init si_domain_work_fn(unsigned long start_pfn,
				    unsigned long end_pfn, void *datax)
{
	int *ret = datax;

	*ret = iommu_domain_identity_map(si_domain,
					 (uint64_t)start_pfn << PAGE_SHIFT,
					 (uint64_t)end_pfn << PAGE_SHIFT);
	return *ret;
}

static int __init si_domain_init(int hw)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;

	si_domain = alloc_domain();
	if (!si_domain)
		return -EFAULT;

	pr_debug("Identity mapping domain is domain %d\n", si_domain->id);

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;

	if (hw)
		return 0;

	for_each_online_node(nid) {
		work_with_active_regions(nid, si_domain_work_fn, &ret);
		if (ret)
			return ret;
	}

	return 0;
}
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev);
static int identity_mapping(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	list_for_each_entry(info, &si_domain->devices, link)
		if (info->dev == pdev)
			return 1;
	return 0;
}
static int domain_add_dev_info(struct dmar_domain *domain,
			       struct pci_dev *pdev,
			       int translation)
{
	struct device_domain_info *info;
	unsigned long flags;
	int ret;

	info = alloc_devinfo_mem();
	if (!info)
		return -ENOMEM;

	ret = domain_context_mapping(domain, pdev, translation);
	if (ret) {
		free_devinfo_mem(info);
		return ret;
	}

	info->segment = pci_domain_nr(pdev->bus);
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return 0;
}
static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
{
	if (iommu_identity_mapping == 2)
		return IS_GFX_DEVICE(pdev);

	/*
	 * We want to start off with all devices in the 1:1 domain, and
	 * take them out later if we find they can't access all of memory.
	 *
	 * However, we can't do this for PCI devices behind bridges,
	 * because all PCI devices behind the same bridge will end up
	 * with the same source-id on their transactions.
	 *
	 * Practically speaking, we can't change things around for these
	 * devices at run-time, because we can't be sure there'll be no
	 * DMA transactions in flight for any of their siblings.
	 *
	 * So PCI devices (unless they're on the root bus) as well as
	 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
	 * the 1:1 domain, just in _case_ one of their siblings turns out
	 * not to be able to map all of memory.
	 */
	if (!pdev->is_pcie) {
		if (!pci_is_root_bus(pdev->bus))
			return 0;
		if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
			return 0;
	} else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
		return 0;

	/*
	 * At boot time, we don't yet know if devices will be 64-bit capable.
	 * Assume that they will -- if they turn out not to be, then we can
	 * take them out of the 1:1 domain later.
	 */
	if (!startup)
		return pdev->dma_mask > DMA_BIT_MASK(32);

	return 1;
}
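/*
 * Example of the decision above: a PCIe endpoint with a 64-bit dma_mask
 * gets identity mapped, while a conventional PCI device that is not on
 * the root bus does not, because every device behind its bridge shares
 * one source-id and would have to move in or out of the 1:1 domain
 * together.
 */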
static int __init iommu_prepare_static_identity_mapping(int hw)
{
	struct pci_dev *pdev = NULL;
	int ret;

	ret = si_domain_init(hw);
	if (ret)
		return -EFAULT;

	for_each_pci_dev(pdev) {
		if (iommu_should_identity_map(pdev, 1)) {
			printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
			       hw ? "hardware" : "software", pci_name(pdev));

			ret = domain_add_dev_info(si_domain, pdev,
						  hw ? CONTEXT_TT_PASS_THROUGH :
						       CONTEXT_TT_MULTI_LEVEL);
			if (ret)
				return ret;
		}
	}

	return 0;
}
int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	struct pci_dev *pdev;
	struct intel_iommu *iommu;
	int i, ret;

	/*
	 * for each drhd
	 *    allocate root
	 *    initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		g_num_of_iommus++;
		/*
		 * lock not needed as this is only incremented in the single
		 * threaded kernel __init code path all other access are read
		 * only
		 */
	}

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			GFP_KERNEL);
	if (!g_iommus) {
		printk(KERN_ERR "Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	deferred_flush = kzalloc(g_num_of_iommus *
		sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		ret = -ENOMEM;
		goto error;
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;
		g_iommus[iommu->seq_id] = iommu;

		ret = iommu_init_domains(iommu);
		if (ret)
			goto error;

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMUs. Need to split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret) {
			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
			goto error;
		}
		if (!ecap_pass_through(iommu->ecap))
			hw_pass_through = 0;
	}

	/*
	 * Start from the sane iommu hardware state.
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		/*
		 * If the queued invalidation is already initialized by us
		 * (for example, while enabling interrupt-remapping) then
		 * we got the things already rolling from a sane state.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		if (dmar_enable_qi(iommu)) {
			/*
			 * Queued Invalidate not enabled, use Register Based
			 * Invalidate
			 */
			iommu->flush.flush_context = __iommu_flush_context;
			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		} else {
			iommu->flush.flush_context = qi_flush_context;
			iommu->flush.flush_iotlb = qi_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		}
	}

	if (iommu_pass_through)
		iommu_identity_mapping = 1;
#ifdef CONFIG_DMAR_BROKEN_GFX_WA
	else
		iommu_identity_mapping = 2;
#endif
	/*
	 * If pass through is not set or not enabled, setup context entries for
	 * identity mappings for rmrr, gfx, and isa and may fall back to static
	 * identity mapping if iommu_identity_mapping is set.
	 */
	if (iommu_identity_mapping) {
		ret = iommu_prepare_static_identity_mapping(hw_pass_through);
		if (ret) {
			printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
			goto error;
		}
	}
	/*
	 * For each rmrr
	 *   for each dev attached to rmrr
	 *   do
	 *     locate drhd for dev, alloc domain for dev
	 *     allocate free domain
	 *     allocate page table entries for rmrr
	 *     if context not allocated for bus
	 *           allocate and init context
	 *           set present in root table for this bus
	 *     init context with domain, translation etc
	 *   endfor
	 * endfor
	 */
	printk(KERN_INFO "IOMMU: Setting RMRR:\n");
	for_each_rmrr_units(rmrr) {
		for (i = 0; i < rmrr->devices_cnt; i++) {
			pdev = rmrr->devices[i];
			/*
			 * some BIOS lists non-existent devices in the DMAR
			 * table.
			 */
			if (!pdev)
				continue;
			ret = iommu_prepare_rmrr_dev(rmrr, pdev);
			if (ret)
				printk(KERN_ERR
				       "IOMMU: mapping reserved region failed\n");
		}
	}

	iommu_prepare_isa();

	/*
	 * for each drhd
	 *   enable fault log
	 *   global invalidate context cache
	 *   global invalidate iotlb
	 *   enable translation
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto error;

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
		iommu_disable_protect_mem_regions(iommu);

		ret = iommu_enable_translation(iommu);
		if (ret)
			goto error;
	}

	return 0;
error:
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;
		free_iommu(iommu);
	}
	kfree(g_iommus);
	return ret;
}
/* This takes a number of _MM_ pages, not VTD pages */
static struct iova *intel_alloc_iova(struct device *dev,
				     struct dmar_domain *domain,
				     unsigned long nrpages, uint64_t dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct iova *iova = NULL;

	/* Restrict dma_mask to the width that the iommu can handle */
	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);

	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
		/*
		 * First try to allocate an io virtual address in
		 * DMA_BIT_MASK(32) and if that fails then try allocating
		 * from the higher range.
		 */
		iova = alloc_iova(&domain->iovad, nrpages,
				  IOVA_PFN(DMA_BIT_MASK(32)), 1);
		if (iova)
			return iova;
	}
	iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
	if (unlikely(!iova)) {
		printk(KERN_ERR "Allocating %ld-page iova for %s failed",
		       nrpages, pci_name(pdev));
		return NULL;
	}

	return iova;
}
static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev,
			DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain) {
		printk(KERN_ERR
			"Allocating domain for %s failed", pci_name(pdev));
		return NULL;
	}

	/* make sure context mapping is ok */
	if (unlikely(!domain_context_mapped(pdev))) {
		ret = domain_context_mapping(domain, pdev,
					     CONTEXT_TT_MULTI_LEVEL);
		if (ret) {
			printk(KERN_ERR
				"Domain context map for %s failed",
				pci_name(pdev));
			return NULL;
		}
	}

	return domain;
}
static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = dev->dev.archdata.iommu;
	if (likely(info))
		return info->domain;

	return __get_valid_domain_for_dev(dev);
}
static int iommu_dummy(struct pci_dev *pdev)
{
	return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}
/* Check if the pdev needs to go through the non-identity map and unmap process. */
static int iommu_no_mapping(struct device *dev)
{
	struct pci_dev *pdev;
	int found;

	if (unlikely(dev->bus != &pci_bus_type))
		return 1;

	pdev = to_pci_dev(dev);
	if (iommu_dummy(pdev))
		return 1;

	if (!iommu_identity_mapping)
		return 0;

	found = identity_mapping(pdev);
	if (found) {
		if (iommu_should_identity_map(pdev, 0))
			return 1;

		/*
		 * 32 bit DMA is removed from si_domain and falls back
		 * to non-identity mapping.
		 */
		domain_remove_one_dev_info(si_domain, pdev);
		printk(KERN_INFO "32bit %s uses non-identity mapping\n",
		       pci_name(pdev));
		return 0;
	} else {
		/*
		 * In case of a detached 64 bit DMA device from vm, the device
		 * is put into si_domain for identity mapping.
		 */
		if (iommu_should_identity_map(pdev, 0)) {
			int ret;
			ret = domain_add_dev_info(si_domain, pdev,
						  hw_pass_through ?
						  CONTEXT_TT_PASS_THROUGH :
						  CONTEXT_TT_MULTI_LEVEL);
			if (!ret) {
				printk(KERN_INFO "64bit %s uses identity mapping\n",
				       pci_name(pdev));
				return 1;
			}
		}
	}

	return 0;
}
static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	struct iova *iova;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;
	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;

	BUG_ON(dir == DMA_NONE);

	if (iommu_no_mapping(hwdev))
		return paddr;

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	size = aligned_nrpages(paddr, size);

	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
	if (!iova)
		goto error;

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings.
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * paddr - (paddr + size) might be a partial page, we should map the
	 * whole page.  Note: if two parts of one page are separately mapped,
	 * we might have two guest_addrs mapping to the same host paddr, but
	 * this is not a big problem.
	 */
	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
				 mm_to_dma_pfn(paddr_pfn), size, prot);
	if (ret)
		goto error;

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size);
	else
		iommu_flush_write_buffer(iommu);

	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
	start_paddr += paddr & ~PAGE_MASK;
	return start_paddr;

error:
	if (iova)
		__free_iova(&domain->iovad, iova);
	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
		pci_name(pdev), size, (unsigned long long)paddr, dir);
	return 0;
}
static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, to_pci_dev(dev)->dma_mask);
}
static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (!deferred_flush[i].next)
			continue;

		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		for (j = 0; j < deferred_flush[i].next; j++) {
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];

			mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
			mask = ilog2(mask >> VTD_PAGE_SHIFT);
			iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
					iova->pfn_lo << PAGE_SHIFT, mask);
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
		}
		deferred_flush[i].next = 0;
	}

	list_size = 0;
}
static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
static void add_unmap(struct dmar_domain *dom, struct iova *iova)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(dev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
		      (unsigned long long)dev_addr))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
		 pci_name(pdev), start_pfn, last_pfn);

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * queue up the release of the unmap to save the 1/6th of the
		 * cpu used up by the iotlb flush operation...
		 */
	}
}
static void *intel_alloc_coherent(struct device *hwdev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags)
{
	void *vaddr;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);
	flags &= ~(GFP_DMA | GFP_DMA32);

	vaddr = (void *)__get_free_pages(flags, order);
	if (!vaddr)
		return NULL;
	memset(vaddr, 0, size);

	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
					 DMA_BIDIRECTIONAL,
					 hwdev->coherent_dma_mask);
	if (*dma_handle)
		return vaddr;
	free_pages((unsigned long)vaddr, order);
	return NULL;
}
static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
				dma_addr_t dma_handle)
{
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
	free_pages((unsigned long)vaddr, order);
}
static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(hwdev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
		      (unsigned long long)sglist[0].dma_address))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * queue up the release of the unmap to save the 1/6th of the
		 * cpu used up by the iotlb flush operation...
		 */
	}
}
static int intel_nontranslate_map_sg(struct device *hddev,
	struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
		sg->dma_length = sg->length;
	}
	return nelems;
}
static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	size_t size = 0;
	int prot = 0;
	size_t offset_pfn = 0;
	struct iova *iova = NULL;
	int ret;
	struct scatterlist *sg;
	unsigned long start_vpfn;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (iommu_no_mapping(hwdev))
		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);

	for_each_sg(sglist, sg, nelems, i)
		size += aligned_nrpages(sg->offset, sg->length);

	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
				pdev->dma_mask);
	if (!iova) {
		sglist->dma_length = 0;
		return 0;
	}

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings.
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);

	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
	if (unlikely(ret)) {
		/* clear the page */
		dma_pte_clear_range(domain, start_vpfn,
				    start_vpfn + size - 1);
		/* free page tables */
		dma_pte_free_pagetable(domain, start_vpfn,
				       start_vpfn + size - 1);
		/* free iova */
		__free_iova(&domain->iovad, iova);
		return 0;
	}

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, 0, start_vpfn, offset_pfn);
	else
		iommu_flush_write_buffer(iommu);

	return nelems;
}
static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}

struct dma_map_ops intel_dma_ops = {
	.alloc_coherent = intel_alloc_coherent,
	.free_coherent = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};
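
/*
 * Slab caches for the objects allocated on the mapping paths:
 * dmar_domain, device_domain_info and iova structures.
 */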
static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					 sizeof(struct dmar_domain),
					 0, SLAB_HWCACHE_ALIGN, NULL);
	if (!iommu_domain_cache) {
		printk(KERN_ERR "Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
					 sizeof(struct device_domain_info),
					 0, SLAB_HWCACHE_ALIGN, NULL);
	if (!iommu_devinfo_cache) {
		printk(KERN_ERR "Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_iova_cache_init(void)
{
	int ret = 0;

	iommu_iova_cache = kmem_cache_create("iommu_iova",
					 sizeof(struct iova),
					 0, SLAB_HWCACHE_ALIGN, NULL);
	if (!iommu_iova_cache) {
		printk(KERN_ERR "Couldn't create iova cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static int __init iommu_init_mempool(void)
{
	int ret;

	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	kmem_cache_destroy(iommu_iova_cache);

	return -ENOMEM;
}
static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	kmem_cache_destroy(iommu_iova_cache);
}
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			int i;
			for (i = 0; i < drhd->devices_cnt; i++)
				if (drhd->devices[i] != NULL)
					break;
			/* ignore DMAR unit if no pci devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	for_each_drhd_unit(drhd) {
		int i;
		if (drhd->ignored || drhd->include_all)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++)
			if (drhd->devices[i] &&
			    !IS_GFX_DEVICE(drhd->devices[i]))
				break;

		if (i < drhd->devices_cnt)
			continue;

		/* bypass IOMMU if it is just for gfx devices */
		drhd->ignored = 1;
		for (i = 0; i < drhd->devices_cnt; i++) {
			if (!drhd->devices[i])
				continue;
			drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
		}
	}
}
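
/*
 * Suspend/resume support: translation is disabled and the fault-event
 * control, data and address registers are saved per IOMMU on suspend, then
 * restored (and translation re-enabled via init_iommu_hw()) on resume.
 */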
#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_active_iommu(iommu, drhd) {
		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		iommu_disable_protect_mem_regions(iommu);
		iommu_enable_translation(iommu);
	}

	return 0;
}
static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}
static int iommu_suspend(struct sys_device *dev, pm_message_t state)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
						 GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}
static int iommu_resume(struct sys_device *dev)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
		return -EIO;
	}

	for_each_active_iommu(iommu, drhd) {

		spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
			iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
			iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
			iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
			iommu->reg + DMAR_FEUADDR_REG);

		spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return 0;
}
static struct sysdev_class iommu_sysclass = {
	.name		= "iommu",
	.resume		= iommu_resume,
	.suspend	= iommu_suspend,
};

static struct sys_device device_iommu = {
	.cls	= &iommu_sysclass,
};

static int __init init_iommu_sysfs(void)
{
	int error;

	error = sysdev_class_register(&iommu_sysclass);
	if (error)
		return error;

	error = sysdev_register(&device_iommu);
	if (error)
		sysdev_class_unregister(&iommu_sysclass);

	return error;
}

#else
static int __init init_iommu_sysfs(void)
{
	return 0;
}
#endif	/* CONFIG_PM */
int __init intel_iommu_init(void)
{
	int ret = 0;

	if (dmar_table_init())
		return -ENODEV;

	if (dmar_dev_scope_init())
		return -ENODEV;

	/*
	 * Check the need for DMA-remapping initialization now.
	 * Above initialization will also be used by Interrupt-remapping.
	 */
	if (no_iommu || swiotlb || dmar_disabled)
		return -ENODEV;

	iommu_init_mempool();
	dmar_init_reserved_ranges();

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		put_iova_domain(&reserved_iova_list);
		iommu_exit_mempool();
		return ret;
	}
	printk(KERN_INFO
	"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
	force_iommu = 1;
	dma_ops = &intel_dma_ops;

	init_iommu_sysfs();

	register_iommu(&intel_iommu_ops);

	return 0;
}
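
/*
 * When a device sits behind a PCIe-to-PCI bridge, context entries may also
 * have been programmed for the bridges on the path to it; the helper below
 * walks that upstream chain and clears those entries as well.
 */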
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct pci_dev *pdev)
{
	struct pci_dev *tmp, *parent;

	if (!iommu || !pdev)
		return;

	/* dependent device detach */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	/* Secondary interface's bus number and devfn 0 */
	if (tmp) {
		parent = pdev->bus->self;
		while (parent != tmp) {
			iommu_detach_dev(iommu, parent->bus->number,
					 parent->devfn);
			parent = parent->bus->self;
		}
		if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
			iommu_detach_dev(iommu,
				tmp->subordinate->number, 0);
		else /* this is a legacy PCI bridge */
			iommu_detach_dev(iommu, tmp->bus->number,
					 tmp->devfn);
	}
}
static void domain_remove_one_dev_info(struct dmar_domain *domain,
					  struct pci_dev *pdev)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;
	struct list_head *entry, *tmp;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_safe(entry, tmp, &domain->devices) {
		info = list_entry(entry, struct device_domain_info, link);
		/* No need to compare PCI domain; it has to be the same */
		if (info->bus == pdev->bus->number &&
		    info->devfn == pdev->devfn) {
			list_del(&info->link);
			list_del(&info->global);
			if (info->dev)
				info->dev->dev.archdata.iommu = NULL;
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, pdev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/* if there are no other devices under the same iommu
		 * owned by this domain, clear this iommu in iommu_bmp
		 * and update iommu count and coherency
		 */
		if (iommu == device_to_iommu(info->segment, info->bus,
					     info->devfn))
			found = 1;
	}

	if (found == 0) {
		unsigned long tmp_flags;
		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		domain->iommu_count--;
		domain_update_iommu_cap(domain);
		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);
}
static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags1, flags2;

	spin_lock_irqsave(&device_domain_lock, flags1);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
			struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;

		spin_unlock_irqrestore(&device_domain_lock, flags1);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		iommu_detach_dependent_devices(iommu, info->dev);

		/* clear this iommu in iommu_bmp, update iommu count
		 * and capabilities
		 */
		spin_lock_irqsave(&domain->iommu_lock, flags2);
		if (test_and_clear_bit(iommu->seq_id,
				       &domain->iommu_bmp)) {
			domain->iommu_count--;
			domain_update_iommu_cap(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags2);

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags1);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags1);
}
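
/*
 * Support for "virtual machine" domains used by the generic IOMMU API:
 * such a domain may span several IOMMUs, so its usable address width is
 * bounded by the smallest AGAW among the units it is attached to.
 */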
/* domain id for virtual machine, it won't be set in context */
static unsigned long vm_domid;

static int vm_domain_min_agaw(struct dmar_domain *domain)
{
	int i;
	int min_agaw = domain->agaw;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (min_agaw > g_iommus[i]->agaw)
			min_agaw = g_iommus[i]->agaw;

		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}

	return min_agaw;
}
static struct dmar_domain *iommu_alloc_vm_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->id = vm_domid++;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;

	return domain;
}
static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	INIT_LIST_HEAD(&domain->devices);

	domain->iommu_count = 0;
	domain->iommu_coherency = 0;
	domain->iommu_snooping = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}
static void iommu_free_vm_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long i;
	unsigned long ndomains;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		ndomains = cap_ndoms(iommu->cap);
		i = find_first_bit(iommu->domain_ids, ndomains);
		for (; i < ndomains; ) {
			if (iommu->domains[i] == domain) {
				spin_lock_irqsave(&iommu->lock, flags);
				clear_bit(i, iommu->domain_ids);
				iommu->domains[i] = NULL;
				spin_unlock_irqrestore(&iommu->lock, flags);
				break;
			}
			i = find_next_bit(iommu->domain_ids, ndomains, i+1);
		}
	}
}
static void vm_domain_exit(struct dmar_domain *domain)
{
	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	vm_domain_remove_all_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	iommu_free_vm_domain(domain);
	free_domain_mem(domain);
}
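
/*
 * Glue between the generic struct iommu_ops interface and the VM domain code
 * above; these callbacks are published through intel_iommu_ops and registered
 * from intel_iommu_init() via register_iommu().
 */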
static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = iommu_alloc_vm_domain();
	if (!dmar_domain) {
		printk(KERN_ERR
			"intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
			"intel_iommu_domain_init() failed\n");
		vm_domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain->priv = dmar_domain;

	return 0;
}
static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	vm_domain_exit(dmar_domain);
}
static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;
	u64 end;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
			    dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
				domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	end = DOMAIN_MAX_ADDR(addr_width);
	end = end & VTD_PAGE_MASK;
	if (end < dmar_domain->max_addr) {
		printk(KERN_ERR "%s: iommu agaw (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, iommu->agaw, dmar_domain->max_addr);
		return -EFAULT;
	}

	return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
}
static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	domain_remove_one_dev_info(dmar_domain, pdev);
}
static int intel_iommu_map_range(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t hpa,
				 size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int addr_width;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		int min_agaw;
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		min_agaw = vm_domain_min_agaw(dmar_domain);
		addr_width = agaw_to_width(min_agaw);
		end = DOMAIN_MAX_ADDR(addr_width);
		end = end & VTD_PAGE_MASK;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu agaw (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, min_agaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}
static void intel_iommu_unmap_range(struct iommu_domain *domain,
				    unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (!size)
		return;

	dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
			    (iova + size - 1) >> VTD_PAGE_SHIFT);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;
}
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    unsigned long iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}
static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;

	return 0;
}
static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map_range,
	.unmap		= intel_iommu_unmap_range,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap	= intel_iommu_domain_has_cap,
};
static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it:
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);