ba395927 1/*
ea8ea460 2 * Copyright © 2006-2014 Intel Corporation.
ba395927
KA
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
ea8ea460
DW
13 * Authors: David Woodhouse <dwmw2@infradead.org>,
14 * Ashok Raj <ashok.raj@intel.com>,
15 * Shaohua Li <shaohua.li@intel.com>,
16 * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
17 * Fenghua Yu <fenghua.yu@intel.com>
9f10e5bf 18 * Joerg Roedel <jroedel@suse.de>
ba395927
KA
19 */
20
9f10e5bf
JR
21#define pr_fmt(fmt) "DMAR: " fmt
22
ba395927
KA
23#include <linux/init.h>
24#include <linux/bitmap.h>
5e0d2a6f 25#include <linux/debugfs.h>
54485c30 26#include <linux/export.h>
ba395927
KA
27#include <linux/slab.h>
28#include <linux/irq.h>
29#include <linux/interrupt.h>
ba395927
KA
30#include <linux/spinlock.h>
31#include <linux/pci.h>
32#include <linux/dmar.h>
33#include <linux/dma-mapping.h>
34#include <linux/mempool.h>
75f05569 35#include <linux/memory.h>
aa473240 36#include <linux/cpu.h>
5e0d2a6f 37#include <linux/timer.h>
dfddb969 38#include <linux/io.h>
38717946 39#include <linux/iova.h>
5d450806 40#include <linux/iommu.h>
38717946 41#include <linux/intel-iommu.h>
134fac3f 42#include <linux/syscore_ops.h>
69575d38 43#include <linux/tboot.h>
adb2fe02 44#include <linux/dmi.h>
5cdede24 45#include <linux/pci-ats.h>
0ee332c1 46#include <linux/memblock.h>
36746436 47#include <linux/dma-contiguous.h>
091d42e4 48#include <linux/crash_dump.h>
8a8f422d 49#include <asm/irq_remapping.h>
ba395927 50#include <asm/cacheflush.h>
46a7fa27 51#include <asm/iommu.h>
ba395927 52
078e1ee2
JR
53#include "irq_remapping.h"
54
5b6985ce
FY
55#define ROOT_SIZE VTD_PAGE_SIZE
56#define CONTEXT_SIZE VTD_PAGE_SIZE
57
ba395927 58#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
18436afd 59#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
ba395927 60#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
e0fc7e0b 61#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
ba395927
KA
62
63#define IOAPIC_RANGE_START (0xfee00000)
64#define IOAPIC_RANGE_END (0xfeefffff)
65#define IOVA_START_ADDR (0x1000)
66
67#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
68
4ed0d3e6 69#define MAX_AGAW_WIDTH 64
5c645b35 70#define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
4ed0d3e6 71
2ebe3151
DW
72#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
73#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
74
75/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
76 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
77#define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
78 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
79#define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
ba395927 80
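/*
 * Illustrative check (not part of the original driver): with the default
 * 48-bit guest address width the macros above leave a 36-bit page frame
 * number space.
 */
static inline void domain_addr_width_sketch(void)
{
	BUILD_BUG_ON(__DOMAIN_MAX_PFN(48) != 0xFFFFFFFFFULL);	 /* 2^36 - 1 */
	BUILD_BUG_ON(DOMAIN_MAX_ADDR(48) != 0xFFFFFFFFF000ULL); /* highest mappable address */
}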
1b722500
RM
81/* IO virtual address start page frame number */
82#define IOVA_START_PFN (1)
83
f27be03b 84#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
284901a9 85#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
6a35528a 86#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
5e0d2a6f 87
df08cdc7
AM
88/* page table handling */
89#define LEVEL_STRIDE (9)
90#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
91
6d1c56a9
OBC
92/*
 93 * This bitmap is used to advertise the page sizes our hardware supports
94 * to the IOMMU core, which will then use this information to split
95 * physically contiguous memory regions it is mapping into page sizes
96 * that we support.
97 *
98 * Traditionally the IOMMU core just handed us the mappings directly,
99 * after making sure the size is an order of a 4KiB page and that the
100 * mapping has natural alignment.
101 *
102 * To retain this behavior, we currently advertise that we support
103 * all page sizes that are an order of 4KiB.
104 *
105 * If at some point we'd like to utilize the IOMMU core's new behavior,
106 * we could change this to advertise the real page sizes we support.
107 */
108#define INTEL_IOMMU_PGSIZES (~0xFFFUL)
109
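/*
 * Illustrative check (not part of the original driver): with the mask
 * above, every power-of-two size of at least 4KiB registers as supported,
 * which is what "all page sizes that are an order of 4KiB" amounts to.
 */
static inline void intel_iommu_pgsizes_sketch(void)
{
	BUILD_BUG_ON(!(0x1000UL & INTEL_IOMMU_PGSIZES));	/* 4KiB */
	BUILD_BUG_ON(!(0x200000UL & INTEL_IOMMU_PGSIZES));	/* 2MiB */
	BUILD_BUG_ON(!(0x40000000UL & INTEL_IOMMU_PGSIZES));	/* 1GiB */
}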
df08cdc7
AM
110static inline int agaw_to_level(int agaw)
111{
112 return agaw + 2;
113}
114
115static inline int agaw_to_width(int agaw)
116{
5c645b35 117 return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
df08cdc7
AM
118}
119
120static inline int width_to_agaw(int width)
121{
5c645b35 122 return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
df08cdc7
AM
123}
124
125static inline unsigned int level_to_offset_bits(int level)
126{
127 return (level - 1) * LEVEL_STRIDE;
128}
129
130static inline int pfn_level_offset(unsigned long pfn, int level)
131{
132 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
133}
134
135static inline unsigned long level_mask(int level)
136{
137 return -1UL << level_to_offset_bits(level);
138}
139
140static inline unsigned long level_size(int level)
141{
142 return 1UL << level_to_offset_bits(level);
143}
144
145static inline unsigned long align_to_level(unsigned long pfn, int level)
146{
147 return (pfn + level_size(level) - 1) & level_mask(level);
148}
fd18de50 149
6dd9a7c7
YS
150static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
151{
5c645b35 152 return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
6dd9a7c7
YS
153}
154
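/*
 * Illustrative sketch (not used by the driver): the default 48-bit guest
 * address width corresponds to agaw 2, i.e. a 4-level page table that
 * resolves LEVEL_STRIDE == 9 bits per level.
 */
static inline int default_pgtable_levels_sketch(void)
{
	int agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);	/* == 2 */

	return agaw_to_level(agaw);					/* == 4 */
}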
dd4e8319
DW
155/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
156 are never going to work. */
157static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
158{
159 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
160}
161
162static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
163{
164 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
165}
166static inline unsigned long page_to_dma_pfn(struct page *pg)
167{
168 return mm_to_dma_pfn(page_to_pfn(pg));
169}
170static inline unsigned long virt_to_dma_pfn(void *p)
171{
172 return page_to_dma_pfn(virt_to_page(p));
173}
174
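/*
 * Illustrative note (assumption: 4KiB MM pages, as on x86): PAGE_SHIFT
 * equals VTD_PAGE_SHIFT there, so the two conversions above collapse to
 * the identity; they only become real shifts on configurations whose MM
 * pages are larger than the 4KiB VT-d page.
 */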
d9630fe9
WH
175/* global iommu list, set NULL for ignored DMAR units */
176static struct intel_iommu **g_iommus;
177
e0fc7e0b 178static void __init check_tylersburg_isoch(void);
9af88143
DW
179static int rwbf_quirk;
180
b779260b
JC
181/*
182 * set to 1 to panic kernel if can't successfully enable VT-d
183 * (used when kernel is launched w/ TXT)
184 */
185static int force_on = 0;
bfd20f1c 186int intel_iommu_tboot_noforce;
b779260b 187
46b08e1a
MM
188/*
189 * 0: Present
190 * 1-11: Reserved
191 * 12-63: Context Ptr (12 - (haw-1))
192 * 64-127: Reserved
193 */
194struct root_entry {
03ecc32c
DW
195 u64 lo;
196 u64 hi;
46b08e1a
MM
197};
198#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
46b08e1a 199
091d42e4
JR
200/*
201 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
202 * if marked present.
203 */
204static phys_addr_t root_entry_lctp(struct root_entry *re)
205{
206 if (!(re->lo & 1))
207 return 0;
208
209 return re->lo & VTD_PAGE_MASK;
210}
211
212/*
213 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
214 * if marked present.
215 */
216static phys_addr_t root_entry_uctp(struct root_entry *re)
217{
218 if (!(re->hi & 1))
219 return 0;
46b08e1a 220
091d42e4
JR
221 return re->hi & VTD_PAGE_MASK;
222}
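/*
 * Illustrative sketch (assumption: the context table lives in the kernel
 * direct map): a non-zero LCTP is a physical address and has to be
 * converted back before the lower context table can be read.
 */
static inline void *lower_context_table_sketch(struct root_entry *re)
{
	phys_addr_t lctp = root_entry_lctp(re);

	return lctp ? phys_to_virt(lctp) : NULL;
}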
7a8fc25e
MM
223/*
224 * low 64 bits:
225 * 0: present
226 * 1: fault processing disable
227 * 2-3: translation type
228 * 12-63: address space root
229 * high 64 bits:
230 * 0-2: address width
231 * 3-6: aval
232 * 8-23: domain id
233 */
234struct context_entry {
235 u64 lo;
236 u64 hi;
237};
c07e7d21 238
cf484d0e
JR
239static inline void context_clear_pasid_enable(struct context_entry *context)
240{
241 context->lo &= ~(1ULL << 11);
242}
243
244static inline bool context_pasid_enabled(struct context_entry *context)
245{
246 return !!(context->lo & (1ULL << 11));
247}
248
249static inline void context_set_copied(struct context_entry *context)
250{
251 context->hi |= (1ull << 3);
252}
253
254static inline bool context_copied(struct context_entry *context)
255{
256 return !!(context->hi & (1ULL << 3));
257}
258
259static inline bool __context_present(struct context_entry *context)
c07e7d21
MM
260{
261 return (context->lo & 1);
262}
cf484d0e
JR
263
264static inline bool context_present(struct context_entry *context)
265{
266 return context_pasid_enabled(context) ?
267 __context_present(context) :
268 __context_present(context) && !context_copied(context);
269}
270
c07e7d21
MM
271static inline void context_set_present(struct context_entry *context)
272{
273 context->lo |= 1;
274}
275
276static inline void context_set_fault_enable(struct context_entry *context)
277{
278 context->lo &= (((u64)-1) << 2) | 1;
279}
280
c07e7d21
MM
281static inline void context_set_translation_type(struct context_entry *context,
282 unsigned long value)
283{
284 context->lo &= (((u64)-1) << 4) | 3;
285 context->lo |= (value & 3) << 2;
286}
287
288static inline void context_set_address_root(struct context_entry *context,
289 unsigned long value)
290{
1a2262f9 291 context->lo &= ~VTD_PAGE_MASK;
c07e7d21
MM
292 context->lo |= value & VTD_PAGE_MASK;
293}
294
295static inline void context_set_address_width(struct context_entry *context,
296 unsigned long value)
297{
298 context->hi |= value & 7;
299}
300
301static inline void context_set_domain_id(struct context_entry *context,
302 unsigned long value)
303{
304 context->hi |= (value & ((1 << 16) - 1)) << 8;
305}
306
dbcd861f
JR
307static inline int context_domain_id(struct context_entry *c)
308{
309 return((c->hi >> 8) & 0xffff);
310}
311
c07e7d21
MM
312static inline void context_clear_entry(struct context_entry *context)
313{
314 context->lo = 0;
315 context->hi = 0;
316}
7a8fc25e 317
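/*
 * Illustrative sketch (assumption; the real programming of context
 * entries is done by domain_context_mapping_one() further down): the
 * setters above are meant to be applied together, roughly in this order.
 */
static inline void fill_context_sketch(struct context_entry *context,
				       u16 did, int agaw, void *pgd)
{
	context_clear_entry(context);
	context_set_domain_id(context, did);
	context_set_address_width(context, agaw);
	context_set_address_root(context, virt_to_phys(pgd));
	context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
	context_set_fault_enable(context);
	context_set_present(context);
}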
622ba12a
MM
318/*
319 * 0: readable
320 * 1: writable
321 * 2-6: reserved
322 * 7: super page
9cf06697
SY
323 * 8-10: available
324 * 11: snoop behavior
622ba12a
MM
 325 * 12-63: Host physical address
326 */
327struct dma_pte {
328 u64 val;
329};
622ba12a 330
19c239ce
MM
331static inline void dma_clear_pte(struct dma_pte *pte)
332{
333 pte->val = 0;
334}
335
19c239ce
MM
336static inline u64 dma_pte_addr(struct dma_pte *pte)
337{
c85994e4
DW
338#ifdef CONFIG_64BIT
339 return pte->val & VTD_PAGE_MASK;
340#else
341 /* Must have a full atomic 64-bit read */
1a8bd481 342 return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
c85994e4 343#endif
19c239ce
MM
344}
345
19c239ce
MM
346static inline bool dma_pte_present(struct dma_pte *pte)
347{
348 return (pte->val & 3) != 0;
349}
622ba12a 350
4399c8bf
AK
351static inline bool dma_pte_superpage(struct dma_pte *pte)
352{
c3c75eb7 353 return (pte->val & DMA_PTE_LARGE_PAGE);
4399c8bf
AK
354}
355
75e6bf96
DW
356static inline int first_pte_in_page(struct dma_pte *pte)
357{
358 return !((unsigned long)pte & ~VTD_PAGE_MASK);
359}
360
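/*
 * Illustrative sketch (assumption; __domain_mapping() further down builds
 * its pteval the same way): a leaf PTE is just the host physical page
 * frame shifted into place plus the R/W bits from the layout above.
 */
static inline u64 leaf_pte_sketch(unsigned long host_pfn, int prot)
{
	return ((u64)host_pfn << VTD_PAGE_SHIFT) |
	       (prot & (DMA_PTE_READ | DMA_PTE_WRITE));
}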
2c2e2c38
FY
361/*
362 * This domain is a statically identity mapping domain.
 363 * 1. This domain creates a static 1:1 mapping to all usable memory.
 364 * 2. It maps to each iommu if successful.
 365 * 3. Each iommu maps to this domain if successful.
366 */
19943b0e
DW
367static struct dmar_domain *si_domain;
368static int hw_pass_through = 1;
2c2e2c38 369
28ccce0d
JR
370/*
 371 * Domain represents a virtual machine; more than one device
1ce28feb
WH
 372 * across iommus may be owned by one domain, e.g. a kvm guest.
373 */
ab8dfe25 374#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 0)
1ce28feb 375
2c2e2c38 376/* si_domain contains multiple devices */
ab8dfe25 377#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1)
2c2e2c38 378
29a27719
JR
379#define for_each_domain_iommu(idx, domain) \
380 for (idx = 0; idx < g_num_of_iommus; idx++) \
381 if (domain->iommu_refcnt[idx])
382
99126f7c 383struct dmar_domain {
4c923d47 384 int nid; /* node id */
29a27719
JR
385
386 unsigned iommu_refcnt[DMAR_UNITS_SUPPORTED];
387 /* Refcount of devices per iommu */
388
99126f7c 389
c0e8a6c8
JR
390 u16 iommu_did[DMAR_UNITS_SUPPORTED];
391 /* Domain ids per IOMMU. Use u16 since
392 * domain ids are 16 bit wide according
393 * to VT-d spec, section 9.3 */
99126f7c 394
0824c592 395 bool has_iotlb_device;
00a77deb 396 struct list_head devices; /* all devices' list */
99126f7c
MM
397 struct iova_domain iovad; /* iova's that belong to this domain */
398
399 struct dma_pte *pgd; /* virtual address */
99126f7c
MM
400 int gaw; /* max guest address width */
401
402 /* adjusted guest address width, 0 is level 2 30-bit */
403 int agaw;
404
3b5410e7 405 int flags; /* flags to find out type of domain */
8e604097
WH
406
407 int iommu_coherency;/* indicate coherency of iommu access */
58c610bd 408 int iommu_snooping; /* indicate snooping control feature*/
c7151a8d 409 int iommu_count; /* reference count of iommu */
6dd9a7c7
YS
410 int iommu_superpage;/* Level of superpages supported:
411 0 == 4KiB (no superpages), 1 == 2MiB,
412 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
fe40f1e0 413 u64 max_addr; /* maximum mapped address */
00a77deb
JR
414
415 struct iommu_domain domain; /* generic domain data structure for
416 iommu core */
99126f7c
MM
417};
418
a647dacb
MM
419/* PCI domain-device relationship */
420struct device_domain_info {
421 struct list_head link; /* link to domain siblings */
422 struct list_head global; /* link to global list */
276dbf99 423 u8 bus; /* PCI bus number */
a647dacb 424 u8 devfn; /* PCI devfn number */
b16d0cb9
DW
425 u8 pasid_supported:3;
426 u8 pasid_enabled:1;
427 u8 pri_supported:1;
428 u8 pri_enabled:1;
429 u8 ats_supported:1;
430 u8 ats_enabled:1;
431 u8 ats_qdep;
0bcb3e28 432 struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
93a23a72 433 struct intel_iommu *iommu; /* IOMMU used by this device */
a647dacb
MM
434 struct dmar_domain *domain; /* pointer to domain */
435};
436
b94e4117
JL
437struct dmar_rmrr_unit {
438 struct list_head list; /* list of rmrr units */
439 struct acpi_dmar_header *hdr; /* ACPI header */
440 u64 base_address; /* reserved base address*/
441 u64 end_address; /* reserved end address */
832bd858 442 struct dmar_dev_scope *devices; /* target devices */
b94e4117 443 int devices_cnt; /* target device count */
0659b8dc 444 struct iommu_resv_region *resv; /* reserved region handle */
b94e4117
JL
445};
446
447struct dmar_atsr_unit {
448 struct list_head list; /* list of ATSR units */
449 struct acpi_dmar_header *hdr; /* ACPI header */
832bd858 450 struct dmar_dev_scope *devices; /* target devices */
b94e4117
JL
451 int devices_cnt; /* target device count */
452 u8 include_all:1; /* include all ports */
453};
454
455static LIST_HEAD(dmar_atsr_units);
456static LIST_HEAD(dmar_rmrr_units);
457
458#define for_each_rmrr_units(rmrr) \
459 list_for_each_entry(rmrr, &dmar_rmrr_units, list)
460
5e0d2a6f 461static void flush_unmaps_timeout(unsigned long data);
462
314f1dc1 463struct deferred_flush_entry {
2aac6304 464 unsigned long iova_pfn;
769530e4 465 unsigned long nrpages;
314f1dc1
OP
466 struct dmar_domain *domain;
467 struct page *freelist;
468};
5e0d2a6f 469
80b20dd8 470#define HIGH_WATER_MARK 250
314f1dc1 471struct deferred_flush_table {
80b20dd8 472 int next;
314f1dc1 473 struct deferred_flush_entry entries[HIGH_WATER_MARK];
80b20dd8 474};
475
aa473240
OP
476struct deferred_flush_data {
477 spinlock_t lock;
478 int timer_on;
479 struct timer_list timer;
480 long size;
481 struct deferred_flush_table *tables;
80b20dd8 482};
483
58c4a95f 484static DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
80b20dd8 485
5e0d2a6f 486/* bitmap for indexing intel_iommus */
5e0d2a6f 487static int g_num_of_iommus;
488
92d03cc8 489static void domain_exit(struct dmar_domain *domain);
ba395927 490static void domain_remove_dev_info(struct dmar_domain *domain);
e6de0f8d
JR
491static void dmar_remove_one_dev_info(struct dmar_domain *domain,
492 struct device *dev);
127c7615 493static void __dmar_remove_one_dev_info(struct device_domain_info *info);
2452d9db
JR
494static void domain_context_clear(struct intel_iommu *iommu,
495 struct device *dev);
2a46ddf7
JL
496static int domain_detach_iommu(struct dmar_domain *domain,
497 struct intel_iommu *iommu);
ba395927 498
d3f13810 499#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
0cd5c3c8
KM
500int dmar_disabled = 0;
501#else
502int dmar_disabled = 1;
d3f13810 503#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
0cd5c3c8 504
8bc1f85c
ED
505int intel_iommu_enabled = 0;
506EXPORT_SYMBOL_GPL(intel_iommu_enabled);
507
2d9e667e 508static int dmar_map_gfx = 1;
7d3b03ce 509static int dmar_forcedac;
5e0d2a6f 510static int intel_iommu_strict;
6dd9a7c7 511static int intel_iommu_superpage = 1;
c83b2f20 512static int intel_iommu_ecs = 1;
ae853ddb
DW
513static int intel_iommu_pasid28;
514static int iommu_identity_mapping;
c83b2f20 515
ae853ddb
DW
516#define IDENTMAP_ALL 1
517#define IDENTMAP_GFX 2
518#define IDENTMAP_AZALIA 4
c83b2f20 519
d42fde70
DW
520/* Broadwell and Skylake have broken ECS support — normal so-called "second
521 * level" translation of DMA requests-without-PASID doesn't actually happen
522 * unless you also set the NESTE bit in an extended context-entry. Which of
523 * course means that SVM doesn't work because it's trying to do nested
524 * translation of the physical addresses it finds in the process page tables,
525 * through the IOVA->phys mapping found in the "second level" page tables.
526 *
527 * The VT-d specification was retroactively changed to change the definition
528 * of the capability bits and pretend that Broadwell/Skylake never happened...
529 * but unfortunately the wrong bit was changed. It's ECS which is broken, but
530 * for some reason it was the PASID capability bit which was redefined (from
531 * bit 28 on BDW/SKL to bit 40 in future).
532 *
533 * So our test for ECS needs to eschew those implementations which set the old
 534 * PASID capability bit 28, since those are the ones on which ECS is broken.
535 * Unless we are working around the 'pasid28' limitations, that is, by putting
536 * the device into passthrough mode for normal DMA and thus masking the bug.
537 */
c83b2f20 538#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
d42fde70
DW
539 (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
540/* PASID support is thus enabled if ECS is enabled and *either* of the old
541 * or new capability bits are set. */
542#define pasid_enabled(iommu) (ecs_enabled(iommu) && \
543 (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))
ba395927 544
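/*
 * Worked example (illustrative): a Broadwell IOMMU advertises ECS plus
 * the old bit-28 PASID capability, so ecap_broken_pasid() is true and
 * ecs_enabled() evaluates to false unless the user booted with
 * intel_iommu=pasid28; an implementation that sets only the new bit
 * keeps ECS, and therefore pasid_enabled(), true.
 */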
c0771df8
DW
545int intel_iommu_gfx_mapped;
546EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
547
ba395927
KA
548#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
549static DEFINE_SPINLOCK(device_domain_lock);
550static LIST_HEAD(device_domain_list);
551
b0119e87 552const struct iommu_ops intel_iommu_ops;
a8bcbb0d 553
4158c2ec
JR
554static bool translation_pre_enabled(struct intel_iommu *iommu)
555{
556 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
557}
558
091d42e4
JR
559static void clear_translation_pre_enabled(struct intel_iommu *iommu)
560{
561 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
562}
563
4158c2ec
JR
564static void init_translation_status(struct intel_iommu *iommu)
565{
566 u32 gsts;
567
568 gsts = readl(iommu->reg + DMAR_GSTS_REG);
569 if (gsts & DMA_GSTS_TES)
570 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
571}
572
00a77deb
JR
573/* Convert generic 'struct iommu_domain to private struct dmar_domain */
574static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
575{
576 return container_of(dom, struct dmar_domain, domain);
577}
578
ba395927
KA
579static int __init intel_iommu_setup(char *str)
580{
581 if (!str)
582 return -EINVAL;
583 while (*str) {
0cd5c3c8
KM
584 if (!strncmp(str, "on", 2)) {
585 dmar_disabled = 0;
9f10e5bf 586 pr_info("IOMMU enabled\n");
0cd5c3c8 587 } else if (!strncmp(str, "off", 3)) {
ba395927 588 dmar_disabled = 1;
9f10e5bf 589 pr_info("IOMMU disabled\n");
ba395927
KA
590 } else if (!strncmp(str, "igfx_off", 8)) {
591 dmar_map_gfx = 0;
9f10e5bf 592 pr_info("Disable GFX device mapping\n");
7d3b03ce 593 } else if (!strncmp(str, "forcedac", 8)) {
9f10e5bf 594 pr_info("Forcing DAC for PCI devices\n");
7d3b03ce 595 dmar_forcedac = 1;
5e0d2a6f 596 } else if (!strncmp(str, "strict", 6)) {
9f10e5bf 597 pr_info("Disable batched IOTLB flush\n");
5e0d2a6f 598 intel_iommu_strict = 1;
6dd9a7c7 599 } else if (!strncmp(str, "sp_off", 6)) {
9f10e5bf 600 pr_info("Disable supported super page\n");
6dd9a7c7 601 intel_iommu_superpage = 0;
c83b2f20
DW
602 } else if (!strncmp(str, "ecs_off", 7)) {
603 printk(KERN_INFO
604 "Intel-IOMMU: disable extended context table support\n");
605 intel_iommu_ecs = 0;
ae853ddb
DW
606 } else if (!strncmp(str, "pasid28", 7)) {
607 printk(KERN_INFO
608 "Intel-IOMMU: enable pre-production PASID support\n");
609 intel_iommu_pasid28 = 1;
610 iommu_identity_mapping |= IDENTMAP_GFX;
bfd20f1c
SL
611 } else if (!strncmp(str, "tboot_noforce", 13)) {
612 printk(KERN_INFO
613 "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
614 intel_iommu_tboot_noforce = 1;
ba395927
KA
615 }
616
617 str += strcspn(str, ",");
618 while (*str == ',')
619 str++;
620 }
621 return 0;
622}
623__setup("intel_iommu=", intel_iommu_setup);
624
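/*
 * Example (illustrative): booting with
 *	intel_iommu=on,strict,sp_off
 * is parsed by intel_iommu_setup() above as "enable the IOMMU, disable
 * batched IOTLB flushing and disable superpage support".
 */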
625static struct kmem_cache *iommu_domain_cache;
626static struct kmem_cache *iommu_devinfo_cache;
ba395927 627
9452d5bf
JR
628static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
629{
8bf47816
JR
630 struct dmar_domain **domains;
631 int idx = did >> 8;
632
633 domains = iommu->domains[idx];
634 if (!domains)
635 return NULL;
636
637 return domains[did & 0xff];
9452d5bf
JR
638}
639
640static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
641 struct dmar_domain *domain)
642{
8bf47816
JR
643 struct dmar_domain **domains;
644 int idx = did >> 8;
645
646 if (!iommu->domains[idx]) {
647 size_t size = 256 * sizeof(struct dmar_domain *);
648 iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
649 }
650
651 domains = iommu->domains[idx];
652 if (WARN_ON(!domains))
653 return;
654 else
655 domains[did & 0xff] = domain;
9452d5bf
JR
656}
657
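/*
 * Illustrative example: domain-id 0x1234 is stored at
 * iommu->domains[0x12][0x34]; the array is a two-level radix table with
 * 256 slots per level, the second level being allocated lazily by
 * set_iommu_domain() above.
 */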
4c923d47 658static inline void *alloc_pgtable_page(int node)
eb3fa7cb 659{
4c923d47
SS
660 struct page *page;
661 void *vaddr = NULL;
eb3fa7cb 662
4c923d47
SS
663 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
664 if (page)
665 vaddr = page_address(page);
eb3fa7cb 666 return vaddr;
ba395927
KA
667}
668
669static inline void free_pgtable_page(void *vaddr)
670{
671 free_page((unsigned long)vaddr);
672}
673
674static inline void *alloc_domain_mem(void)
675{
354bb65e 676 return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
ba395927
KA
677}
678
38717946 679static void free_domain_mem(void *vaddr)
ba395927
KA
680{
681 kmem_cache_free(iommu_domain_cache, vaddr);
682}
683
684static inline void * alloc_devinfo_mem(void)
685{
354bb65e 686 return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
ba395927
KA
687}
688
689static inline void free_devinfo_mem(void *vaddr)
690{
691 kmem_cache_free(iommu_devinfo_cache, vaddr);
692}
693
ab8dfe25
JL
694static inline int domain_type_is_vm(struct dmar_domain *domain)
695{
696 return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
697}
698
28ccce0d
JR
699static inline int domain_type_is_si(struct dmar_domain *domain)
700{
701 return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
702}
703
ab8dfe25
JL
704static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
705{
706 return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
707 DOMAIN_FLAG_STATIC_IDENTITY);
708}
1b573683 709
162d1b10
JL
710static inline int domain_pfn_supported(struct dmar_domain *domain,
711 unsigned long pfn)
712{
713 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
714
715 return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
716}
717
4ed0d3e6 718static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
1b573683
WH
719{
720 unsigned long sagaw;
721 int agaw = -1;
722
723 sagaw = cap_sagaw(iommu->cap);
4ed0d3e6 724 for (agaw = width_to_agaw(max_gaw);
1b573683
WH
725 agaw >= 0; agaw--) {
726 if (test_bit(agaw, &sagaw))
727 break;
728 }
729
730 return agaw;
731}
732
4ed0d3e6
FY
733/*
734 * Calculate max SAGAW for each iommu.
735 */
736int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
737{
738 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
739}
740
741/*
742 * calculate agaw for each iommu.
743 * "SAGAW" may be different across iommus, use a default agaw, and
744 * get a supported less agaw for iommus that don't support the default agaw.
745 */
746int iommu_calculate_agaw(struct intel_iommu *iommu)
747{
748 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
749}
750
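/*
 * Worked example (illustrative): with the default 48-bit width the loop
 * in __iommu_calculate_agaw() starts at agaw 2 (a 4-level table) and
 * steps down, so an IOMMU whose SAGAW field only advertises 3-level
 * support ends up with agaw 1, i.e. a 39-bit guest address width.
 */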
2c2e2c38 751/* This function only returns a single iommu in a domain */
8c11e798
WH
752static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
753{
754 int iommu_id;
755
2c2e2c38 756 /* si_domain and vm domain should not get here. */
ab8dfe25 757 BUG_ON(domain_type_is_vm_or_si(domain));
29a27719
JR
758 for_each_domain_iommu(iommu_id, domain)
759 break;
760
8c11e798
WH
761 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
762 return NULL;
763
764 return g_iommus[iommu_id];
765}
766
8e604097
WH
767static void domain_update_iommu_coherency(struct dmar_domain *domain)
768{
d0501960
DW
769 struct dmar_drhd_unit *drhd;
770 struct intel_iommu *iommu;
2f119c78
QL
771 bool found = false;
772 int i;
2e12bc29 773
d0501960 774 domain->iommu_coherency = 1;
8e604097 775
29a27719 776 for_each_domain_iommu(i, domain) {
2f119c78 777 found = true;
8e604097
WH
778 if (!ecap_coherent(g_iommus[i]->ecap)) {
779 domain->iommu_coherency = 0;
780 break;
781 }
8e604097 782 }
d0501960
DW
783 if (found)
784 return;
785
786 /* No hardware attached; use lowest common denominator */
787 rcu_read_lock();
788 for_each_active_iommu(iommu, drhd) {
789 if (!ecap_coherent(iommu->ecap)) {
790 domain->iommu_coherency = 0;
791 break;
792 }
793 }
794 rcu_read_unlock();
8e604097
WH
795}
796
161f6934 797static int domain_update_iommu_snooping(struct intel_iommu *skip)
58c610bd 798{
161f6934
JL
799 struct dmar_drhd_unit *drhd;
800 struct intel_iommu *iommu;
801 int ret = 1;
58c610bd 802
161f6934
JL
803 rcu_read_lock();
804 for_each_active_iommu(iommu, drhd) {
805 if (iommu != skip) {
806 if (!ecap_sc_support(iommu->ecap)) {
807 ret = 0;
808 break;
809 }
58c610bd 810 }
58c610bd 811 }
161f6934
JL
812 rcu_read_unlock();
813
814 return ret;
58c610bd
SY
815}
816
161f6934 817static int domain_update_iommu_superpage(struct intel_iommu *skip)
6dd9a7c7 818{
8140a95d 819 struct dmar_drhd_unit *drhd;
161f6934 820 struct intel_iommu *iommu;
8140a95d 821 int mask = 0xf;
6dd9a7c7
YS
822
823 if (!intel_iommu_superpage) {
161f6934 824 return 0;
6dd9a7c7
YS
825 }
826
8140a95d 827 /* set iommu_superpage to the smallest common denominator */
0e242612 828 rcu_read_lock();
8140a95d 829 for_each_active_iommu(iommu, drhd) {
161f6934
JL
830 if (iommu != skip) {
831 mask &= cap_super_page_val(iommu->cap);
832 if (!mask)
833 break;
6dd9a7c7
YS
834 }
835 }
0e242612
JL
836 rcu_read_unlock();
837
161f6934 838 return fls(mask);
6dd9a7c7
YS
839}
840
58c610bd
SY
841/* Some capabilities may be different across iommus */
842static void domain_update_iommu_cap(struct dmar_domain *domain)
843{
844 domain_update_iommu_coherency(domain);
161f6934
JL
845 domain->iommu_snooping = domain_update_iommu_snooping(NULL);
846 domain->iommu_superpage = domain_update_iommu_superpage(NULL);
58c610bd
SY
847}
848
03ecc32c
DW
849static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
850 u8 bus, u8 devfn, int alloc)
851{
852 struct root_entry *root = &iommu->root_entry[bus];
853 struct context_entry *context;
854 u64 *entry;
855
4df4eab1 856 entry = &root->lo;
c83b2f20 857 if (ecs_enabled(iommu)) {
03ecc32c
DW
858 if (devfn >= 0x80) {
859 devfn -= 0x80;
860 entry = &root->hi;
861 }
862 devfn *= 2;
863 }
03ecc32c
DW
864 if (*entry & 1)
865 context = phys_to_virt(*entry & VTD_PAGE_MASK);
866 else {
867 unsigned long phy_addr;
868 if (!alloc)
869 return NULL;
870
871 context = alloc_pgtable_page(iommu->node);
872 if (!context)
873 return NULL;
874
875 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
876 phy_addr = virt_to_phys((void *)context);
877 *entry = phy_addr | 1;
878 __iommu_flush_cache(iommu, entry, sizeof(*entry));
879 }
880 return &context[devfn];
881}
882
4ed6a540
DW
883static int iommu_dummy(struct device *dev)
884{
885 return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
886}
887
156baca8 888static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
c7151a8d
WH
889{
890 struct dmar_drhd_unit *drhd = NULL;
b683b230 891 struct intel_iommu *iommu;
156baca8
DW
892 struct device *tmp;
893 struct pci_dev *ptmp, *pdev = NULL;
aa4d066a 894 u16 segment = 0;
c7151a8d
WH
895 int i;
896
4ed6a540
DW
897 if (iommu_dummy(dev))
898 return NULL;
899
156baca8 900 if (dev_is_pci(dev)) {
1c387188
AR
901 struct pci_dev *pf_pdev;
902
156baca8 903 pdev = to_pci_dev(dev);
1c387188
AR
904 /* VFs aren't listed in scope tables; we need to look up
905 * the PF instead to find the IOMMU. */
906 pf_pdev = pci_physfn(pdev);
907 dev = &pf_pdev->dev;
156baca8 908 segment = pci_domain_nr(pdev->bus);
ca5b74d2 909 } else if (has_acpi_companion(dev))
156baca8
DW
910 dev = &ACPI_COMPANION(dev)->dev;
911
0e242612 912 rcu_read_lock();
b683b230 913 for_each_active_iommu(iommu, drhd) {
156baca8 914 if (pdev && segment != drhd->segment)
276dbf99 915 continue;
c7151a8d 916
b683b230 917 for_each_active_dev_scope(drhd->devices,
156baca8
DW
918 drhd->devices_cnt, i, tmp) {
919 if (tmp == dev) {
1c387188
AR
920 /* For a VF use its original BDF# not that of the PF
921 * which we used for the IOMMU lookup. Strictly speaking
922 * we could do this for all PCI devices; we only need to
923 * get the BDF# from the scope table for ACPI matches. */
5003ae1e 924 if (pdev && pdev->is_virtfn)
1c387188
AR
925 goto got_pdev;
926
156baca8
DW
927 *bus = drhd->devices[i].bus;
928 *devfn = drhd->devices[i].devfn;
b683b230 929 goto out;
156baca8
DW
930 }
931
932 if (!pdev || !dev_is_pci(tmp))
933 continue;
934
935 ptmp = to_pci_dev(tmp);
936 if (ptmp->subordinate &&
937 ptmp->subordinate->number <= pdev->bus->number &&
938 ptmp->subordinate->busn_res.end >= pdev->bus->number)
939 goto got_pdev;
924b6231 940 }
c7151a8d 941
156baca8
DW
942 if (pdev && drhd->include_all) {
943 got_pdev:
944 *bus = pdev->bus->number;
945 *devfn = pdev->devfn;
b683b230 946 goto out;
156baca8 947 }
c7151a8d 948 }
b683b230 949 iommu = NULL;
156baca8 950 out:
0e242612 951 rcu_read_unlock();
c7151a8d 952
b683b230 953 return iommu;
c7151a8d
WH
954}
955
5331fe6f
WH
956static void domain_flush_cache(struct dmar_domain *domain,
957 void *addr, int size)
958{
959 if (!domain->iommu_coherency)
960 clflush_cache_range(addr, size);
961}
962
ba395927
KA
963static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
964{
ba395927 965 struct context_entry *context;
03ecc32c 966 int ret = 0;
ba395927
KA
967 unsigned long flags;
968
969 spin_lock_irqsave(&iommu->lock, flags);
03ecc32c
DW
970 context = iommu_context_addr(iommu, bus, devfn, 0);
971 if (context)
972 ret = context_present(context);
ba395927
KA
973 spin_unlock_irqrestore(&iommu->lock, flags);
974 return ret;
975}
976
977static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
978{
ba395927
KA
979 struct context_entry *context;
980 unsigned long flags;
981
982 spin_lock_irqsave(&iommu->lock, flags);
03ecc32c 983 context = iommu_context_addr(iommu, bus, devfn, 0);
ba395927 984 if (context) {
03ecc32c
DW
985 context_clear_entry(context);
986 __iommu_flush_cache(iommu, context, sizeof(*context));
ba395927
KA
987 }
988 spin_unlock_irqrestore(&iommu->lock, flags);
989}
990
991static void free_context_table(struct intel_iommu *iommu)
992{
ba395927
KA
993 int i;
994 unsigned long flags;
995 struct context_entry *context;
996
997 spin_lock_irqsave(&iommu->lock, flags);
998 if (!iommu->root_entry) {
999 goto out;
1000 }
1001 for (i = 0; i < ROOT_ENTRY_NR; i++) {
03ecc32c 1002 context = iommu_context_addr(iommu, i, 0, 0);
ba395927
KA
1003 if (context)
1004 free_pgtable_page(context);
03ecc32c 1005
c83b2f20 1006 if (!ecs_enabled(iommu))
03ecc32c
DW
1007 continue;
1008
1009 context = iommu_context_addr(iommu, i, 0x80, 0);
1010 if (context)
1011 free_pgtable_page(context);
1012
ba395927
KA
1013 }
1014 free_pgtable_page(iommu->root_entry);
1015 iommu->root_entry = NULL;
1016out:
1017 spin_unlock_irqrestore(&iommu->lock, flags);
1018}
1019
b026fd28 1020static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
5cf0a76f 1021 unsigned long pfn, int *target_level)
ba395927 1022{
ba395927
KA
1023 struct dma_pte *parent, *pte = NULL;
1024 int level = agaw_to_level(domain->agaw);
4399c8bf 1025 int offset;
ba395927
KA
1026
1027 BUG_ON(!domain->pgd);
f9423606 1028
162d1b10 1029 if (!domain_pfn_supported(domain, pfn))
f9423606
JS
1030 /* Address beyond IOMMU's addressing capabilities. */
1031 return NULL;
1032
ba395927
KA
1033 parent = domain->pgd;
1034
5cf0a76f 1035 while (1) {
ba395927
KA
1036 void *tmp_page;
1037
b026fd28 1038 offset = pfn_level_offset(pfn, level);
ba395927 1039 pte = &parent[offset];
5cf0a76f 1040 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
6dd9a7c7 1041 break;
5cf0a76f 1042 if (level == *target_level)
ba395927
KA
1043 break;
1044
19c239ce 1045 if (!dma_pte_present(pte)) {
c85994e4
DW
1046 uint64_t pteval;
1047
4c923d47 1048 tmp_page = alloc_pgtable_page(domain->nid);
ba395927 1049
206a73c1 1050 if (!tmp_page)
ba395927 1051 return NULL;
206a73c1 1052
c85994e4 1053 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
64de5af0 1054 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
effad4b5 1055 if (cmpxchg64(&pte->val, 0ULL, pteval))
c85994e4
DW
1056 /* Someone else set it while we were thinking; use theirs. */
1057 free_pgtable_page(tmp_page);
effad4b5 1058 else
c85994e4 1059 domain_flush_cache(domain, pte, sizeof(*pte));
ba395927 1060 }
5cf0a76f
DW
1061 if (level == 1)
1062 break;
1063
19c239ce 1064 parent = phys_to_virt(dma_pte_addr(pte));
ba395927
KA
1065 level--;
1066 }
1067
5cf0a76f
DW
1068 if (!*target_level)
1069 *target_level = level;
1070
ba395927
KA
1071 return pte;
1072}
1073
6dd9a7c7 1074
ba395927 1075/* return address's pte at specific level */
90dcfb5e
DW
1076static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
1077 unsigned long pfn,
6dd9a7c7 1078 int level, int *large_page)
ba395927
KA
1079{
1080 struct dma_pte *parent, *pte = NULL;
1081 int total = agaw_to_level(domain->agaw);
1082 int offset;
1083
1084 parent = domain->pgd;
1085 while (level <= total) {
90dcfb5e 1086 offset = pfn_level_offset(pfn, total);
ba395927
KA
1087 pte = &parent[offset];
1088 if (level == total)
1089 return pte;
1090
6dd9a7c7
YS
1091 if (!dma_pte_present(pte)) {
1092 *large_page = total;
ba395927 1093 break;
6dd9a7c7
YS
1094 }
1095
e16922af 1096 if (dma_pte_superpage(pte)) {
6dd9a7c7
YS
1097 *large_page = total;
1098 return pte;
1099 }
1100
19c239ce 1101 parent = phys_to_virt(dma_pte_addr(pte));
ba395927
KA
1102 total--;
1103 }
1104 return NULL;
1105}
1106
ba395927 1107/* clear last level pte, a tlb flush should be followed */
5cf0a76f 1108static void dma_pte_clear_range(struct dmar_domain *domain,
595badf5
DW
1109 unsigned long start_pfn,
1110 unsigned long last_pfn)
ba395927 1111{
6dd9a7c7 1112 unsigned int large_page = 1;
310a5ab9 1113 struct dma_pte *first_pte, *pte;
66eae846 1114
162d1b10
JL
1115 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1116 BUG_ON(!domain_pfn_supported(domain, last_pfn));
59c36286 1117 BUG_ON(start_pfn > last_pfn);
ba395927 1118
04b18e65 1119 /* we don't need lock here; nobody else touches the iova range */
59c36286 1120 do {
6dd9a7c7
YS
1121 large_page = 1;
1122 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
310a5ab9 1123 if (!pte) {
6dd9a7c7 1124 start_pfn = align_to_level(start_pfn + 1, large_page + 1);
310a5ab9
DW
1125 continue;
1126 }
6dd9a7c7 1127 do {
310a5ab9 1128 dma_clear_pte(pte);
6dd9a7c7 1129 start_pfn += lvl_to_nr_pages(large_page);
310a5ab9 1130 pte++;
75e6bf96
DW
1131 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
1132
310a5ab9
DW
1133 domain_flush_cache(domain, first_pte,
1134 (void *)pte - (void *)first_pte);
59c36286
DW
1135
1136 } while (start_pfn && start_pfn <= last_pfn);
ba395927
KA
1137}
1138
3269ee0b 1139static void dma_pte_free_level(struct dmar_domain *domain, int level,
bc24c571
DD
1140 int retain_level, struct dma_pte *pte,
1141 unsigned long pfn, unsigned long start_pfn,
1142 unsigned long last_pfn)
3269ee0b
AW
1143{
1144 pfn = max(start_pfn, pfn);
1145 pte = &pte[pfn_level_offset(pfn, level)];
1146
1147 do {
1148 unsigned long level_pfn;
1149 struct dma_pte *level_pte;
1150
1151 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
1152 goto next;
1153
f7116e11 1154 level_pfn = pfn & level_mask(level);
3269ee0b
AW
1155 level_pte = phys_to_virt(dma_pte_addr(pte));
1156
bc24c571
DD
1157 if (level > 2) {
1158 dma_pte_free_level(domain, level - 1, retain_level,
1159 level_pte, level_pfn, start_pfn,
1160 last_pfn);
1161 }
3269ee0b 1162
bc24c571
DD
1163 /*
1164 * Free the page table if we're below the level we want to
1165 * retain and the range covers the entire table.
1166 */
1167 if (level < retain_level && !(start_pfn > level_pfn ||
08336fd2 1168 last_pfn < level_pfn + level_size(level) - 1)) {
3269ee0b
AW
1169 dma_clear_pte(pte);
1170 domain_flush_cache(domain, pte, sizeof(*pte));
1171 free_pgtable_page(level_pte);
1172 }
1173next:
1174 pfn += level_size(level);
1175 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1176}
1177
bc24c571
DD
1178/*
1179 * clear last level (leaf) ptes and free page table pages below the
1180 * level we wish to keep intact.
1181 */
ba395927 1182static void dma_pte_free_pagetable(struct dmar_domain *domain,
d794dc9b 1183 unsigned long start_pfn,
bc24c571
DD
1184 unsigned long last_pfn,
1185 int retain_level)
ba395927 1186{
162d1b10
JL
1187 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1188 BUG_ON(!domain_pfn_supported(domain, last_pfn));
59c36286 1189 BUG_ON(start_pfn > last_pfn);
ba395927 1190
d41a4adb
JL
1191 dma_pte_clear_range(domain, start_pfn, last_pfn);
1192
f3a0a52f 1193 /* We don't need lock here; nobody else touches the iova range */
bc24c571 1194 dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level,
3269ee0b 1195 domain->pgd, 0, start_pfn, last_pfn);
6660c63a 1196
ba395927 1197 /* free pgd */
d794dc9b 1198 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
ba395927
KA
1199 free_pgtable_page(domain->pgd);
1200 domain->pgd = NULL;
1201 }
1202}
1203
ea8ea460
DW
1204/* When a page at a given level is being unlinked from its parent, we don't
1205 need to *modify* it at all. All we need to do is make a list of all the
1206 pages which can be freed just as soon as we've flushed the IOTLB and we
1207 know the hardware page-walk will no longer touch them.
1208 The 'pte' argument is the *parent* PTE, pointing to the page that is to
1209 be freed. */
1210static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1211 int level, struct dma_pte *pte,
1212 struct page *freelist)
1213{
1214 struct page *pg;
1215
1216 pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1217 pg->freelist = freelist;
1218 freelist = pg;
1219
1220 if (level == 1)
1221 return freelist;
1222
adeb2590
JL
1223 pte = page_address(pg);
1224 do {
ea8ea460
DW
1225 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1226 freelist = dma_pte_list_pagetables(domain, level - 1,
1227 pte, freelist);
adeb2590
JL
1228 pte++;
1229 } while (!first_pte_in_page(pte));
ea8ea460
DW
1230
1231 return freelist;
1232}
1233
1234static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1235 struct dma_pte *pte, unsigned long pfn,
1236 unsigned long start_pfn,
1237 unsigned long last_pfn,
1238 struct page *freelist)
1239{
1240 struct dma_pte *first_pte = NULL, *last_pte = NULL;
1241
1242 pfn = max(start_pfn, pfn);
1243 pte = &pte[pfn_level_offset(pfn, level)];
1244
1245 do {
1246 unsigned long level_pfn;
1247
1248 if (!dma_pte_present(pte))
1249 goto next;
1250
1251 level_pfn = pfn & level_mask(level);
1252
1253 /* If range covers entire pagetable, free it */
1254 if (start_pfn <= level_pfn &&
1255 last_pfn >= level_pfn + level_size(level) - 1) {
 1256 /* These subordinate page tables are going away entirely. Don't
1257 bother to clear them; we're just going to *free* them. */
1258 if (level > 1 && !dma_pte_superpage(pte))
1259 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1260
1261 dma_clear_pte(pte);
1262 if (!first_pte)
1263 first_pte = pte;
1264 last_pte = pte;
1265 } else if (level > 1) {
1266 /* Recurse down into a level that isn't *entirely* obsolete */
1267 freelist = dma_pte_clear_level(domain, level - 1,
1268 phys_to_virt(dma_pte_addr(pte)),
1269 level_pfn, start_pfn, last_pfn,
1270 freelist);
1271 }
1272next:
1273 pfn += level_size(level);
1274 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1275
1276 if (first_pte)
1277 domain_flush_cache(domain, first_pte,
1278 (void *)++last_pte - (void *)first_pte);
1279
1280 return freelist;
1281}
1282
1283/* We can't just free the pages because the IOMMU may still be walking
1284 the page tables, and may have cached the intermediate levels. The
1285 pages can only be freed after the IOTLB flush has been done. */
b690420a
JR
1286static struct page *domain_unmap(struct dmar_domain *domain,
1287 unsigned long start_pfn,
1288 unsigned long last_pfn)
ea8ea460 1289{
ea8ea460
DW
1290 struct page *freelist = NULL;
1291
162d1b10
JL
1292 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1293 BUG_ON(!domain_pfn_supported(domain, last_pfn));
ea8ea460
DW
1294 BUG_ON(start_pfn > last_pfn);
1295
1296 /* we don't need lock here; nobody else touches the iova range */
1297 freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1298 domain->pgd, 0, start_pfn, last_pfn, NULL);
1299
1300 /* free pgd */
1301 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1302 struct page *pgd_page = virt_to_page(domain->pgd);
1303 pgd_page->freelist = freelist;
1304 freelist = pgd_page;
1305
1306 domain->pgd = NULL;
1307 }
1308
1309 return freelist;
1310}
1311
b690420a 1312static void dma_free_pagelist(struct page *freelist)
ea8ea460
DW
1313{
1314 struct page *pg;
1315
1316 while ((pg = freelist)) {
1317 freelist = pg->freelist;
1318 free_pgtable_page(page_address(pg));
1319 }
1320}
1321
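/*
 * Illustrative sketch (assumption; mirrors the unmap paths later in this
 * file): page-table pages collected by domain_unmap() may only be handed
 * back once every IOMMU that could still walk them has flushed its IOTLB.
 */
static inline void unmap_and_free_sketch(struct dmar_domain *domain,
					 unsigned long start_pfn,
					 unsigned long last_pfn)
{
	struct page *freelist = domain_unmap(domain, start_pfn, last_pfn);

	/* ... IOTLB flush for every IOMMU in the domain goes here ... */

	dma_free_pagelist(freelist);
}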
ba395927
KA
1322/* iommu handling */
1323static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1324{
1325 struct root_entry *root;
1326 unsigned long flags;
1327
4c923d47 1328 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
ffebeb46 1329 if (!root) {
9f10e5bf 1330 pr_err("Allocating root entry for %s failed\n",
ffebeb46 1331 iommu->name);
ba395927 1332 return -ENOMEM;
ffebeb46 1333 }
ba395927 1334
5b6985ce 1335 __iommu_flush_cache(iommu, root, ROOT_SIZE);
ba395927
KA
1336
1337 spin_lock_irqsave(&iommu->lock, flags);
1338 iommu->root_entry = root;
1339 spin_unlock_irqrestore(&iommu->lock, flags);
1340
1341 return 0;
1342}
1343
ba395927
KA
1344static void iommu_set_root_entry(struct intel_iommu *iommu)
1345{
03ecc32c 1346 u64 addr;
c416daa9 1347 u32 sts;
ba395927
KA
1348 unsigned long flag;
1349
03ecc32c 1350 addr = virt_to_phys(iommu->root_entry);
c83b2f20 1351 if (ecs_enabled(iommu))
03ecc32c 1352 addr |= DMA_RTADDR_RTT;
ba395927 1353
1f5b3c3f 1354 raw_spin_lock_irqsave(&iommu->register_lock, flag);
03ecc32c 1355 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
ba395927 1356
c416daa9 1357 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
ba395927
KA
1358
 1359 /* Make sure hardware completes it */
1360 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
c416daa9 1361 readl, (sts & DMA_GSTS_RTPS), sts);
ba395927 1362
1f5b3c3f 1363 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
ba395927
KA
1364}
1365
1366static void iommu_flush_write_buffer(struct intel_iommu *iommu)
1367{
1368 u32 val;
1369 unsigned long flag;
1370
9af88143 1371 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
ba395927 1372 return;
ba395927 1373
1f5b3c3f 1374 raw_spin_lock_irqsave(&iommu->register_lock, flag);
462b60f6 1375 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
ba395927
KA
1376
 1377 /* Make sure hardware completes it */
1378 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
c416daa9 1379 readl, (!(val & DMA_GSTS_WBFS)), val);
ba395927 1380
1f5b3c3f 1381 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
ba395927
KA
1382}
1383
 1384/* return value determines if we need a write buffer flush */
4c25a2c1
DW
1385static void __iommu_flush_context(struct intel_iommu *iommu,
1386 u16 did, u16 source_id, u8 function_mask,
1387 u64 type)
ba395927
KA
1388{
1389 u64 val = 0;
1390 unsigned long flag;
1391
ba395927
KA
1392 switch (type) {
1393 case DMA_CCMD_GLOBAL_INVL:
1394 val = DMA_CCMD_GLOBAL_INVL;
1395 break;
1396 case DMA_CCMD_DOMAIN_INVL:
1397 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1398 break;
1399 case DMA_CCMD_DEVICE_INVL:
1400 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1401 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1402 break;
1403 default:
1404 BUG();
1405 }
1406 val |= DMA_CCMD_ICC;
1407
1f5b3c3f 1408 raw_spin_lock_irqsave(&iommu->register_lock, flag);
ba395927
KA
1409 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1410
 1411 /* Make sure hardware completes it */
1412 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1413 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1414
1f5b3c3f 1415 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
ba395927
KA
1416}
1417
ba395927 1418/* return value determines if we need a write buffer flush */
1f0ef2aa
DW
1419static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1420 u64 addr, unsigned int size_order, u64 type)
ba395927
KA
1421{
1422 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1423 u64 val = 0, val_iva = 0;
1424 unsigned long flag;
1425
ba395927
KA
1426 switch (type) {
1427 case DMA_TLB_GLOBAL_FLUSH:
 1428 /* global flush doesn't need to set IVA_REG */
1429 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1430 break;
1431 case DMA_TLB_DSI_FLUSH:
1432 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1433 break;
1434 case DMA_TLB_PSI_FLUSH:
1435 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
ea8ea460 1436 /* IH bit is passed in as part of address */
ba395927
KA
1437 val_iva = size_order | addr;
1438 break;
1439 default:
1440 BUG();
1441 }
1442 /* Note: set drain read/write */
1443#if 0
1444 /*
1445 * This is probably to be super secure.. Looks like we can
1446 * ignore it without any impact.
1447 */
1448 if (cap_read_drain(iommu->cap))
1449 val |= DMA_TLB_READ_DRAIN;
1450#endif
1451 if (cap_write_drain(iommu->cap))
1452 val |= DMA_TLB_WRITE_DRAIN;
1453
1f5b3c3f 1454 raw_spin_lock_irqsave(&iommu->register_lock, flag);
ba395927
KA
1455 /* Note: Only uses first TLB reg currently */
1456 if (val_iva)
1457 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1458 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1459
 1460 /* Make sure hardware completes it */
1461 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1462 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1463
1f5b3c3f 1464 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
ba395927
KA
1465
1466 /* check IOTLB invalidation granularity */
1467 if (DMA_TLB_IAIG(val) == 0)
9f10e5bf 1468 pr_err("Flush IOTLB failed\n");
ba395927 1469 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
9f10e5bf 1470 pr_debug("TLB flush request %Lx, actual %Lx\n",
5b6985ce
FY
1471 (unsigned long long)DMA_TLB_IIRG(type),
1472 (unsigned long long)DMA_TLB_IAIG(val));
ba395927
KA
1473}
1474
64ae892b
DW
1475static struct device_domain_info *
1476iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1477 u8 bus, u8 devfn)
93a23a72 1478{
93a23a72 1479 struct device_domain_info *info;
93a23a72 1480
55d94043
JR
1481 assert_spin_locked(&device_domain_lock);
1482
93a23a72
YZ
1483 if (!iommu->qi)
1484 return NULL;
1485
93a23a72 1486 list_for_each_entry(info, &domain->devices, link)
c3b497c6
JL
1487 if (info->iommu == iommu && info->bus == bus &&
1488 info->devfn == devfn) {
b16d0cb9
DW
1489 if (info->ats_supported && info->dev)
1490 return info;
93a23a72
YZ
1491 break;
1492 }
93a23a72 1493
b16d0cb9 1494 return NULL;
93a23a72
YZ
1495}
1496
0824c592
OP
1497static void domain_update_iotlb(struct dmar_domain *domain)
1498{
1499 struct device_domain_info *info;
1500 bool has_iotlb_device = false;
1501
1502 assert_spin_locked(&device_domain_lock);
1503
1504 list_for_each_entry(info, &domain->devices, link) {
1505 struct pci_dev *pdev;
1506
1507 if (!info->dev || !dev_is_pci(info->dev))
1508 continue;
1509
1510 pdev = to_pci_dev(info->dev);
1511 if (pdev->ats_enabled) {
1512 has_iotlb_device = true;
1513 break;
1514 }
1515 }
1516
1517 domain->has_iotlb_device = has_iotlb_device;
1518}
1519
93a23a72 1520static void iommu_enable_dev_iotlb(struct device_domain_info *info)
ba395927 1521{
fb0cc3aa
BH
1522 struct pci_dev *pdev;
1523
0824c592
OP
1524 assert_spin_locked(&device_domain_lock);
1525
0bcb3e28 1526 if (!info || !dev_is_pci(info->dev))
93a23a72
YZ
1527 return;
1528
fb0cc3aa 1529 pdev = to_pci_dev(info->dev);
fb0cc3aa 1530
b16d0cb9
DW
1531#ifdef CONFIG_INTEL_IOMMU_SVM
1532 /* The PCIe spec, in its wisdom, declares that the behaviour of
1533 the device if you enable PASID support after ATS support is
1534 undefined. So always enable PASID support on devices which
1535 have it, even if we can't yet know if we're ever going to
1536 use it. */
1537 if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
1538 info->pasid_enabled = 1;
1539
1540 if (info->pri_supported && !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
1541 info->pri_enabled = 1;
1542#endif
1543 if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
1544 info->ats_enabled = 1;
0824c592 1545 domain_update_iotlb(info->domain);
b16d0cb9
DW
1546 info->ats_qdep = pci_ats_queue_depth(pdev);
1547 }
93a23a72
YZ
1548}
1549
1550static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1551{
b16d0cb9
DW
1552 struct pci_dev *pdev;
1553
0824c592
OP
1554 assert_spin_locked(&device_domain_lock);
1555
da972fb1 1556 if (!dev_is_pci(info->dev))
93a23a72
YZ
1557 return;
1558
b16d0cb9
DW
1559 pdev = to_pci_dev(info->dev);
1560
1561 if (info->ats_enabled) {
1562 pci_disable_ats(pdev);
1563 info->ats_enabled = 0;
0824c592 1564 domain_update_iotlb(info->domain);
b16d0cb9
DW
1565 }
1566#ifdef CONFIG_INTEL_IOMMU_SVM
1567 if (info->pri_enabled) {
1568 pci_disable_pri(pdev);
1569 info->pri_enabled = 0;
1570 }
1571 if (info->pasid_enabled) {
1572 pci_disable_pasid(pdev);
1573 info->pasid_enabled = 0;
1574 }
1575#endif
93a23a72
YZ
1576}
1577
1578static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1579 u64 addr, unsigned mask)
1580{
1581 u16 sid, qdep;
1582 unsigned long flags;
1583 struct device_domain_info *info;
1584
0824c592
OP
1585 if (!domain->has_iotlb_device)
1586 return;
1587
93a23a72
YZ
1588 spin_lock_irqsave(&device_domain_lock, flags);
1589 list_for_each_entry(info, &domain->devices, link) {
b16d0cb9 1590 if (!info->ats_enabled)
93a23a72
YZ
1591 continue;
1592
1593 sid = info->bus << 8 | info->devfn;
b16d0cb9 1594 qdep = info->ats_qdep;
93a23a72
YZ
1595 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1596 }
1597 spin_unlock_irqrestore(&device_domain_lock, flags);
1598}
1599
a1ddcbe9
JR
1600static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1601 struct dmar_domain *domain,
1602 unsigned long pfn, unsigned int pages,
1603 int ih, int map)
ba395927 1604{
9dd2fe89 1605 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
03d6a246 1606 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
a1ddcbe9 1607 u16 did = domain->iommu_did[iommu->seq_id];
ba395927 1608
ba395927
KA
1609 BUG_ON(pages == 0);
1610
ea8ea460
DW
1611 if (ih)
1612 ih = 1 << 6;
ba395927 1613 /*
9dd2fe89
YZ
 1614 * Fall back to domain selective flush if there is no PSI support or the size is
1615 * too big.
ba395927
KA
1616 * PSI requires page size to be 2 ^ x, and the base address is naturally
1617 * aligned to the size
1618 */
9dd2fe89
YZ
1619 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1620 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1f0ef2aa 1621 DMA_TLB_DSI_FLUSH);
9dd2fe89 1622 else
ea8ea460 1623 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
9dd2fe89 1624 DMA_TLB_PSI_FLUSH);
bf92df30
YZ
1625
1626 /*
82653633
NA
1627 * In caching mode, changes of pages from non-present to present require
1628 * flush. However, device IOTLB doesn't need to be flushed in this case.
bf92df30 1629 */
82653633 1630 if (!cap_caching_mode(iommu->cap) || !map)
9452d5bf
JR
1631 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
1632 addr, mask);
ba395927
KA
1633}
1634
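/*
 * Worked example (illustrative): unmapping 9 pages rounds up to 16, so
 * mask == 4 above; if the IOMMU's MAMV capability says it cannot
 * invalidate 2^4 pages in one shot, the code falls back to a
 * domain-selective flush instead of a page-selective one.
 */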
f8bab735 1635static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1636{
1637 u32 pmen;
1638 unsigned long flags;
1639
1f5b3c3f 1640 raw_spin_lock_irqsave(&iommu->register_lock, flags);
f8bab735 1641 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1642 pmen &= ~DMA_PMEN_EPM;
1643 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1644
1645 /* wait for the protected region status bit to clear */
1646 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1647 readl, !(pmen & DMA_PMEN_PRS), pmen);
1648
1f5b3c3f 1649 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
f8bab735 1650}
1651
2a41ccee 1652static void iommu_enable_translation(struct intel_iommu *iommu)
ba395927
KA
1653{
1654 u32 sts;
1655 unsigned long flags;
1656
1f5b3c3f 1657 raw_spin_lock_irqsave(&iommu->register_lock, flags);
c416daa9
DW
1658 iommu->gcmd |= DMA_GCMD_TE;
1659 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
ba395927
KA
1660
 1661 /* Make sure hardware completes it */
1662 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
c416daa9 1663 readl, (sts & DMA_GSTS_TES), sts);
ba395927 1664
1f5b3c3f 1665 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
ba395927
KA
1666}
1667
2a41ccee 1668static void iommu_disable_translation(struct intel_iommu *iommu)
ba395927
KA
1669{
1670 u32 sts;
1671 unsigned long flag;
1672
1f5b3c3f 1673 raw_spin_lock_irqsave(&iommu->register_lock, flag);
ba395927
KA
1674 iommu->gcmd &= ~DMA_GCMD_TE;
1675 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1676
 1677 /* Make sure hardware completes it */
1678 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
c416daa9 1679 readl, (!(sts & DMA_GSTS_TES)), sts);
ba395927 1680
1f5b3c3f 1681 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
ba395927
KA
1682}
1683
3460a6d9 1684
ba395927
KA
1685static int iommu_init_domains(struct intel_iommu *iommu)
1686{
8bf47816
JR
1687 u32 ndomains, nlongs;
1688 size_t size;
ba395927
KA
1689
1690 ndomains = cap_ndoms(iommu->cap);
8bf47816 1691 pr_debug("%s: Number of Domains supported <%d>\n",
9f10e5bf 1692 iommu->name, ndomains);
ba395927
KA
1693 nlongs = BITS_TO_LONGS(ndomains);
1694
94a91b50
DD
1695 spin_lock_init(&iommu->lock);
1696
ba395927
KA
1697 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1698 if (!iommu->domain_ids) {
9f10e5bf
JR
1699 pr_err("%s: Allocating domain id array failed\n",
1700 iommu->name);
ba395927
KA
1701 return -ENOMEM;
1702 }
8bf47816 1703
86f004c7 1704 size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
8bf47816
JR
1705 iommu->domains = kzalloc(size, GFP_KERNEL);
1706
1707 if (iommu->domains) {
1708 size = 256 * sizeof(struct dmar_domain *);
1709 iommu->domains[0] = kzalloc(size, GFP_KERNEL);
1710 }
1711
1712 if (!iommu->domains || !iommu->domains[0]) {
9f10e5bf
JR
1713 pr_err("%s: Allocating domain array failed\n",
1714 iommu->name);
852bdb04 1715 kfree(iommu->domain_ids);
8bf47816 1716 kfree(iommu->domains);
852bdb04 1717 iommu->domain_ids = NULL;
8bf47816 1718 iommu->domains = NULL;
ba395927
KA
1719 return -ENOMEM;
1720 }
1721
8bf47816
JR
1722
1723
ba395927 1724 /*
c0e8a6c8
JR
1725 * If Caching mode is set, then invalid translations are tagged
1726 * with domain-id 0, hence we need to pre-allocate it. We also
1727 * use domain-id 0 as a marker for non-allocated domain-id, so
1728 * make sure it is not used for a real domain.
ba395927 1729 */
c0e8a6c8
JR
1730 set_bit(0, iommu->domain_ids);
1731
ba395927
KA
1732 return 0;
1733}
ba395927 1734
ffebeb46 1735static void disable_dmar_iommu(struct intel_iommu *iommu)
ba395927 1736{
29a27719 1737 struct device_domain_info *info, *tmp;
55d94043 1738 unsigned long flags;
ba395927 1739
29a27719
JR
1740 if (!iommu->domains || !iommu->domain_ids)
1741 return;
a4eaa86c 1742
bea64033 1743again:
55d94043 1744 spin_lock_irqsave(&device_domain_lock, flags);
29a27719
JR
1745 list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
1746 struct dmar_domain *domain;
1747
1748 if (info->iommu != iommu)
1749 continue;
1750
1751 if (!info->dev || !info->domain)
1752 continue;
1753
1754 domain = info->domain;
1755
bea64033 1756 __dmar_remove_one_dev_info(info);
29a27719 1757
bea64033
JR
1758 if (!domain_type_is_vm_or_si(domain)) {
1759 /*
1760 * The domain_exit() function can't be called under
1761 * device_domain_lock, as it takes this lock itself.
1762 * So release the lock here and re-run the loop
1763 * afterwards.
1764 */
1765 spin_unlock_irqrestore(&device_domain_lock, flags);
29a27719 1766 domain_exit(domain);
bea64033
JR
1767 goto again;
1768 }
ba395927 1769 }
55d94043 1770 spin_unlock_irqrestore(&device_domain_lock, flags);
ba395927
KA
1771
1772 if (iommu->gcmd & DMA_GCMD_TE)
1773 iommu_disable_translation(iommu);
ffebeb46 1774}
ba395927 1775
ffebeb46
JL
1776static void free_dmar_iommu(struct intel_iommu *iommu)
1777{
1778 if ((iommu->domains) && (iommu->domain_ids)) {
86f004c7 1779 int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
8bf47816
JR
1780 int i;
1781
1782 for (i = 0; i < elems; i++)
1783 kfree(iommu->domains[i]);
ffebeb46
JL
1784 kfree(iommu->domains);
1785 kfree(iommu->domain_ids);
1786 iommu->domains = NULL;
1787 iommu->domain_ids = NULL;
1788 }
ba395927 1789
d9630fe9
WH
1790 g_iommus[iommu->seq_id] = NULL;
1791
ba395927
KA
1792 /* free context mapping */
1793 free_context_table(iommu);
8a94ade4
DW
1794
1795#ifdef CONFIG_INTEL_IOMMU_SVM
a222a7f0
DW
1796 if (pasid_enabled(iommu)) {
1797 if (ecap_prs(iommu->ecap))
1798 intel_svm_finish_prq(iommu);
8a94ade4 1799 intel_svm_free_pasid_tables(iommu);
a222a7f0 1800 }
8a94ade4 1801#endif
ba395927
KA
1802}
1803
ab8dfe25 1804static struct dmar_domain *alloc_domain(int flags)
ba395927 1805{
ba395927 1806 struct dmar_domain *domain;
ba395927
KA
1807
1808 domain = alloc_domain_mem();
1809 if (!domain)
1810 return NULL;
1811
ab8dfe25 1812 memset(domain, 0, sizeof(*domain));
4c923d47 1813 domain->nid = -1;
ab8dfe25 1814 domain->flags = flags;
0824c592 1815 domain->has_iotlb_device = false;
92d03cc8 1816 INIT_LIST_HEAD(&domain->devices);
2c2e2c38
FY
1817
1818 return domain;
1819}
1820
d160aca5
JR
1821/* Must be called with iommu->lock */
1822static int domain_attach_iommu(struct dmar_domain *domain,
fb170fb4
JL
1823 struct intel_iommu *iommu)
1824{
44bde614 1825 unsigned long ndomains;
55d94043 1826 int num;
44bde614 1827
55d94043 1828 assert_spin_locked(&device_domain_lock);
d160aca5 1829 assert_spin_locked(&iommu->lock);
ba395927 1830
29a27719
JR
1831 domain->iommu_refcnt[iommu->seq_id] += 1;
1832 domain->iommu_count += 1;
1833 if (domain->iommu_refcnt[iommu->seq_id] == 1) {
fb170fb4 1834 ndomains = cap_ndoms(iommu->cap);
d160aca5
JR
1835 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1836
1837 if (num >= ndomains) {
1838 pr_err("%s: No free domain ids\n", iommu->name);
1839 domain->iommu_refcnt[iommu->seq_id] -= 1;
1840 domain->iommu_count -= 1;
55d94043 1841 return -ENOSPC;
2c2e2c38 1842 }
ba395927 1843
d160aca5
JR
1844 set_bit(num, iommu->domain_ids);
1845 set_iommu_domain(iommu, num, domain);
1846
1847 domain->iommu_did[iommu->seq_id] = num;
1848 domain->nid = iommu->node;
fb170fb4 1849
fb170fb4
JL
1850 domain_update_iommu_cap(domain);
1851 }
d160aca5 1852
55d94043 1853 return 0;
fb170fb4
JL
1854}
1855
1856static int domain_detach_iommu(struct dmar_domain *domain,
1857 struct intel_iommu *iommu)
1858{
d160aca5 1859 int num, count = INT_MAX;
d160aca5 1860
55d94043 1861 assert_spin_locked(&device_domain_lock);
d160aca5 1862 assert_spin_locked(&iommu->lock);
fb170fb4 1863
29a27719
JR
1864 domain->iommu_refcnt[iommu->seq_id] -= 1;
1865 count = --domain->iommu_count;
1866 if (domain->iommu_refcnt[iommu->seq_id] == 0) {
d160aca5
JR
1867 num = domain->iommu_did[iommu->seq_id];
1868 clear_bit(num, iommu->domain_ids);
1869 set_iommu_domain(iommu, num, NULL);
fb170fb4 1870
fb170fb4 1871 domain_update_iommu_cap(domain);
c0e8a6c8 1872 domain->iommu_did[iommu->seq_id] = 0;
fb170fb4 1873 }
fb170fb4
JL
1874
1875 return count;
1876}
1877
ba395927 1878static struct iova_domain reserved_iova_list;
8a443df4 1879static struct lock_class_key reserved_rbtree_key;
ba395927 1880
51a63e67 1881static int dmar_init_reserved_ranges(void)
ba395927
KA
1882{
1883 struct pci_dev *pdev = NULL;
1884 struct iova *iova;
1885 int i;
ba395927 1886
0fb5fe87
RM
1887 init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
1888 DMA_32BIT_PFN);
ba395927 1889
8a443df4
MG
1890 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1891 &reserved_rbtree_key);
1892
ba395927
KA
1893 /* IOAPIC ranges shouldn't be accessed by DMA */
1894 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1895 IOVA_PFN(IOAPIC_RANGE_END));
51a63e67 1896 if (!iova) {
9f10e5bf 1897 pr_err("Reserve IOAPIC range failed\n");
51a63e67
JC
1898 return -ENODEV;
1899 }
ba395927
KA
1900
1901 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1902 for_each_pci_dev(pdev) {
1903 struct resource *r;
1904
1905 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1906 r = &pdev->resource[i];
1907 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1908 continue;
1a4a4551
DW
1909 iova = reserve_iova(&reserved_iova_list,
1910 IOVA_PFN(r->start),
1911 IOVA_PFN(r->end));
51a63e67 1912 if (!iova) {
9f10e5bf 1913 pr_err("Reserve iova failed\n");
51a63e67
JC
1914 return -ENODEV;
1915 }
ba395927
KA
1916 }
1917 }
51a63e67 1918 return 0;
ba395927
KA
1919}
1920
1921static void domain_reserve_special_ranges(struct dmar_domain *domain)
1922{
1923 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1924}
1925
1926static inline int guestwidth_to_adjustwidth(int gaw)
1927{
1928 int agaw;
1929 int r = (gaw - 12) % 9;
1930
1931 if (r == 0)
1932 agaw = gaw;
1933 else
1934 agaw = gaw + 9 - r;
1935 if (agaw > 64)
1936 agaw = 64;
1937 return agaw;
1938}
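/*
 * Worked example: each page-table level resolves 9 address bits on top
 * of the 12-bit page offset, so the adjusted width is gaw rounded up to
 * 12 + 9*n and capped at 64:
 *
 *	gaw = 48: (48 - 12) % 9 == 0  ->  agaw = 48
 *	gaw = 40: (40 - 12) % 9 == 1  ->  agaw = 40 + 9 - 1 = 48
 *	gaw = 62: (62 - 12) % 9 == 5  ->  66, capped to 64
 */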
1939
dc534b25
JR
1940static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
1941 int guest_width)
ba395927 1942{
ba395927
KA
1943 int adjust_width, agaw;
1944 unsigned long sagaw;
1945
0fb5fe87
RM
1946 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
1947 DMA_32BIT_PFN);
ba395927
KA
1948 domain_reserve_special_ranges(domain);
1949
1950 /* calculate AGAW */
ba395927
KA
1951 if (guest_width > cap_mgaw(iommu->cap))
1952 guest_width = cap_mgaw(iommu->cap);
1953 domain->gaw = guest_width;
1954 adjust_width = guestwidth_to_adjustwidth(guest_width);
1955 agaw = width_to_agaw(adjust_width);
1956 sagaw = cap_sagaw(iommu->cap);
1957 if (!test_bit(agaw, &sagaw)) {
1958 /* hardware doesn't support it, choose a bigger one */
9f10e5bf 1959 pr_debug("Hardware doesn't support agaw %d\n", agaw);
ba395927
KA
1960 agaw = find_next_bit(&sagaw, 5, agaw);
1961 if (agaw >= 5)
1962 return -ENODEV;
1963 }
1964 domain->agaw = agaw;
ba395927 1965
8e604097
WH
1966 if (ecap_coherent(iommu->ecap))
1967 domain->iommu_coherency = 1;
1968 else
1969 domain->iommu_coherency = 0;
1970
58c610bd
SY
1971 if (ecap_sc_support(iommu->ecap))
1972 domain->iommu_snooping = 1;
1973 else
1974 domain->iommu_snooping = 0;
1975
214e39aa
DW
1976 if (intel_iommu_superpage)
1977 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1978 else
1979 domain->iommu_superpage = 0;
1980
4c923d47 1981 domain->nid = iommu->node;
c7151a8d 1982
ba395927 1983 /* always allocate the top pgd */
4c923d47 1984 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
ba395927
KA
1985 if (!domain->pgd)
1986 return -ENOMEM;
5b6985ce 1987 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
ba395927
KA
1988 return 0;
1989}
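/*
 * Illustrative example (level numbering assumed from the agaw helpers
 * earlier in this file, where level == agaw + 2): a 48-bit adjusted
 * width maps to agaw index 2, i.e. 4-level tables.  If cap_sagaw()
 * only advertises bits 1 and 3 (3- and 5-level walks), the fallback in
 * domain_init() picks the next supported index:
 *
 *	unsigned long sagaw = 0x0a;		   bits 1 and 3 set
 *	int agaw = 2;				   wanted: 4-level
 *	agaw = find_next_bit(&sagaw, 5, agaw);	   returns 3 -> 5-level
 */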
1990
1991static void domain_exit(struct dmar_domain *domain)
1992{
ea8ea460 1993 struct page *freelist = NULL;
ba395927
KA
1994
1995 /* Domain 0 is reserved, so don't process it */
1996 if (!domain)
1997 return;
1998
7b668357 1999 /* Flush any lazy unmaps that may reference this domain */
aa473240
OP
2000 if (!intel_iommu_strict) {
2001 int cpu;
2002
2003 for_each_possible_cpu(cpu)
2004 flush_unmaps_timeout(cpu);
2005 }
7b668357 2006
d160aca5
JR
2007 /* Remove associated devices and clear attached or cached domains */
2008 rcu_read_lock();
ba395927 2009 domain_remove_dev_info(domain);
d160aca5 2010 rcu_read_unlock();
92d03cc8 2011
ba395927
KA
2012 /* destroy iovas */
2013 put_iova_domain(&domain->iovad);
ba395927 2014
ea8ea460 2015 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
ba395927 2016
ea8ea460
DW
2017 dma_free_pagelist(freelist);
2018
ba395927
KA
2019 free_domain_mem(domain);
2020}
2021
64ae892b
DW
2022static int domain_context_mapping_one(struct dmar_domain *domain,
2023 struct intel_iommu *iommu,
28ccce0d 2024 u8 bus, u8 devfn)
ba395927 2025{
c6c2cebd 2026 u16 did = domain->iommu_did[iommu->seq_id];
28ccce0d
JR
2027 int translation = CONTEXT_TT_MULTI_LEVEL;
2028 struct device_domain_info *info = NULL;
ba395927 2029 struct context_entry *context;
ba395927 2030 unsigned long flags;
ea6606b0 2031 struct dma_pte *pgd;
55d94043 2032 int ret, agaw;
28ccce0d 2033
c6c2cebd
JR
2034 WARN_ON(did == 0);
2035
28ccce0d
JR
2036 if (hw_pass_through && domain_type_is_si(domain))
2037 translation = CONTEXT_TT_PASS_THROUGH;
ba395927
KA
2038
2039 pr_debug("Set context mapping for %02x:%02x.%d\n",
2040 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
4ed0d3e6 2041
ba395927 2042 BUG_ON(!domain->pgd);
5331fe6f 2043
55d94043
JR
2044 spin_lock_irqsave(&device_domain_lock, flags);
2045 spin_lock(&iommu->lock);
2046
2047 ret = -ENOMEM;
03ecc32c 2048 context = iommu_context_addr(iommu, bus, devfn, 1);
ba395927 2049 if (!context)
55d94043 2050 goto out_unlock;
ba395927 2051
55d94043
JR
2052 ret = 0;
2053 if (context_present(context))
2054 goto out_unlock;
cf484d0e 2055
aec0e861
XP
2056 /*
2057 * For kdump cases, old valid entries may be cached due to the
2058 * in-flight DMA and copied pgtable, but there is no unmapping
2059 * behaviour for them, thus we need an explicit cache flush for
2060 * the newly-mapped device. For kdump, at this point, the device
2061 * is supposed to have finished reset at its driver probe stage, so no
2062 * in-flight DMA will exist, and we don't need to worry about it
2063 * hereafter.
2064 */
2065 if (context_copied(context)) {
2066 u16 did_old = context_domain_id(context);
2067
2068 if (did_old >= 0 && did_old < cap_ndoms(iommu->cap))
2069 iommu->flush.flush_context(iommu, did_old,
2070 (((u16)bus) << 8) | devfn,
2071 DMA_CCMD_MASK_NOBIT,
2072 DMA_CCMD_DEVICE_INVL);
2073 }
2074
ea6606b0
WH
2075 pgd = domain->pgd;
2076
de24e553 2077 context_clear_entry(context);
c6c2cebd 2078 context_set_domain_id(context, did);
ea6606b0 2079
de24e553
JR
2080 /*
2081 * Skip top levels of page tables for iommu which has less agaw
2082 * than default. Unnecessary for PT mode.
2083 */
93a23a72 2084 if (translation != CONTEXT_TT_PASS_THROUGH) {
de24e553 2085 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
55d94043 2086 ret = -ENOMEM;
de24e553 2087 pgd = phys_to_virt(dma_pte_addr(pgd));
55d94043
JR
2088 if (!dma_pte_present(pgd))
2089 goto out_unlock;
ea6606b0 2090 }
4ed0d3e6 2091
64ae892b 2092 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
b16d0cb9
DW
2093 if (info && info->ats_supported)
2094 translation = CONTEXT_TT_DEV_IOTLB;
2095 else
2096 translation = CONTEXT_TT_MULTI_LEVEL;
de24e553 2097
93a23a72
YZ
2098 context_set_address_root(context, virt_to_phys(pgd));
2099 context_set_address_width(context, iommu->agaw);
de24e553
JR
2100 } else {
2101 /*
2102 * In pass through mode, AW must be programmed to
2103 * indicate the largest AGAW value supported by
2104 * hardware. And ASR is ignored by hardware.
2105 */
2106 context_set_address_width(context, iommu->msagaw);
93a23a72 2107 }
4ed0d3e6
FY
2108
2109 context_set_translation_type(context, translation);
c07e7d21
MM
2110 context_set_fault_enable(context);
2111 context_set_present(context);
5331fe6f 2112 domain_flush_cache(domain, context, sizeof(*context));
ba395927 2113
4c25a2c1
DW
2114 /*
2115 * It's a non-present to present mapping. If hardware doesn't cache
2116 * non-present entries we only need to flush the write-buffer. If it
2117 * _does_ cache non-present entries, then it does so in the special
2118 * domain #0, which we have to flush:
2119 */
2120 if (cap_caching_mode(iommu->cap)) {
2121 iommu->flush.flush_context(iommu, 0,
2122 (((u16)bus) << 8) | devfn,
2123 DMA_CCMD_MASK_NOBIT,
2124 DMA_CCMD_DEVICE_INVL);
c6c2cebd 2125 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
4c25a2c1 2126 } else {
ba395927 2127 iommu_flush_write_buffer(iommu);
4c25a2c1 2128 }
93a23a72 2129 iommu_enable_dev_iotlb(info);
c7151a8d 2130
55d94043
JR
2131 ret = 0;
2132
2133out_unlock:
2134 spin_unlock(&iommu->lock);
2135 spin_unlock_irqrestore(&device_domain_lock, flags);
fb170fb4 2136
5c365d18 2137 return ret;
ba395927
KA
2138}
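/*
 * Clarifying example for the agaw loop above: it handles an IOMMU whose
 * hardware walk is shallower than the domain's page table.  A domain
 * built with 5-level tables attached to a unit that only performs
 * 4-level walks has its context entry programmed with the table reached
 * through the first slot of the top level:
 *
 *	domain->pgd (level 5) -> table behind pgd's slot 0 (level 4)
 *
 * Skipping the top level is safe because every address the narrower
 * unit can issue lies within that first slot's range; if a skipped
 * level has not been populated yet, the mapping is abandoned with
 * -ENOMEM instead.
 */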
2139
579305f7
AW
2140struct domain_context_mapping_data {
2141 struct dmar_domain *domain;
2142 struct intel_iommu *iommu;
579305f7
AW
2143};
2144
2145static int domain_context_mapping_cb(struct pci_dev *pdev,
2146 u16 alias, void *opaque)
2147{
2148 struct domain_context_mapping_data *data = opaque;
2149
2150 return domain_context_mapping_one(data->domain, data->iommu,
28ccce0d 2151 PCI_BUS_NUM(alias), alias & 0xff);
579305f7
AW
2152}
2153
ba395927 2154static int
28ccce0d 2155domain_context_mapping(struct dmar_domain *domain, struct device *dev)
ba395927 2156{
64ae892b 2157 struct intel_iommu *iommu;
156baca8 2158 u8 bus, devfn;
579305f7 2159 struct domain_context_mapping_data data;
64ae892b 2160
e1f167f3 2161 iommu = device_to_iommu(dev, &bus, &devfn);
64ae892b
DW
2162 if (!iommu)
2163 return -ENODEV;
ba395927 2164
579305f7 2165 if (!dev_is_pci(dev))
28ccce0d 2166 return domain_context_mapping_one(domain, iommu, bus, devfn);
579305f7
AW
2167
2168 data.domain = domain;
2169 data.iommu = iommu;
579305f7
AW
2170
2171 return pci_for_each_dma_alias(to_pci_dev(dev),
2172 &domain_context_mapping_cb, &data);
2173}
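/*
 * Illustrative note: pci_for_each_dma_alias() runs the callback for the
 * device's own requester id and for every alias it may use on the bus,
 * e.g. the subordinate id of a PCIe-to-PCI bridge.  Each alias arrives
 * as a packed u16 and is split exactly as in the callback above; for a
 * hypothetical alias 0x1a08:
 *
 *	u8 bus   = PCI_BUS_NUM(0x1a08);	   0x1a
 *	u8 devfn = 0x1a08 & 0xff;	   0x08 == PCI_DEVFN(1, 0)
 */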
2174
2175static int domain_context_mapped_cb(struct pci_dev *pdev,
2176 u16 alias, void *opaque)
2177{
2178 struct intel_iommu *iommu = opaque;
2179
2180 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
ba395927
KA
2181}
2182
e1f167f3 2183static int domain_context_mapped(struct device *dev)
ba395927 2184{
5331fe6f 2185 struct intel_iommu *iommu;
156baca8 2186 u8 bus, devfn;
5331fe6f 2187
e1f167f3 2188 iommu = device_to_iommu(dev, &bus, &devfn);
5331fe6f
WH
2189 if (!iommu)
2190 return -ENODEV;
ba395927 2191
579305f7
AW
2192 if (!dev_is_pci(dev))
2193 return device_context_mapped(iommu, bus, devfn);
e1f167f3 2194
579305f7
AW
2195 return !pci_for_each_dma_alias(to_pci_dev(dev),
2196 domain_context_mapped_cb, iommu);
ba395927
KA
2197}
2198
f532959b
FY
2199/* Returns a number of VTD pages, but aligned to MM page size */
2200static inline unsigned long aligned_nrpages(unsigned long host_addr,
2201 size_t size)
2202{
2203 host_addr &= ~PAGE_MASK;
2204 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
2205}
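/*
 * Worked example (assuming 4KiB pages, where PAGE_SIZE == VTD_PAGE_SIZE):
 * a buffer starting at offset 0x234 into a page and 0x2000 bytes long
 * spans three pages:
 *
 *	aligned_nrpages(0x1234, 0x2000)
 *		== PAGE_ALIGN(0x234 + 0x2000) >> 12
 *		== 0x3000 >> 12 == 3
 */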
2206
6dd9a7c7
YS
2207/* Return largest possible superpage level for a given mapping */
2208static inline int hardware_largepage_caps(struct dmar_domain *domain,
2209 unsigned long iov_pfn,
2210 unsigned long phy_pfn,
2211 unsigned long pages)
2212{
2213 int support, level = 1;
2214 unsigned long pfnmerge;
2215
2216 support = domain->iommu_superpage;
2217
2218 /* To use a large page, the virtual *and* physical addresses
2219 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2220 of them will mean we have to use smaller pages. So just
2221 merge them and check both at once. */
2222 pfnmerge = iov_pfn | phy_pfn;
2223
2224 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2225 pages >>= VTD_STRIDE_SHIFT;
2226 if (!pages)
2227 break;
2228 pfnmerge >>= VTD_STRIDE_SHIFT;
2229 level++;
2230 support--;
2231 }
2232 return level;
2233}
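/*
 * Worked example: with iommu_superpage == 2 (2MiB and 1GiB supported),
 * iov_pfn == 0x200, phy_pfn == 0x400 and pages == 1024 (a 4MiB mapping
 * with both addresses 2MiB aligned):
 *
 *	pfnmerge == 0x600, low 9 bits clear  ->  level becomes 2
 *	after the shift pfnmerge == 0x3      ->  the loop stops
 *
 * so the mapping is built from 2MiB superpages rather than 1GiB ones.
 */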
2234
9051aa02
DW
2235static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2236 struct scatterlist *sg, unsigned long phys_pfn,
2237 unsigned long nr_pages, int prot)
e1605495
DW
2238{
2239 struct dma_pte *first_pte = NULL, *pte = NULL;
9051aa02 2240 phys_addr_t uninitialized_var(pteval);
cc4f14aa 2241 unsigned long sg_res = 0;
6dd9a7c7
YS
2242 unsigned int largepage_lvl = 0;
2243 unsigned long lvl_pages = 0;
e1605495 2244
162d1b10 2245 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
e1605495
DW
2246
2247 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2248 return -EINVAL;
2249
2250 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2251
cc4f14aa
JL
2252 if (!sg) {
2253 sg_res = nr_pages;
9051aa02
DW
2254 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2255 }
2256
6dd9a7c7 2257 while (nr_pages > 0) {
c85994e4
DW
2258 uint64_t tmp;
2259
e1605495 2260 if (!sg_res) {
f532959b 2261 sg_res = aligned_nrpages(sg->offset, sg->length);
e1605495
DW
2262 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
2263 sg->dma_length = sg->length;
3e6110fd 2264 pteval = page_to_phys(sg_page(sg)) | prot;
6dd9a7c7 2265 phys_pfn = pteval >> VTD_PAGE_SHIFT;
e1605495 2266 }
6dd9a7c7 2267
e1605495 2268 if (!pte) {
6dd9a7c7
YS
2269 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2270
5cf0a76f 2271 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
e1605495
DW
2272 if (!pte)
2273 return -ENOMEM;
6dd9a7c7 2274 /* It is a large page */
6491d4d0 2275 if (largepage_lvl > 1) {
ba2374fd
CZ
2276 unsigned long nr_superpages, end_pfn;
2277
6dd9a7c7 2278 pteval |= DMA_PTE_LARGE_PAGE;
d41a4adb 2279 lvl_pages = lvl_to_nr_pages(largepage_lvl);
ba2374fd
CZ
2280
2281 nr_superpages = sg_res / lvl_pages;
2282 end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
2283
d41a4adb
JL
2284 /*
2285 * Ensure that old small page tables are
ba2374fd 2286 * removed to make room for superpage(s).
bc24c571
DD
2287 * We're adding new large pages, so make sure
2288 * we don't remove their parent tables.
d41a4adb 2289 */
bc24c571
DD
2290 dma_pte_free_pagetable(domain, iov_pfn, end_pfn,
2291 largepage_lvl + 1);
6491d4d0 2292 } else {
6dd9a7c7 2293 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
6491d4d0 2294 }
6dd9a7c7 2295
e1605495
DW
2296 }
2297 /* We don't need a lock here; nobody else
2298 * touches the iova range
2299 */
7766a3fb 2300 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
c85994e4 2301 if (tmp) {
1bf20f0d 2302 static int dumps = 5;
9f10e5bf
JR
2303 pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2304 iov_pfn, tmp, (unsigned long long)pteval);
1bf20f0d
DW
2305 if (dumps) {
2306 dumps--;
2307 debug_dma_dump_mappings(NULL);
2308 }
2309 WARN_ON(1);
2310 }
6dd9a7c7
YS
2311
2312 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2313
2314 BUG_ON(nr_pages < lvl_pages);
2315 BUG_ON(sg_res < lvl_pages);
2316
2317 nr_pages -= lvl_pages;
2318 iov_pfn += lvl_pages;
2319 phys_pfn += lvl_pages;
2320 pteval += lvl_pages * VTD_PAGE_SIZE;
2321 sg_res -= lvl_pages;
2322
2323 /* If the next PTE would be the first in a new page, then we
2324 need to flush the cache on the entries we've just written.
2325 And then we'll need to recalculate 'pte', so clear it and
2326 let it get set again in the if (!pte) block above.
2327
2328 If we're done (!nr_pages) we need to flush the cache too.
2329
2330 Also if we've been setting superpages, we may need to
2331 recalculate 'pte' and switch back to smaller pages for the
2332 end of the mapping, if the trailing size is not enough to
2333 use another superpage (i.e. sg_res < lvl_pages). */
e1605495 2334 pte++;
6dd9a7c7
YS
2335 if (!nr_pages || first_pte_in_page(pte) ||
2336 (largepage_lvl > 1 && sg_res < lvl_pages)) {
e1605495
DW
2337 domain_flush_cache(domain, first_pte,
2338 (void *)pte - (void *)first_pte);
2339 pte = NULL;
2340 }
6dd9a7c7
YS
2341
2342 if (!sg_res && nr_pages)
e1605495
DW
2343 sg = sg_next(sg);
2344 }
2345 return 0;
2346}
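/*
 * Illustrative walk-through: mapping 1024 contiguous 4KiB pages (4MiB)
 * with 2MiB superpages takes two iterations of the loop above.  With
 * largepage_lvl == 2, lvl_pages == 512, so each iteration writes one
 * large PTE and advances:
 *
 *	nr_pages:           1024 -> 512 -> 0
 *	iov_pfn, phys_pfn:  += 512 per iteration
 *	pteval:             += 512 * VTD_PAGE_SIZE (2MiB) per iteration
 *
 * The domain_flush_cache() at the bottom only runs when a PTE page
 * fills up, when a superpage run ends with a smaller tail, or when the
 * whole mapping is complete.
 */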
2347
9051aa02
DW
2348static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2349 struct scatterlist *sg, unsigned long nr_pages,
2350 int prot)
ba395927 2351{
9051aa02
DW
2352 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2353}
6f6a00e4 2354
9051aa02
DW
2355static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2356 unsigned long phys_pfn, unsigned long nr_pages,
2357 int prot)
2358{
2359 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
ba395927
KA
2360}
2361
2452d9db 2362static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
ba395927 2363{
c7151a8d
WH
2364 if (!iommu)
2365 return;
8c11e798
WH
2366
2367 clear_context_table(iommu, bus, devfn);
2368 iommu->flush.flush_context(iommu, 0, 0, 0,
4c25a2c1 2369 DMA_CCMD_GLOBAL_INVL);
1f0ef2aa 2370 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
ba395927
KA
2371}
2372
109b9b04
DW
2373static inline void unlink_domain_info(struct device_domain_info *info)
2374{
2375 assert_spin_locked(&device_domain_lock);
2376 list_del(&info->link);
2377 list_del(&info->global);
2378 if (info->dev)
0bcb3e28 2379 info->dev->archdata.iommu = NULL;
109b9b04
DW
2380}
2381
ba395927
KA
2382static void domain_remove_dev_info(struct dmar_domain *domain)
2383{
3a74ca01 2384 struct device_domain_info *info, *tmp;
fb170fb4 2385 unsigned long flags;
ba395927
KA
2386
2387 spin_lock_irqsave(&device_domain_lock, flags);
76f45fe3 2388 list_for_each_entry_safe(info, tmp, &domain->devices, link)
127c7615 2389 __dmar_remove_one_dev_info(info);
ba395927
KA
2390 spin_unlock_irqrestore(&device_domain_lock, flags);
2391}
2392
2393/*
2394 * find_domain
1525a29a 2395 * Note: struct device->archdata.iommu stores the info
ba395927 2396 */
1525a29a 2397static struct dmar_domain *find_domain(struct device *dev)
ba395927
KA
2398{
2399 struct device_domain_info *info;
2400
2401 /* No lock here, assumes no domain exit in normal case */
1525a29a 2402 info = dev->archdata.iommu;
b316d02a 2403 if (likely(info))
ba395927
KA
2404 return info->domain;
2405 return NULL;
2406}
2407
5a8f40e8 2408static inline struct device_domain_info *
745f2586
JL
2409dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2410{
2411 struct device_domain_info *info;
2412
2413 list_for_each_entry(info, &device_domain_list, global)
41e80dca 2414 if (info->iommu->segment == segment && info->bus == bus &&
745f2586 2415 info->devfn == devfn)
5a8f40e8 2416 return info;
745f2586
JL
2417
2418 return NULL;
2419}
2420
5db31569
JR
2421static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2422 int bus, int devfn,
2423 struct device *dev,
2424 struct dmar_domain *domain)
745f2586 2425{
5a8f40e8 2426 struct dmar_domain *found = NULL;
745f2586
JL
2427 struct device_domain_info *info;
2428 unsigned long flags;
d160aca5 2429 int ret;
745f2586
JL
2430
2431 info = alloc_devinfo_mem();
2432 if (!info)
b718cd3d 2433 return NULL;
745f2586 2434
745f2586
JL
2435 info->bus = bus;
2436 info->devfn = devfn;
b16d0cb9
DW
2437 info->ats_supported = info->pasid_supported = info->pri_supported = 0;
2438 info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
2439 info->ats_qdep = 0;
745f2586
JL
2440 info->dev = dev;
2441 info->domain = domain;
5a8f40e8 2442 info->iommu = iommu;
745f2586 2443
b16d0cb9
DW
2444 if (dev && dev_is_pci(dev)) {
2445 struct pci_dev *pdev = to_pci_dev(info->dev);
2446
2447 if (ecap_dev_iotlb_support(iommu->ecap) &&
2448 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
2449 dmar_find_matched_atsr_unit(pdev))
2450 info->ats_supported = 1;
2451
2452 if (ecs_enabled(iommu)) {
2453 if (pasid_enabled(iommu)) {
2454 int features = pci_pasid_features(pdev);
2455 if (features >= 0)
2456 info->pasid_supported = features | 1;
2457 }
2458
2459 if (info->ats_supported && ecap_prs(iommu->ecap) &&
2460 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI))
2461 info->pri_supported = 1;
2462 }
2463 }
2464
745f2586
JL
2465 spin_lock_irqsave(&device_domain_lock, flags);
2466 if (dev)
0bcb3e28 2467 found = find_domain(dev);
f303e507
JR
2468
2469 if (!found) {
5a8f40e8 2470 struct device_domain_info *info2;
41e80dca 2471 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
f303e507
JR
2472 if (info2) {
2473 found = info2->domain;
2474 info2->dev = dev;
2475 }
5a8f40e8 2476 }
f303e507 2477
745f2586
JL
2478 if (found) {
2479 spin_unlock_irqrestore(&device_domain_lock, flags);
2480 free_devinfo_mem(info);
b718cd3d
DW
2481 /* Caller must free the original domain */
2482 return found;
745f2586
JL
2483 }
2484
d160aca5
JR
2485 spin_lock(&iommu->lock);
2486 ret = domain_attach_iommu(domain, iommu);
2487 spin_unlock(&iommu->lock);
2488
2489 if (ret) {
c6c2cebd 2490 spin_unlock_irqrestore(&device_domain_lock, flags);
499f3aa4 2491 free_devinfo_mem(info);
c6c2cebd
JR
2492 return NULL;
2493 }
c6c2cebd 2494
b718cd3d
DW
2495 list_add(&info->link, &domain->devices);
2496 list_add(&info->global, &device_domain_list);
2497 if (dev)
2498 dev->archdata.iommu = info;
2499 spin_unlock_irqrestore(&device_domain_lock, flags);
2500
cc4e2575
JR
2501 if (dev && domain_context_mapping(domain, dev)) {
2502 pr_err("Domain context map for %s failed\n", dev_name(dev));
e6de0f8d 2503 dmar_remove_one_dev_info(domain, dev);
cc4e2575
JR
2504 return NULL;
2505 }
2506
b718cd3d 2507 return domain;
745f2586
JL
2508}
2509
579305f7
AW
2510static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2511{
2512 *(u16 *)opaque = alias;
2513 return 0;
2514}
2515
76208356 2516static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
ba395927 2517{
cc4e2575 2518 struct device_domain_info *info = NULL;
76208356 2519 struct dmar_domain *domain = NULL;
579305f7 2520 struct intel_iommu *iommu;
08a7f456 2521 u16 req_id, dma_alias;
ba395927 2522 unsigned long flags;
aa4d066a 2523 u8 bus, devfn;
ba395927 2524
579305f7
AW
2525 iommu = device_to_iommu(dev, &bus, &devfn);
2526 if (!iommu)
2527 return NULL;
2528
08a7f456
JR
2529 req_id = ((u16)bus << 8) | devfn;
2530
146922ec
DW
2531 if (dev_is_pci(dev)) {
2532 struct pci_dev *pdev = to_pci_dev(dev);
276dbf99 2533
579305f7
AW
2534 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2535
2536 spin_lock_irqsave(&device_domain_lock, flags);
2537 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2538 PCI_BUS_NUM(dma_alias),
2539 dma_alias & 0xff);
2540 if (info) {
2541 iommu = info->iommu;
2542 domain = info->domain;
5a8f40e8 2543 }
579305f7 2544 spin_unlock_irqrestore(&device_domain_lock, flags);
ba395927 2545
76208356 2546 /* DMA alias already has a domain, use it */
579305f7 2547 if (info)
76208356 2548 goto out;
579305f7 2549 }
ba395927 2550
146922ec 2551 /* Allocate and initialize new domain for the device */
ab8dfe25 2552 domain = alloc_domain(0);
745f2586 2553 if (!domain)
579305f7 2554 return NULL;
dc534b25 2555 if (domain_init(domain, iommu, gaw)) {
579305f7
AW
2556 domain_exit(domain);
2557 return NULL;
2c2e2c38 2558 }
ba395927 2559
76208356 2560out:
579305f7 2561
76208356
JR
2562 return domain;
2563}
579305f7 2564
76208356
JR
2565static struct dmar_domain *set_domain_for_dev(struct device *dev,
2566 struct dmar_domain *domain)
2567{
2568 struct intel_iommu *iommu;
2569 struct dmar_domain *tmp;
2570 u16 req_id, dma_alias;
2571 u8 bus, devfn;
2572
2573 iommu = device_to_iommu(dev, &bus, &devfn);
2574 if (!iommu)
2575 return NULL;
2576
2577 req_id = ((u16)bus << 8) | devfn;
2578
2579 if (dev_is_pci(dev)) {
2580 struct pci_dev *pdev = to_pci_dev(dev);
2581
2582 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2583
2584 /* register PCI DMA alias device */
2585 if (req_id != dma_alias) {
2586 tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2587 dma_alias & 0xff, NULL, domain);
2588
2589 if (!tmp || tmp != domain)
2590 return tmp;
2591 }
ba395927
KA
2592 }
2593
5db31569 2594 tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
76208356
JR
2595 if (!tmp || tmp != domain)
2596 return tmp;
2597
2598 return domain;
2599}
579305f7 2600
76208356
JR
2601static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2602{
2603 struct dmar_domain *domain, *tmp;
2604
2605 domain = find_domain(dev);
2606 if (domain)
2607 goto out;
2608
2609 domain = find_or_alloc_domain(dev, gaw);
2610 if (!domain)
2611 goto out;
2612
2613 tmp = set_domain_for_dev(dev, domain);
2614 if (!tmp || domain != tmp) {
579305f7
AW
2615 domain_exit(domain);
2616 domain = tmp;
2617 }
b718cd3d 2618
76208356
JR
2619out:
2620
b718cd3d 2621 return domain;
ba395927
KA
2622}
2623
b213203e
DW
2624static int iommu_domain_identity_map(struct dmar_domain *domain,
2625 unsigned long long start,
2626 unsigned long long end)
ba395927 2627{
c5395d5c
DW
2628 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2629 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2630
2631 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2632 dma_to_mm_pfn(last_vpfn))) {
9f10e5bf 2633 pr_err("Reserving iova failed\n");
b213203e 2634 return -ENOMEM;
ba395927
KA
2635 }
2636
af1089ce 2637 pr_debug("Mapping reserved region %llx-%llx\n", start, end);
ba395927
KA
2638 /*
2639 * RMRR range might have overlap with physical memory range,
2640 * clear it first
2641 */
c5395d5c 2642 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
ba395927 2643
c5395d5c
DW
2644 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2645 last_vpfn - first_vpfn + 1,
61df7443 2646 DMA_PTE_READ|DMA_PTE_WRITE);
b213203e
DW
2647}
2648
d66ce54b
JR
2649static int domain_prepare_identity_map(struct device *dev,
2650 struct dmar_domain *domain,
2651 unsigned long long start,
2652 unsigned long long end)
b213203e 2653{
19943b0e
DW
2654 /* For _hardware_ passthrough, don't bother. But for software
2655 passthrough, we do it anyway -- it may indicate a memory
2656 range which is reserved in E820, and so didn't get set
2657 up to start with in si_domain */
2658 if (domain == si_domain && hw_pass_through) {
9f10e5bf
JR
2659 pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2660 dev_name(dev), start, end);
19943b0e
DW
2661 return 0;
2662 }
2663
9f10e5bf
JR
2664 pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2665 dev_name(dev), start, end);
2666
5595b528
DW
2667 if (end < start) {
2668 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2669 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2670 dmi_get_system_info(DMI_BIOS_VENDOR),
2671 dmi_get_system_info(DMI_BIOS_VERSION),
2672 dmi_get_system_info(DMI_PRODUCT_VERSION));
d66ce54b 2673 return -EIO;
5595b528
DW
2674 }
2675
2ff729f5
DW
2676 if (end >> agaw_to_width(domain->agaw)) {
2677 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2678 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2679 agaw_to_width(domain->agaw),
2680 dmi_get_system_info(DMI_BIOS_VENDOR),
2681 dmi_get_system_info(DMI_BIOS_VERSION),
2682 dmi_get_system_info(DMI_PRODUCT_VERSION));
d66ce54b 2683 return -EIO;
2ff729f5 2684 }
19943b0e 2685
d66ce54b
JR
2686 return iommu_domain_identity_map(domain, start, end);
2687}
ba395927 2688
d66ce54b
JR
2689static int iommu_prepare_identity_map(struct device *dev,
2690 unsigned long long start,
2691 unsigned long long end)
2692{
2693 struct dmar_domain *domain;
2694 int ret;
2695
2696 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2697 if (!domain)
2698 return -ENOMEM;
2699
2700 ret = domain_prepare_identity_map(dev, domain, start, end);
2701 if (ret)
2702 domain_exit(domain);
b213203e 2703
ba395927 2704 return ret;
ba395927
KA
2705}
2706
2707static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
0b9d9753 2708 struct device *dev)
ba395927 2709{
0b9d9753 2710 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
ba395927 2711 return 0;
0b9d9753
DW
2712 return iommu_prepare_identity_map(dev, rmrr->base_address,
2713 rmrr->end_address);
ba395927
KA
2714}
2715
d3f13810 2716#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
49a0429e
KA
2717static inline void iommu_prepare_isa(void)
2718{
2719 struct pci_dev *pdev;
2720 int ret;
2721
2722 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2723 if (!pdev)
2724 return;
2725
9f10e5bf 2726 pr_info("Prepare 0-16MiB unity mapping for LPC\n");
0b9d9753 2727 ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
49a0429e
KA
2728
2729 if (ret)
9f10e5bf 2730 pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
49a0429e 2731
9b27e82d 2732 pci_dev_put(pdev);
49a0429e
KA
2733}
2734#else
2735static inline void iommu_prepare_isa(void)
2736{
2737 return;
2738}
d3f13810 2739#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
49a0429e 2740
2c2e2c38 2741static int md_domain_init(struct dmar_domain *domain, int guest_width);
c7ab48d2 2742
071e1374 2743static int __init si_domain_init(int hw)
2c2e2c38 2744{
c7ab48d2 2745 int nid, ret = 0;
2c2e2c38 2746
ab8dfe25 2747 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
2c2e2c38
FY
2748 if (!si_domain)
2749 return -EFAULT;
2750
2c2e2c38
FY
2751 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2752 domain_exit(si_domain);
2753 return -EFAULT;
2754 }
2755
0dc79715 2756 pr_debug("Identity mapping domain allocated\n");
2c2e2c38 2757
19943b0e
DW
2758 if (hw)
2759 return 0;
2760
c7ab48d2 2761 for_each_online_node(nid) {
5dfe8660
TH
2762 unsigned long start_pfn, end_pfn;
2763 int i;
2764
2765 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2766 ret = iommu_domain_identity_map(si_domain,
2767 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2768 if (ret)
2769 return ret;
2770 }
c7ab48d2
DW
2771 }
2772
2c2e2c38
FY
2773 return 0;
2774}
2775
9b226624 2776static int identity_mapping(struct device *dev)
2c2e2c38
FY
2777{
2778 struct device_domain_info *info;
2779
2780 if (likely(!iommu_identity_mapping))
2781 return 0;
2782
9b226624 2783 info = dev->archdata.iommu;
cb452a40
MT
2784 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2785 return (info->domain == si_domain);
2c2e2c38 2786
2c2e2c38
FY
2787 return 0;
2788}
2789
28ccce0d 2790static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
2c2e2c38 2791{
0ac72664 2792 struct dmar_domain *ndomain;
5a8f40e8 2793 struct intel_iommu *iommu;
156baca8 2794 u8 bus, devfn;
2c2e2c38 2795
5913c9bf 2796 iommu = device_to_iommu(dev, &bus, &devfn);
5a8f40e8
DW
2797 if (!iommu)
2798 return -ENODEV;
2799
5db31569 2800 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
0ac72664
DW
2801 if (ndomain != domain)
2802 return -EBUSY;
2c2e2c38
FY
2803
2804 return 0;
2805}
2806
0b9d9753 2807static bool device_has_rmrr(struct device *dev)
ea2447f7
TM
2808{
2809 struct dmar_rmrr_unit *rmrr;
832bd858 2810 struct device *tmp;
ea2447f7
TM
2811 int i;
2812
0e242612 2813 rcu_read_lock();
ea2447f7 2814 for_each_rmrr_units(rmrr) {
b683b230
JL
2815 /*
2816 * Return TRUE if this RMRR contains the device that
2817 * is passed in.
2818 */
2819 for_each_active_dev_scope(rmrr->devices,
2820 rmrr->devices_cnt, i, tmp)
0b9d9753 2821 if (tmp == dev) {
0e242612 2822 rcu_read_unlock();
ea2447f7 2823 return true;
b683b230 2824 }
ea2447f7 2825 }
0e242612 2826 rcu_read_unlock();
ea2447f7
TM
2827 return false;
2828}
2829
c875d2c1
AW
2830/*
2831 * There are a couple of cases where we need to restrict the functionality of
2832 * devices associated with RMRRs. The first is when evaluating a device for
2833 * identity mapping because problems exist when devices are moved in and out
2834 * of domains and their respective RMRR information is lost. This means that
2835 * a device with associated RMRRs will never be in a "passthrough" domain.
2836 * The second is use of the device through the IOMMU API. This interface
2837 * expects to have full control of the IOVA space for the device. We cannot
2838 * satisfy both the requirement that RMRR access is maintained and have an
2839 * unencumbered IOVA space. We also have no ability to quiesce the device's
2840 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2841 * We therefore prevent devices associated with an RMRR from participating in
2842 * the IOMMU API, which eliminates them from device assignment.
2843 *
2844 * In both cases we assume that PCI USB devices with RMRRs have them largely
2845 * for historical reasons and that the RMRR space is not actively used post
2846 * boot. This exclusion may change if vendors begin to abuse it.
18436afd
DW
2847 *
2848 * The same exception is made for graphics devices, with the requirement that
2849 * any use of the RMRR regions will be torn down before assigning the device
2850 * to a guest.
c875d2c1
AW
2851 */
2852static bool device_is_rmrr_locked(struct device *dev)
2853{
2854 if (!device_has_rmrr(dev))
2855 return false;
2856
2857 if (dev_is_pci(dev)) {
2858 struct pci_dev *pdev = to_pci_dev(dev);
2859
18436afd 2860 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
c875d2c1
AW
2861 return false;
2862 }
2863
2864 return true;
2865}
2866
3bdb2591 2867static int iommu_should_identity_map(struct device *dev, int startup)
6941af28 2868{
ea2447f7 2869
3bdb2591
DW
2870 if (dev_is_pci(dev)) {
2871 struct pci_dev *pdev = to_pci_dev(dev);
ea2447f7 2872
c875d2c1 2873 if (device_is_rmrr_locked(dev))
3bdb2591 2874 return 0;
e0fc7e0b 2875
3bdb2591
DW
2876 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2877 return 1;
e0fc7e0b 2878
3bdb2591
DW
2879 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2880 return 1;
6941af28 2881
3bdb2591 2882 if (!(iommu_identity_mapping & IDENTMAP_ALL))
3dfc813d 2883 return 0;
3bdb2591
DW
2884
2885 /*
2886 * We want to start off with all devices in the 1:1 domain, and
2887 * take them out later if we find they can't access all of memory.
2888 *
2889 * However, we can't do this for PCI devices behind bridges,
2890 * because all PCI devices behind the same bridge will end up
2891 * with the same source-id on their transactions.
2892 *
2893 * Practically speaking, we can't change things around for these
2894 * devices at run-time, because we can't be sure there'll be no
2895 * DMA transactions in flight for any of their siblings.
2896 *
2897 * So PCI devices (unless they're on the root bus) as well as
2898 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2899 * the 1:1 domain, just in _case_ one of their siblings turns out
2900 * not to be able to map all of memory.
2901 */
2902 if (!pci_is_pcie(pdev)) {
2903 if (!pci_is_root_bus(pdev->bus))
2904 return 0;
2905 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2906 return 0;
2907 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
3dfc813d 2908 return 0;
3bdb2591
DW
2909 } else {
2910 if (device_has_rmrr(dev))
2911 return 0;
2912 }
3dfc813d 2913
3bdb2591 2914 /*
3dfc813d 2915 * At boot time, we don't yet know if devices will be 64-bit capable.
3bdb2591 2916 * Assume that they will — if they turn out not to be, then we can
3dfc813d
DW
2917 * take them out of the 1:1 domain later.
2918 */
8fcc5372
CW
2919 if (!startup) {
2920 /*
2921 * If the device's dma_mask is less than the system's memory
2922 * size then this is not a candidate for identity mapping.
2923 */
3bdb2591 2924 u64 dma_mask = *dev->dma_mask;
8fcc5372 2925
3bdb2591
DW
2926 if (dev->coherent_dma_mask &&
2927 dev->coherent_dma_mask < dma_mask)
2928 dma_mask = dev->coherent_dma_mask;
8fcc5372 2929
3bdb2591 2930 return dma_mask >= dma_get_required_mask(dev);
8fcc5372 2931 }
6941af28
DW
2932
2933 return 1;
2934}
2935
cf04eee8
DW
2936static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2937{
2938 int ret;
2939
2940 if (!iommu_should_identity_map(dev, 1))
2941 return 0;
2942
28ccce0d 2943 ret = domain_add_dev_info(si_domain, dev);
cf04eee8 2944 if (!ret)
9f10e5bf
JR
2945 pr_info("%s identity mapping for device %s\n",
2946 hw ? "Hardware" : "Software", dev_name(dev));
cf04eee8
DW
2947 else if (ret == -ENODEV)
2948 /* device not associated with an iommu */
2949 ret = 0;
2950
2951 return ret;
2952}
2953
2954
071e1374 2955static int __init iommu_prepare_static_identity_mapping(int hw)
2c2e2c38 2956{
2c2e2c38 2957 struct pci_dev *pdev = NULL;
cf04eee8
DW
2958 struct dmar_drhd_unit *drhd;
2959 struct intel_iommu *iommu;
2960 struct device *dev;
2961 int i;
2962 int ret = 0;
2c2e2c38 2963
2c2e2c38 2964 for_each_pci_dev(pdev) {
cf04eee8
DW
2965 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2966 if (ret)
2967 return ret;
2968 }
2969
2970 for_each_active_iommu(iommu, drhd)
2971 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2972 struct acpi_device_physical_node *pn;
2973 struct acpi_device *adev;
2974
2975 if (dev->bus != &acpi_bus_type)
2976 continue;
86080ccc 2977
cf04eee8
DW
2978 adev= to_acpi_device(dev);
2979 mutex_lock(&adev->physical_node_lock);
2980 list_for_each_entry(pn, &adev->physical_node_list, node) {
2981 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2982 if (ret)
2983 break;
eae460b6 2984 }
cf04eee8
DW
2985 mutex_unlock(&adev->physical_node_lock);
2986 if (ret)
2987 return ret;
62edf5dc 2988 }
2c2e2c38
FY
2989
2990 return 0;
2991}
2992
ffebeb46
JL
2993static void intel_iommu_init_qi(struct intel_iommu *iommu)
2994{
2995 /*
2996 * Start from the sane iommu hardware state.
2997 * If the queued invalidation is already initialized by us
2998 * (for example, while enabling interrupt-remapping) then
3000 * things are already rolling from a sane state.
3000 */
3001 if (!iommu->qi) {
3002 /*
3003 * Clear any previous faults.
3004 */
3005 dmar_fault(-1, iommu);
3006 /*
3007 * Disable queued invalidation if supported and already enabled
3008 * before OS handover.
3009 */
3010 dmar_disable_qi(iommu);
3011 }
3012
3013 if (dmar_enable_qi(iommu)) {
3014 /*
3015 * Queued Invalidate not enabled, use Register Based Invalidate
3016 */
3017 iommu->flush.flush_context = __iommu_flush_context;
3018 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
9f10e5bf 3019 pr_info("%s: Using Register based invalidation\n",
ffebeb46
JL
3020 iommu->name);
3021 } else {
3022 iommu->flush.flush_context = qi_flush_context;
3023 iommu->flush.flush_iotlb = qi_flush_iotlb;
9f10e5bf 3024 pr_info("%s: Using Queued invalidation\n", iommu->name);
ffebeb46
JL
3025 }
3026}
3027
091d42e4 3028static int copy_context_table(struct intel_iommu *iommu,
dfddb969 3029 struct root_entry *old_re,
091d42e4
JR
3030 struct context_entry **tbl,
3031 int bus, bool ext)
3032{
dbcd861f 3033 int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
543c8dcf 3034 struct context_entry *new_ce = NULL, ce;
dfddb969 3035 struct context_entry *old_ce = NULL;
543c8dcf 3036 struct root_entry re;
091d42e4
JR
3037 phys_addr_t old_ce_phys;
3038
3039 tbl_idx = ext ? bus * 2 : bus;
dfddb969 3040 memcpy(&re, old_re, sizeof(re));
091d42e4
JR
3041
3042 for (devfn = 0; devfn < 256; devfn++) {
3043 /* First calculate the correct index */
3044 idx = (ext ? devfn * 2 : devfn) % 256;
3045
3046 if (idx == 0) {
3047 /* First save what we may have and clean up */
3048 if (new_ce) {
3049 tbl[tbl_idx] = new_ce;
3050 __iommu_flush_cache(iommu, new_ce,
3051 VTD_PAGE_SIZE);
3052 pos = 1;
3053 }
3054
3055 if (old_ce)
3056 iounmap(old_ce);
3057
3058 ret = 0;
3059 if (devfn < 0x80)
543c8dcf 3060 old_ce_phys = root_entry_lctp(&re);
091d42e4 3061 else
543c8dcf 3062 old_ce_phys = root_entry_uctp(&re);
091d42e4
JR
3063
3064 if (!old_ce_phys) {
3065 if (ext && devfn == 0) {
3066 /* No LCTP, try UCTP */
3067 devfn = 0x7f;
3068 continue;
3069 } else {
3070 goto out;
3071 }
3072 }
3073
3074 ret = -ENOMEM;
dfddb969
DW
3075 old_ce = memremap(old_ce_phys, PAGE_SIZE,
3076 MEMREMAP_WB);
091d42e4
JR
3077 if (!old_ce)
3078 goto out;
3079
3080 new_ce = alloc_pgtable_page(iommu->node);
3081 if (!new_ce)
3082 goto out_unmap;
3083
3084 ret = 0;
3085 }
3086
3087 /* Now copy the context entry */
dfddb969 3088 memcpy(&ce, old_ce + idx, sizeof(ce));
091d42e4 3089
cf484d0e 3090 if (!__context_present(&ce))
091d42e4
JR
3091 continue;
3092
dbcd861f
JR
3093 did = context_domain_id(&ce);
3094 if (did >= 0 && did < cap_ndoms(iommu->cap))
3095 set_bit(did, iommu->domain_ids);
3096
cf484d0e
JR
3097 /*
3098 * We need a marker for copied context entries. This
3099 * marker needs to work for the old format as well as
3100 * for extended context entries.
3101 *
3102 * Bit 67 of the context entry is used. In the old
3103 * format this bit is available to software, in the
3104 * extended format it is the PGE bit, but PGE is ignored
3105 * by HW if PASIDs are disabled (and thus still
3106 * available).
3107 *
3108 * So disable PASIDs first and then mark the entry
3109 * copied. This means that we don't copy PASID
3110 * translations from the old kernel, but this is fine as
3111 * faults there are not fatal.
3112 */
3113 context_clear_pasid_enable(&ce);
3114 context_set_copied(&ce);
3115
091d42e4
JR
3116 new_ce[idx] = ce;
3117 }
3118
3119 tbl[tbl_idx + pos] = new_ce;
3120
3121 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
3122
3123out_unmap:
dfddb969 3124 memunmap(old_ce);
091d42e4
JR
3125
3126out:
3127 return ret;
3128}
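/*
 * Illustrative note: in extended mode every context entry is twice as
 * wide, so each bus needs two pages of copied entries.  For bus 3 the
 * copies end up as:
 *
 *	tbl[6]  <-  entries for devfn 0x00-0x7f (via root_entry_lctp())
 *	tbl[7]  <-  entries for devfn 0x80-0xff (via root_entry_uctp())
 *
 * which is why tbl_idx is bus * 2 and pos flips to 1 once the devfn
 * index wraps at 0x80.
 */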
3129
3130static int copy_translation_tables(struct intel_iommu *iommu)
3131{
3132 struct context_entry **ctxt_tbls;
dfddb969 3133 struct root_entry *old_rt;
091d42e4
JR
3134 phys_addr_t old_rt_phys;
3135 int ctxt_table_entries;
3136 unsigned long flags;
3137 u64 rtaddr_reg;
3138 int bus, ret;
c3361f2f 3139 bool new_ext, ext;
091d42e4
JR
3140
3141 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
3142 ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
c3361f2f
JR
3143 new_ext = !!ecap_ecs(iommu->ecap);
3144
3145 /*
3146 * The RTT bit can only be changed when translation is disabled,
3147 * but disabling translation means to open a window for data
3148 * corruption. So bail out and don't copy anything if we would
3149 * have to change the bit.
3150 */
3151 if (new_ext != ext)
3152 return -EINVAL;
091d42e4
JR
3153
3154 old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
3155 if (!old_rt_phys)
3156 return -EINVAL;
3157
dfddb969 3158 old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
091d42e4
JR
3159 if (!old_rt)
3160 return -ENOMEM;
3161
3162 /* This is too big for the stack - allocate it from slab */
3163 ctxt_table_entries = ext ? 512 : 256;
3164 ret = -ENOMEM;
3165 ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
3166 if (!ctxt_tbls)
3167 goto out_unmap;
3168
3169 for (bus = 0; bus < 256; bus++) {
3170 ret = copy_context_table(iommu, &old_rt[bus],
3171 ctxt_tbls, bus, ext);
3172 if (ret) {
3173 pr_err("%s: Failed to copy context table for bus %d\n",
3174 iommu->name, bus);
3175 continue;
3176 }
3177 }
3178
3179 spin_lock_irqsave(&iommu->lock, flags);
3180
3181 /* Context tables are copied, now write them to the root_entry table */
3182 for (bus = 0; bus < 256; bus++) {
3183 int idx = ext ? bus * 2 : bus;
3184 u64 val;
3185
3186 if (ctxt_tbls[idx]) {
3187 val = virt_to_phys(ctxt_tbls[idx]) | 1;
3188 iommu->root_entry[bus].lo = val;
3189 }
3190
3191 if (!ext || !ctxt_tbls[idx + 1])
3192 continue;
3193
3194 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
3195 iommu->root_entry[bus].hi = val;
3196 }
3197
3198 spin_unlock_irqrestore(&iommu->lock, flags);
3199
3200 kfree(ctxt_tbls);
3201
3202 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
3203
3204 ret = 0;
3205
3206out_unmap:
dfddb969 3207 memunmap(old_rt);
091d42e4
JR
3208
3209 return ret;
3210}
3211
b779260b 3212static int __init init_dmars(void)
ba395927
KA
3213{
3214 struct dmar_drhd_unit *drhd;
3215 struct dmar_rmrr_unit *rmrr;
a87f4918 3216 bool copied_tables = false;
832bd858 3217 struct device *dev;
ba395927 3218 struct intel_iommu *iommu;
aa473240 3219 int i, ret, cpu;
2c2e2c38 3220
ba395927
KA
3221 /*
3222 * for each drhd
3223 * allocate root
3224 * initialize and program root entry to not present
3225 * endfor
3226 */
3227 for_each_drhd_unit(drhd) {
5e0d2a6f 3228 /*
3229 * lock not needed as this is only incremented in the single-
3230 * threaded kernel __init code path; all other accesses are read
3231 * only
3232 */
78d8e704 3233 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
1b198bb0
MT
3234 g_num_of_iommus++;
3235 continue;
3236 }
9f10e5bf 3237 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
5e0d2a6f 3238 }
3239
ffebeb46
JL
3240 /* Preallocate enough resources for IOMMU hot-addition */
3241 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3242 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3243
d9630fe9
WH
3244 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3245 GFP_KERNEL);
3246 if (!g_iommus) {
9f10e5bf 3247 pr_err("Allocating global iommu array failed\n");
d9630fe9
WH
3248 ret = -ENOMEM;
3249 goto error;
3250 }
3251
aa473240
OP
3252 for_each_possible_cpu(cpu) {
3253 struct deferred_flush_data *dfd = per_cpu_ptr(&deferred_flush,
3254 cpu);
3255
3256 dfd->tables = kzalloc(g_num_of_iommus *
3257 sizeof(struct deferred_flush_table),
3258 GFP_KERNEL);
3259 if (!dfd->tables) {
3260 ret = -ENOMEM;
3261 goto free_g_iommus;
3262 }
3263
3264 spin_lock_init(&dfd->lock);
3265 setup_timer(&dfd->timer, flush_unmaps_timeout, cpu);
5e0d2a6f 3266 }
3267
7c919779 3268 for_each_active_iommu(iommu, drhd) {
d9630fe9 3269 g_iommus[iommu->seq_id] = iommu;
ba395927 3270
b63d80d1
JR
3271 intel_iommu_init_qi(iommu);
3272
e61d98d8
SS
3273 ret = iommu_init_domains(iommu);
3274 if (ret)
989d51fc 3275 goto free_iommu;
e61d98d8 3276
4158c2ec
JR
3277 init_translation_status(iommu);
3278
091d42e4
JR
3279 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3280 iommu_disable_translation(iommu);
3281 clear_translation_pre_enabled(iommu);
3282 pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3283 iommu->name);
3284 }
4158c2ec 3285
ba395927
KA
3286 /*
3287 * TBD:
3288 * we could share the same root & context tables
25985edc 3289 * among all IOMMU's. Need to Split it later.
ba395927
KA
3290 */
3291 ret = iommu_alloc_root_entry(iommu);
ffebeb46 3292 if (ret)
989d51fc 3293 goto free_iommu;
5f0a7f76 3294
091d42e4
JR
3295 if (translation_pre_enabled(iommu)) {
3296 pr_info("Translation already enabled - trying to copy translation structures\n");
3297
3298 ret = copy_translation_tables(iommu);
3299 if (ret) {
3300 /*
3301 * We found the IOMMU with translation
3302 * enabled - but failed to copy over the
3303 * old root-entry table. Try to proceed
3304 * by disabling translation now and
3305 * allocating a clean root-entry table.
3306 * This might cause DMAR faults, but
3307 * probably the dump will still succeed.
3308 */
3309 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3310 iommu->name);
3311 iommu_disable_translation(iommu);
3312 clear_translation_pre_enabled(iommu);
3313 } else {
3314 pr_info("Copied translation tables from previous kernel for %s\n",
3315 iommu->name);
a87f4918 3316 copied_tables = true;
091d42e4
JR
3317 }
3318 }
3319
4ed0d3e6 3320 if (!ecap_pass_through(iommu->ecap))
19943b0e 3321 hw_pass_through = 0;
8a94ade4
DW
3322#ifdef CONFIG_INTEL_IOMMU_SVM
3323 if (pasid_enabled(iommu))
3324 intel_svm_alloc_pasid_tables(iommu);
3325#endif
ba395927
KA
3326 }
3327
a4c34ff1
JR
3328 /*
3329 * Now that qi is enabled on all iommus, set the root entry and flush
3330 * caches. This is required on some Intel X58 chipsets, otherwise the
3331 * flush_context function will loop forever and the boot hangs.
3332 */
3333 for_each_active_iommu(iommu, drhd) {
3334 iommu_flush_write_buffer(iommu);
3335 iommu_set_root_entry(iommu);
3336 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3337 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3338 }
3339
19943b0e 3340 if (iommu_pass_through)
e0fc7e0b
DW
3341 iommu_identity_mapping |= IDENTMAP_ALL;
3342
d3f13810 3343#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
e0fc7e0b 3344 iommu_identity_mapping |= IDENTMAP_GFX;
19943b0e 3345#endif
e0fc7e0b 3346
21e722c4
AR
3347 check_tylersburg_isoch();
3348
86080ccc
JR
3349 if (iommu_identity_mapping) {
3350 ret = si_domain_init(hw_pass_through);
3351 if (ret)
3352 goto free_iommu;
3353 }
3354
e0fc7e0b 3355
a87f4918
JR
3356 /*
3357 * If we copied translations from a previous kernel in the kdump
3358 * case, we cannot assign the devices to domains now, as that
3359 * would eliminate the old mappings. So skip this part and defer
3360 * the assignment to device driver initialization time.
3361 */
3362 if (copied_tables)
3363 goto domains_done;
3364
ba395927 3365 /*
19943b0e
DW
3366 * If pass through is not set or not enabled, setup context entries for
3367 * identity mappings for rmrr, gfx, and isa and may fall back to static
3368 * identity mapping if iommu_identity_mapping is set.
ba395927 3369 */
19943b0e
DW
3370 if (iommu_identity_mapping) {
3371 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
4ed0d3e6 3372 if (ret) {
9f10e5bf 3373 pr_crit("Failed to setup IOMMU pass-through\n");
989d51fc 3374 goto free_iommu;
ba395927
KA
3375 }
3376 }
ba395927 3377 /*
19943b0e
DW
3378 * For each rmrr
3379 * for each dev attached to rmrr
3380 * do
3381 * locate drhd for dev, alloc domain for dev
3382 * allocate free domain
3383 * allocate page table entries for rmrr
3384 * if context not allocated for bus
3385 * allocate and init context
3386 * set present in root table for this bus
3387 * init context with domain, translation etc
3388 * endfor
3389 * endfor
ba395927 3390 */
9f10e5bf 3391 pr_info("Setting RMRR:\n");
19943b0e 3392 for_each_rmrr_units(rmrr) {
b683b230
JL
3393 /* some BIOS lists non-exist devices in DMAR table. */
3394 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
832bd858 3395 i, dev) {
0b9d9753 3396 ret = iommu_prepare_rmrr_dev(rmrr, dev);
19943b0e 3397 if (ret)
9f10e5bf 3398 pr_err("Mapping reserved region failed\n");
ba395927 3399 }
4ed0d3e6 3400 }
49a0429e 3401
19943b0e
DW
3402 iommu_prepare_isa();
3403
a87f4918
JR
3404domains_done:
3405
ba395927
KA
3406 /*
3407 * for each drhd
3408 * enable fault log
3409 * global invalidate context cache
3410 * global invalidate iotlb
3411 * enable translation
3412 */
7c919779 3413 for_each_iommu(iommu, drhd) {
51a63e67
JC
3414 if (drhd->ignored) {
3415 /*
3416 * we always have to disable PMRs or DMA may fail on
3417 * this device
3418 */
3419 if (force_on)
7c919779 3420 iommu_disable_protect_mem_regions(iommu);
ba395927 3421 continue;
51a63e67 3422 }
ba395927
KA
3423
3424 iommu_flush_write_buffer(iommu);
3425
a222a7f0
DW
3426#ifdef CONFIG_INTEL_IOMMU_SVM
3427 if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
3428 ret = intel_svm_enable_prq(iommu);
3429 if (ret)
3430 goto free_iommu;
3431 }
3432#endif
3460a6d9
KA
3433 ret = dmar_set_interrupt(iommu);
3434 if (ret)
989d51fc 3435 goto free_iommu;
3460a6d9 3436
8939ddf6
JR
3437 if (!translation_pre_enabled(iommu))
3438 iommu_enable_translation(iommu);
3439
b94996c9 3440 iommu_disable_protect_mem_regions(iommu);
ba395927
KA
3441 }
3442
3443 return 0;
989d51fc
JL
3444
3445free_iommu:
ffebeb46
JL
3446 for_each_active_iommu(iommu, drhd) {
3447 disable_dmar_iommu(iommu);
a868e6b7 3448 free_dmar_iommu(iommu);
ffebeb46 3449 }
989d51fc 3450free_g_iommus:
aa473240
OP
3451 for_each_possible_cpu(cpu)
3452 kfree(per_cpu_ptr(&deferred_flush, cpu)->tables);
d9630fe9 3453 kfree(g_iommus);
989d51fc 3454error:
ba395927
KA
3455 return ret;
3456}
3457
5a5e02a6 3458/* This takes a number of _MM_ pages, not VTD pages */
2aac6304 3459static unsigned long intel_alloc_iova(struct device *dev,
875764de
DW
3460 struct dmar_domain *domain,
3461 unsigned long nrpages, uint64_t dma_mask)
ba395927 3462{
22e2f9fa 3463 unsigned long iova_pfn = 0;
ba395927 3464
875764de
DW
3465 /* Restrict dma_mask to the width that the iommu can handle */
3466 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
8f6429c7
RM
3467 /* Ensure we reserve the whole size-aligned region */
3468 nrpages = __roundup_pow_of_two(nrpages);
875764de
DW
3469
3470 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
ba395927
KA
3471 /*
3472 * First try to allocate an io virtual address in
284901a9 3473 * DMA_BIT_MASK(32) and if that fails then try allocating
3609801e 3474 * from a higher range
ba395927 3475 */
22e2f9fa
OP
3476 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
3477 IOVA_PFN(DMA_BIT_MASK(32)));
3478 if (iova_pfn)
3479 return iova_pfn;
875764de 3480 }
22e2f9fa
OP
3481 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, IOVA_PFN(dma_mask));
3482 if (unlikely(!iova_pfn)) {
9f10e5bf 3483 pr_err("Allocating %ld-page iova for %s failed",
207e3592 3484 nrpages, dev_name(dev));
2aac6304 3485 return 0;
f76aec76
KA
3486 }
3487
22e2f9fa 3488 return iova_pfn;
f76aec76
KA
3489}
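/*
 * Worked example: a request for 3 MM pages is first rounded up to 4 so
 * the IOVA is naturally size-aligned, then tried below 4GiB before
 * falling back to the device's full mask (sketch of the calls above):
 *
 *	nrpages = __roundup_pow_of_two(3);		   4
 *	iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
 *				   IOVA_PFN(DMA_BIT_MASK(32)));
 *	if (!iova_pfn)
 *		iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
 *					   IOVA_PFN(dma_mask));
 */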
3490
b316d02a 3491static struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
f76aec76 3492{
1c5ebba9 3493 struct dmar_domain *domain, *tmp;
b1ce5b79 3494 struct dmar_rmrr_unit *rmrr;
b1ce5b79
JR
3495 struct device *i_dev;
3496 int i, ret;
f76aec76 3497
1c5ebba9
JR
3498 domain = find_domain(dev);
3499 if (domain)
3500 goto out;
3501
3502 domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
3503 if (!domain)
3504 goto out;
ba395927 3505
b1ce5b79
JR
3506 /* We have a new domain - setup possible RMRRs for the device */
3507 rcu_read_lock();
3508 for_each_rmrr_units(rmrr) {
3509 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3510 i, i_dev) {
3511 if (i_dev != dev)
3512 continue;
3513
3514 ret = domain_prepare_identity_map(dev, domain,
3515 rmrr->base_address,
3516 rmrr->end_address);
3517 if (ret)
3518 dev_err(dev, "Mapping reserved region failed\n");
3519 }
3520 }
3521 rcu_read_unlock();
3522
1c5ebba9
JR
3523 tmp = set_domain_for_dev(dev, domain);
3524 if (!tmp || domain != tmp) {
3525 domain_exit(domain);
3526 domain = tmp;
3527 }
3528
3529out:
3530
3531 if (!domain)
3532 pr_err("Allocating domain for %s failed\n", dev_name(dev));
3533
3534
f76aec76
KA
3535 return domain;
3536}
3537
ecb509ec 3538/* Check if the dev needs to go through the non-identity map and unmap process. */
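/* Returns 1 if DMA for @dev bypasses the IOMMU (dummy or identity-mapped
 * device), 0 if it must use the remapped path; may also move the device
 * into or out of si_domain as a side effect. */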
73676832 3539static int iommu_no_mapping(struct device *dev)
2c2e2c38
FY
3540{
3541 int found;
3542
3d89194a 3543 if (iommu_dummy(dev))
1e4c64c4
DW
3544 return 1;
3545
2c2e2c38 3546 if (!iommu_identity_mapping)
1e4c64c4 3547 return 0;
2c2e2c38 3548
9b226624 3549 found = identity_mapping(dev);
2c2e2c38 3550 if (found) {
ecb509ec 3551 if (iommu_should_identity_map(dev, 0))
2c2e2c38
FY
3552 return 1;
3553 else {
3554 /*
3555 * The 32 bit DMA device is removed from si_domain and falls back
3556 * to non-identity mapping.
3557 */
e6de0f8d 3558 dmar_remove_one_dev_info(si_domain, dev);
9f10e5bf
JR
3559 pr_info("32bit %s uses non-identity mapping\n",
3560 dev_name(dev));
2c2e2c38
FY
3561 return 0;
3562 }
3563 } else {
3564 /*
3565 * If a 64 bit DMA device is detached from a VM, the device
3566 * is put into si_domain for identity mapping.
3567 */
ecb509ec 3568 if (iommu_should_identity_map(dev, 0)) {
2c2e2c38 3569 int ret;
28ccce0d 3570 ret = domain_add_dev_info(si_domain, dev);
2c2e2c38 3571 if (!ret) {
9f10e5bf
JR
3572 pr_info("64bit %s uses identity mapping\n",
3573 dev_name(dev));
2c2e2c38
FY
3574 return 1;
3575 }
3576 }
3577 }
3578
1e4c64c4 3579 return 0;
2c2e2c38
FY
3580}
3581
5040a918 3582static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
bb9e6d65 3583 size_t size, int dir, u64 dma_mask)
f76aec76 3584{
f76aec76 3585 struct dmar_domain *domain;
5b6985ce 3586 phys_addr_t start_paddr;
2aac6304 3587 unsigned long iova_pfn;
f76aec76 3588 int prot = 0;
6865f0d1 3589 int ret;
8c11e798 3590 struct intel_iommu *iommu;
33041ec0 3591 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
f76aec76
KA
3592
3593 BUG_ON(dir == DMA_NONE);
2c2e2c38 3594
5040a918 3595 if (iommu_no_mapping(dev))
6865f0d1 3596 return paddr;
f76aec76 3597
5040a918 3598 domain = get_valid_domain_for_dev(dev);
f76aec76
KA
3599 if (!domain)
3600 return 0;
3601
8c11e798 3602 iommu = domain_get_iommu(domain);
88cb6a74 3603 size = aligned_nrpages(paddr, size);
f76aec76 3604
2aac6304
OP
3605 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3606 if (!iova_pfn)
f76aec76
KA
3607 goto error;
3608
ba395927
KA
3609 /*
3610 * Check if DMAR supports zero-length reads on write only
3611 * mappings..
3612 */
3613 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
8c11e798 3614 !cap_zlr(iommu->cap))
ba395927
KA
3615 prot |= DMA_PTE_READ;
3616 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3617 prot |= DMA_PTE_WRITE;
3618 /*
6865f0d1 3619 * paddr .. (paddr + size) might span only part of a page; we should map the whole
ba395927 3620 * page. Note: if two parts of one page are separately mapped, we
6865f0d1 3621 * might have two guest addresses mapping to the same host paddr, but this
ba395927
KA
3622 * is not a big problem
3623 */
2aac6304 3624 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
33041ec0 3625 mm_to_dma_pfn(paddr_pfn), size, prot);
ba395927
KA
3626 if (ret)
3627 goto error;
3628
1f0ef2aa
DW
3629 /* it's a non-present to present mapping. Only flush if caching mode is on */
3630 if (cap_caching_mode(iommu->cap))
a1ddcbe9 3631 iommu_flush_iotlb_psi(iommu, domain,
2aac6304 3632 mm_to_dma_pfn(iova_pfn),
a1ddcbe9 3633 size, 0, 1);
1f0ef2aa 3634 else
8c11e798 3635 iommu_flush_write_buffer(iommu);
f76aec76 3636
2aac6304 3637 start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
03d6a246
DW
3638 start_paddr += paddr & ~PAGE_MASK;
3639 return start_paddr;
ba395927 3640
ba395927 3641error:
2aac6304 3642 if (iova_pfn)
22e2f9fa 3643 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
9f10e5bf 3644 pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
5040a918 3645 dev_name(dev), size, (unsigned long long)paddr, dir);
ba395927
KA
3646 return 0;
3647}
3648
ffbbef5c
FT
3649static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3650 unsigned long offset, size_t size,
3651 enum dma_data_direction dir,
00085f1e 3652 unsigned long attrs)
bb9e6d65 3653{
ffbbef5c 3654 return __intel_map_single(dev, page_to_phys(page) + offset, size,
46333e37 3655 dir, *dev->dma_mask);
bb9e6d65
FT
3656}
3657
aa473240 3658static void flush_unmaps(struct deferred_flush_data *flush_data)
5e0d2a6f 3659{
80b20dd8 3660 int i, j;
5e0d2a6f 3661
aa473240 3662 flush_data->timer_on = 0;
5e0d2a6f 3663
3664 /* just flush them all */
3665 for (i = 0; i < g_num_of_iommus; i++) {
a2bb8459 3666 struct intel_iommu *iommu = g_iommus[i];
aa473240
OP
3667 struct deferred_flush_table *flush_table =
3668 &flush_data->tables[i];
a2bb8459
WH
3669 if (!iommu)
3670 continue;
c42d9f32 3671
aa473240 3672 if (!flush_table->next)
9dd2fe89
YZ
3673 continue;
3674
78d5f0f5
NA
3675 /* In caching mode, global flushes make emulation expensive */
3676 if (!cap_caching_mode(iommu->cap))
3677 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
93a23a72 3678 DMA_TLB_GLOBAL_FLUSH);
aa473240 3679 for (j = 0; j < flush_table->next; j++) {
93a23a72 3680 unsigned long mask;
314f1dc1 3681 struct deferred_flush_entry *entry =
aa473240 3682 &flush_table->entries[j];
2aac6304 3683 unsigned long iova_pfn = entry->iova_pfn;
769530e4 3684 unsigned long nrpages = entry->nrpages;
314f1dc1
OP
3685 struct dmar_domain *domain = entry->domain;
3686 struct page *freelist = entry->freelist;
78d5f0f5
NA
3687
3688 /* On real hardware multiple invalidations are expensive */
3689 if (cap_caching_mode(iommu->cap))
a1ddcbe9 3690 iommu_flush_iotlb_psi(iommu, domain,
2aac6304 3691 mm_to_dma_pfn(iova_pfn),
769530e4 3692 nrpages, !freelist, 0);
78d5f0f5 3693 else {
769530e4 3694 mask = ilog2(nrpages);
314f1dc1 3695 iommu_flush_dev_iotlb(domain,
2aac6304 3696 (uint64_t)iova_pfn << PAGE_SHIFT, mask);
78d5f0f5 3697 }
22e2f9fa 3698 free_iova_fast(&domain->iovad, iova_pfn, nrpages);
314f1dc1
OP
3699 if (freelist)
3700 dma_free_pagelist(freelist);
80b20dd8 3701 }
aa473240 3702 flush_table->next = 0;
5e0d2a6f 3703 }
3704
aa473240 3705 flush_data->size = 0;
5e0d2a6f 3706}
3707
aa473240 3708static void flush_unmaps_timeout(unsigned long cpuid)
5e0d2a6f 3709{
aa473240 3710 struct deferred_flush_data *flush_data = per_cpu_ptr(&deferred_flush, cpuid);
80b20dd8 3711 unsigned long flags;
3712
aa473240
OP
3713 spin_lock_irqsave(&flush_data->lock, flags);
3714 flush_unmaps(flush_data);
3715 spin_unlock_irqrestore(&flush_data->lock, flags);
5e0d2a6f 3716}
3717
2aac6304 3718static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
769530e4 3719 unsigned long nrpages, struct page *freelist)
5e0d2a6f 3720{
3721 unsigned long flags;
314f1dc1 3722 int entry_id, iommu_id;
8c11e798 3723 struct intel_iommu *iommu;
314f1dc1 3724 struct deferred_flush_entry *entry;
aa473240 3725 struct deferred_flush_data *flush_data;
5e0d2a6f 3726
58c4a95f 3727 flush_data = raw_cpu_ptr(&deferred_flush);
aa473240
OP
3728
3729 /* Flush all CPUs' entries to avoid deferring too much. If
3730 * this becomes a bottleneck, we can flush just this CPU and rely on
3731 * the flush timer for the rest.
3732 */
3733 if (flush_data->size == HIGH_WATER_MARK) {
3734 int cpu;
3735
3736 for_each_online_cpu(cpu)
3737 flush_unmaps_timeout(cpu);
3738 }
3739
3740 spin_lock_irqsave(&flush_data->lock, flags);
80b20dd8 3741
8c11e798
WH
3742 iommu = domain_get_iommu(dom);
3743 iommu_id = iommu->seq_id;
c42d9f32 3744
aa473240
OP
3745 entry_id = flush_data->tables[iommu_id].next;
3746 ++(flush_data->tables[iommu_id].next);
5e0d2a6f 3747
aa473240 3748 entry = &flush_data->tables[iommu_id].entries[entry_id];
314f1dc1 3749 entry->domain = dom;
2aac6304 3750 entry->iova_pfn = iova_pfn;
769530e4 3751 entry->nrpages = nrpages;
314f1dc1 3752 entry->freelist = freelist;
5e0d2a6f 3753
aa473240
OP
3754 if (!flush_data->timer_on) {
3755 mod_timer(&flush_data->timer, jiffies + msecs_to_jiffies(10));
3756 flush_data->timer_on = 1;
5e0d2a6f 3757 }
aa473240
OP
3758 flush_data->size++;
3759 spin_unlock_irqrestore(&flush_data->lock, flags);
5e0d2a6f 3760}
3761
769530e4 3762static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
ba395927 3763{
f76aec76 3764 struct dmar_domain *domain;
d794dc9b 3765 unsigned long start_pfn, last_pfn;
769530e4 3766 unsigned long nrpages;
2aac6304 3767 unsigned long iova_pfn;
8c11e798 3768 struct intel_iommu *iommu;
ea8ea460 3769 struct page *freelist;
ba395927 3770
73676832 3771 if (iommu_no_mapping(dev))
f76aec76 3772 return;
2c2e2c38 3773
1525a29a 3774 domain = find_domain(dev);
ba395927
KA
3775 BUG_ON(!domain);
3776
8c11e798
WH
3777 iommu = domain_get_iommu(domain);
3778
2aac6304 3779 iova_pfn = IOVA_PFN(dev_addr);
ba395927 3780
769530e4 3781 nrpages = aligned_nrpages(dev_addr, size);
2aac6304 3782 start_pfn = mm_to_dma_pfn(iova_pfn);
769530e4 3783 last_pfn = start_pfn + nrpages - 1;
ba395927 3784
d794dc9b 3785 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
207e3592 3786 dev_name(dev), start_pfn, last_pfn);
ba395927 3787
ea8ea460 3788 freelist = domain_unmap(domain, start_pfn, last_pfn);
d794dc9b 3789
5e0d2a6f 3790 if (intel_iommu_strict) {
a1ddcbe9 3791 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
769530e4 3792 nrpages, !freelist, 0);
5e0d2a6f 3793 /* free iova */
22e2f9fa 3794 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
ea8ea460 3795 dma_free_pagelist(freelist);
5e0d2a6f 3796 } else {
2aac6304 3797 add_unmap(domain, iova_pfn, nrpages, freelist);
5e0d2a6f 3798 /*
3799 * queue up the release of the unmap to save the roughly 1/6th of the
3800 * CPU time used up by the iotlb flush operation...
3801 */
5e0d2a6f 3802 }
ba395927
KA
3803}
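/*
 * Note: with intel_iommu=strict on the kernel command line
 * (intel_iommu_strict), the IOTLB invalidation and IOVA free happen
 * synchronously in intel_unmap() above; otherwise they are deferred through
 * add_unmap() and the per-CPU flush machinery.
 */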
3804
d41a4adb
JL
3805static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3806 size_t size, enum dma_data_direction dir,
00085f1e 3807 unsigned long attrs)
d41a4adb 3808{
769530e4 3809 intel_unmap(dev, dev_addr, size);
d41a4adb
JL
3810}
3811
5040a918 3812static void *intel_alloc_coherent(struct device *dev, size_t size,
baa676fc 3813 dma_addr_t *dma_handle, gfp_t flags,
00085f1e 3814 unsigned long attrs)
ba395927 3815{
36746436 3816 struct page *page = NULL;
ba395927
KA
3817 int order;
3818
5b6985ce 3819 size = PAGE_ALIGN(size);
ba395927 3820 order = get_order(size);
e8bb910d 3821
5040a918 3822 if (!iommu_no_mapping(dev))
e8bb910d 3823 flags &= ~(GFP_DMA | GFP_DMA32);
5040a918
DW
3824 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3825 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
e8bb910d
AW
3826 flags |= GFP_DMA;
3827 else
3828 flags |= GFP_DMA32;
3829 }
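/*
 * When the IOMMU translates for this device there is no need to allocate
 * from the low-memory DMA zones, so GFP_DMA/GFP_DMA32 were stripped above;
 * for bypassed (iommu_no_mapping) devices the zone is chosen from the
 * device's coherent_dma_mask instead.
 */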
ba395927 3830
d0164adc 3831 if (gfpflags_allow_blocking(flags)) {
36746436
AM
3832 unsigned int count = size >> PAGE_SHIFT;
3833
712c604d 3834 page = dma_alloc_from_contiguous(dev, count, order, flags);
36746436
AM
3835 if (page && iommu_no_mapping(dev) &&
3836 page_to_phys(page) + size > dev->coherent_dma_mask) {
3837 dma_release_from_contiguous(dev, page, count);
3838 page = NULL;
3839 }
3840 }
3841
3842 if (!page)
3843 page = alloc_pages(flags, order);
3844 if (!page)
ba395927 3845 return NULL;
36746436 3846 memset(page_address(page), 0, size);
ba395927 3847
36746436 3848 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
bb9e6d65 3849 DMA_BIDIRECTIONAL,
5040a918 3850 dev->coherent_dma_mask);
ba395927 3851 if (*dma_handle)
36746436
AM
3852 return page_address(page);
3853 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3854 __free_pages(page, order);
3855
ba395927
KA
3856 return NULL;
3857}
3858
5040a918 3859static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
00085f1e 3860 dma_addr_t dma_handle, unsigned long attrs)
ba395927
KA
3861{
3862 int order;
36746436 3863 struct page *page = virt_to_page(vaddr);
ba395927 3864
5b6985ce 3865 size = PAGE_ALIGN(size);
ba395927
KA
3866 order = get_order(size);
3867
769530e4 3868 intel_unmap(dev, dma_handle, size);
36746436
AM
3869 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3870 __free_pages(page, order);
ba395927
KA
3871}
3872
5040a918 3873static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
d7ab5c46 3874 int nelems, enum dma_data_direction dir,
00085f1e 3875 unsigned long attrs)
ba395927 3876{
769530e4
OP
3877 dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
3878 unsigned long nrpages = 0;
3879 struct scatterlist *sg;
3880 int i;
3881
3882 for_each_sg(sglist, sg, nelems, i) {
3883 nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
3884 }
3885
3886 intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
ba395927
KA
3887}
3888
ba395927 3889static int intel_nontranslate_map_sg(struct device *hddev,
c03ab37c 3890 struct scatterlist *sglist, int nelems, int dir)
ba395927
KA
3891{
3892 int i;
c03ab37c 3893 struct scatterlist *sg;
ba395927 3894
c03ab37c 3895 for_each_sg(sglist, sg, nelems, i) {
12d4d40e 3896 BUG_ON(!sg_page(sg));
3e6110fd 3897 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
c03ab37c 3898 sg->dma_length = sg->length;
ba395927
KA
3899 }
3900 return nelems;
3901}
3902
5040a918 3903static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
00085f1e 3904 enum dma_data_direction dir, unsigned long attrs)
ba395927 3905{
ba395927 3906 int i;
ba395927 3907 struct dmar_domain *domain;
f76aec76
KA
3908 size_t size = 0;
3909 int prot = 0;
2aac6304 3910 unsigned long iova_pfn;
f76aec76 3911 int ret;
c03ab37c 3912 struct scatterlist *sg;
b536d24d 3913 unsigned long start_vpfn;
8c11e798 3914 struct intel_iommu *iommu;
ba395927
KA
3915
3916 BUG_ON(dir == DMA_NONE);
5040a918
DW
3917 if (iommu_no_mapping(dev))
3918 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
ba395927 3919
5040a918 3920 domain = get_valid_domain_for_dev(dev);
f76aec76
KA
3921 if (!domain)
3922 return 0;
3923
8c11e798
WH
3924 iommu = domain_get_iommu(domain);
3925
b536d24d 3926 for_each_sg(sglist, sg, nelems, i)
88cb6a74 3927 size += aligned_nrpages(sg->offset, sg->length);
f76aec76 3928
2aac6304 3929 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
5040a918 3930 *dev->dma_mask);
2aac6304 3931 if (!iova_pfn) {
c03ab37c 3932 sglist->dma_length = 0;
f76aec76
KA
3933 return 0;
3934 }
3935
3936 /*
3937 * Check if DMAR supports zero-length reads on write only
3938 * mappings..
3939 */
3940 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
8c11e798 3941 !cap_zlr(iommu->cap))
f76aec76
KA
3942 prot |= DMA_PTE_READ;
3943 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3944 prot |= DMA_PTE_WRITE;
3945
2aac6304 3946 start_vpfn = mm_to_dma_pfn(iova_pfn);
e1605495 3947
f532959b 3948 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
e1605495 3949 if (unlikely(ret)) {
e1605495 3950 dma_pte_free_pagetable(domain, start_vpfn,
bc24c571
DD
3951 start_vpfn + size - 1,
3952 agaw_to_level(domain->agaw) + 1);
22e2f9fa 3953 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
e1605495 3954 return 0;
ba395927
KA
3955 }
3956
1f0ef2aa
DW
3957 /* it's a non-present to present mapping. Only flush if caching mode is on */
3958 if (cap_caching_mode(iommu->cap))
a1ddcbe9 3959 iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
1f0ef2aa 3960 else
8c11e798 3961 iommu_flush_write_buffer(iommu);
1f0ef2aa 3962
ba395927
KA
3963 return nelems;
3964}
3965
dfb805e8
FT
3966static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3967{
3968 return !dma_addr;
3969}
3970
01e1932a 3971const struct dma_map_ops intel_dma_ops = {
baa676fc
AP
3972 .alloc = intel_alloc_coherent,
3973 .free = intel_free_coherent,
ba395927
KA
3974 .map_sg = intel_map_sg,
3975 .unmap_sg = intel_unmap_sg,
ffbbef5c
FT
3976 .map_page = intel_map_page,
3977 .unmap_page = intel_unmap_page,
dfb805e8 3978 .mapping_error = intel_mapping_error,
ba395927
KA
3979};
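/*
 * Drivers do not call these ops directly. With dma_ops pointed at
 * intel_dma_ops (see intel_iommu_init() below), a generic streaming-DMA call
 * such as the following sketch ('buf' and 'len' are hypothetical):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 * dispatches to intel_map_page() above, which allocates an IOVA and installs
 * the IOMMU page-table entries backing it.
 */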
3980
3981static inline int iommu_domain_cache_init(void)
3982{
3983 int ret = 0;
3984
3985 iommu_domain_cache = kmem_cache_create("iommu_domain",
3986 sizeof(struct dmar_domain),
3987 0,
3988 SLAB_HWCACHE_ALIGN,
3989
3990 NULL);
3991 if (!iommu_domain_cache) {
9f10e5bf 3992 pr_err("Couldn't create iommu_domain cache\n");
ba395927
KA
3993 ret = -ENOMEM;
3994 }
3995
3996 return ret;
3997}
3998
3999static inline int iommu_devinfo_cache_init(void)
4000{
4001 int ret = 0;
4002
4003 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
4004 sizeof(struct device_domain_info),
4005 0,
4006 SLAB_HWCACHE_ALIGN,
ba395927
KA
4007 NULL);
4008 if (!iommu_devinfo_cache) {
9f10e5bf 4009 pr_err("Couldn't create devinfo cache\n");
ba395927
KA
4010 ret = -ENOMEM;
4011 }
4012
4013 return ret;
4014}
4015
ba395927
KA
4016static int __init iommu_init_mempool(void)
4017{
4018 int ret;
ae1ff3d6 4019 ret = iova_cache_get();
ba395927
KA
4020 if (ret)
4021 return ret;
4022
4023 ret = iommu_domain_cache_init();
4024 if (ret)
4025 goto domain_error;
4026
4027 ret = iommu_devinfo_cache_init();
4028 if (!ret)
4029 return ret;
4030
4031 kmem_cache_destroy(iommu_domain_cache);
4032domain_error:
ae1ff3d6 4033 iova_cache_put();
ba395927
KA
4034
4035 return -ENOMEM;
4036}
4037
4038static void __init iommu_exit_mempool(void)
4039{
4040 kmem_cache_destroy(iommu_devinfo_cache);
4041 kmem_cache_destroy(iommu_domain_cache);
ae1ff3d6 4042 iova_cache_put();
ba395927
KA
4043}
4044
556ab45f
DW
4045static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
4046{
4047 struct dmar_drhd_unit *drhd;
4048 u32 vtbar;
4049 int rc;
4050
4051 /* We know that this device on this chipset has its own IOMMU.
4052 * If we find it under a different IOMMU, then the BIOS is lying
4053 * to us. Hope that the IOMMU for this device is actually
4054 * disabled, and it needs no translation...
4055 */
4056 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
4057 if (rc) {
4058 /* "can't" happen */
4059 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
4060 return;
4061 }
4062 vtbar &= 0xffff0000;
4063
4064 /* we know that this iommu should be at offset 0xa000 from vtbar */
4065 drhd = dmar_find_matched_drhd_unit(pdev);
4066 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
4067 TAINT_FIRMWARE_WORKAROUND,
4068 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
4069 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
4070}
4071DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
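/*
 * DECLARE_PCI_FIXUP_ENABLE registers the quirk above to run each time the
 * IOAT (QuickData) function is enabled via pci_enable_device().
 */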
4072
ba395927
KA
4073static void __init init_no_remapping_devices(void)
4074{
4075 struct dmar_drhd_unit *drhd;
832bd858 4076 struct device *dev;
b683b230 4077 int i;
ba395927
KA
4078
4079 for_each_drhd_unit(drhd) {
4080 if (!drhd->include_all) {
b683b230
JL
4081 for_each_active_dev_scope(drhd->devices,
4082 drhd->devices_cnt, i, dev)
4083 break;
832bd858 4084 /* ignore DMAR unit if no devices exist */
ba395927
KA
4085 if (i == drhd->devices_cnt)
4086 drhd->ignored = 1;
4087 }
4088 }
4089
7c919779 4090 for_each_active_drhd_unit(drhd) {
7c919779 4091 if (drhd->include_all)
ba395927
KA
4092 continue;
4093
b683b230
JL
4094 for_each_active_dev_scope(drhd->devices,
4095 drhd->devices_cnt, i, dev)
832bd858 4096 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
ba395927 4097 break;
ba395927
KA
4098 if (i < drhd->devices_cnt)
4099 continue;
4100
c0771df8
DW
4101 /* This IOMMU has *only* gfx devices. Either bypass it or
4102 set the gfx_mapped flag, as appropriate */
4103 if (dmar_map_gfx) {
4104 intel_iommu_gfx_mapped = 1;
4105 } else {
4106 drhd->ignored = 1;
b683b230
JL
4107 for_each_active_dev_scope(drhd->devices,
4108 drhd->devices_cnt, i, dev)
832bd858 4109 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
ba395927
KA
4110 }
4111 }
4112}
4113
f59c7b69
FY
4114#ifdef CONFIG_SUSPEND
4115static int init_iommu_hw(void)
4116{
4117 struct dmar_drhd_unit *drhd;
4118 struct intel_iommu *iommu = NULL;
4119
4120 for_each_active_iommu(iommu, drhd)
4121 if (iommu->qi)
4122 dmar_reenable_qi(iommu);
4123
b779260b
JC
4124 for_each_iommu(iommu, drhd) {
4125 if (drhd->ignored) {
4126 /*
4127 * we always have to disable PMRs or DMA may fail on
4128 * this device
4129 */
4130 if (force_on)
4131 iommu_disable_protect_mem_regions(iommu);
4132 continue;
4133 }
4134
f59c7b69
FY
4135 iommu_flush_write_buffer(iommu);
4136
4137 iommu_set_root_entry(iommu);
4138
4139 iommu->flush.flush_context(iommu, 0, 0, 0,
1f0ef2aa 4140 DMA_CCMD_GLOBAL_INVL);
2a41ccee
JL
4141 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4142 iommu_enable_translation(iommu);
b94996c9 4143 iommu_disable_protect_mem_regions(iommu);
f59c7b69
FY
4144 }
4145
4146 return 0;
4147}
4148
4149static void iommu_flush_all(void)
4150{
4151 struct dmar_drhd_unit *drhd;
4152 struct intel_iommu *iommu;
4153
4154 for_each_active_iommu(iommu, drhd) {
4155 iommu->flush.flush_context(iommu, 0, 0, 0,
1f0ef2aa 4156 DMA_CCMD_GLOBAL_INVL);
f59c7b69 4157 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
1f0ef2aa 4158 DMA_TLB_GLOBAL_FLUSH);
f59c7b69
FY
4159 }
4160}
4161
134fac3f 4162static int iommu_suspend(void)
f59c7b69
FY
4163{
4164 struct dmar_drhd_unit *drhd;
4165 struct intel_iommu *iommu = NULL;
4166 unsigned long flag;
4167
4168 for_each_active_iommu(iommu, drhd) {
4169 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
4170 GFP_ATOMIC);
4171 if (!iommu->iommu_state)
4172 goto nomem;
4173 }
4174
4175 iommu_flush_all();
4176
4177 for_each_active_iommu(iommu, drhd) {
4178 iommu_disable_translation(iommu);
4179
1f5b3c3f 4180 raw_spin_lock_irqsave(&iommu->register_lock, flag);
f59c7b69
FY
4181
4182 iommu->iommu_state[SR_DMAR_FECTL_REG] =
4183 readl(iommu->reg + DMAR_FECTL_REG);
4184 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
4185 readl(iommu->reg + DMAR_FEDATA_REG);
4186 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
4187 readl(iommu->reg + DMAR_FEADDR_REG);
4188 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
4189 readl(iommu->reg + DMAR_FEUADDR_REG);
4190
1f5b3c3f 4191 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
f59c7b69
FY
4192 }
4193 return 0;
4194
4195nomem:
4196 for_each_active_iommu(iommu, drhd)
4197 kfree(iommu->iommu_state);
4198
4199 return -ENOMEM;
4200}
4201
134fac3f 4202static void iommu_resume(void)
f59c7b69
FY
4203{
4204 struct dmar_drhd_unit *drhd;
4205 struct intel_iommu *iommu = NULL;
4206 unsigned long flag;
4207
4208 if (init_iommu_hw()) {
b779260b
JC
4209 if (force_on)
4210 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
4211 else
4212 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
134fac3f 4213 return;
f59c7b69
FY
4214 }
4215
4216 for_each_active_iommu(iommu, drhd) {
4217
1f5b3c3f 4218 raw_spin_lock_irqsave(&iommu->register_lock, flag);
f59c7b69
FY
4219
4220 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
4221 iommu->reg + DMAR_FECTL_REG);
4222 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
4223 iommu->reg + DMAR_FEDATA_REG);
4224 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
4225 iommu->reg + DMAR_FEADDR_REG);
4226 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
4227 iommu->reg + DMAR_FEUADDR_REG);
4228
1f5b3c3f 4229 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
f59c7b69
FY
4230 }
4231
4232 for_each_active_iommu(iommu, drhd)
4233 kfree(iommu->iommu_state);
f59c7b69
FY
4234}
4235
134fac3f 4236static struct syscore_ops iommu_syscore_ops = {
f59c7b69
FY
4237 .resume = iommu_resume,
4238 .suspend = iommu_suspend,
4239};
4240
134fac3f 4241static void __init init_iommu_pm_ops(void)
f59c7b69 4242{
134fac3f 4243 register_syscore_ops(&iommu_syscore_ops);
f59c7b69
FY
4244}
4245
4246#else
99592ba4 4247static inline void init_iommu_pm_ops(void) {}
f59c7b69
FY
4248#endif /* CONFIG_PM */
4249
318fe7df 4250
c2a0b538 4251int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
318fe7df
SS
4252{
4253 struct acpi_dmar_reserved_memory *rmrr;
0659b8dc 4254 int prot = DMA_PTE_READ|DMA_PTE_WRITE;
318fe7df 4255 struct dmar_rmrr_unit *rmrru;
0659b8dc 4256 size_t length;
318fe7df
SS
4257
4258 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
4259 if (!rmrru)
0659b8dc 4260 goto out;
318fe7df
SS
4261
4262 rmrru->hdr = header;
4263 rmrr = (struct acpi_dmar_reserved_memory *)header;
4264 rmrru->base_address = rmrr->base_address;
4265 rmrru->end_address = rmrr->end_address;
0659b8dc
EA
4266
4267 length = rmrr->end_address - rmrr->base_address + 1;
4268 rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
4269 IOMMU_RESV_DIRECT);
4270 if (!rmrru->resv)
4271 goto free_rmrru;
4272
2e455289
JL
4273 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
4274 ((void *)rmrr) + rmrr->header.length,
4275 &rmrru->devices_cnt);
0659b8dc
EA
4276 if (rmrru->devices_cnt && rmrru->devices == NULL)
4277 goto free_all;
318fe7df 4278
2e455289 4279 list_add(&rmrru->list, &dmar_rmrr_units);
318fe7df 4280
2e455289 4281 return 0;
0659b8dc
EA
4282free_all:
4283 kfree(rmrru->resv);
4284free_rmrru:
4285 kfree(rmrru);
4286out:
4287 return -ENOMEM;
318fe7df
SS
4288}
4289
6b197249
JL
4290static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
4291{
4292 struct dmar_atsr_unit *atsru;
4293 struct acpi_dmar_atsr *tmp;
4294
4295 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4296 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
4297 if (atsr->segment != tmp->segment)
4298 continue;
4299 if (atsr->header.length != tmp->header.length)
4300 continue;
4301 if (memcmp(atsr, tmp, atsr->header.length) == 0)
4302 return atsru;
4303 }
4304
4305 return NULL;
4306}
4307
4308int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
318fe7df
SS
4309{
4310 struct acpi_dmar_atsr *atsr;
4311 struct dmar_atsr_unit *atsru;
4312
6b197249
JL
4313 if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
4314 return 0;
4315
318fe7df 4316 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
6b197249
JL
4317 atsru = dmar_find_atsr(atsr);
4318 if (atsru)
4319 return 0;
4320
4321 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
318fe7df
SS
4322 if (!atsru)
4323 return -ENOMEM;
4324
6b197249
JL
4325 /*
4326 * If the memory was allocated from the slab by an ACPI _DSM method, we need to
4327 * copy the memory content because the memory buffer will be freed
4328 * on return.
4329 */
4330 atsru->hdr = (void *)(atsru + 1);
4331 memcpy(atsru->hdr, hdr, hdr->length);
318fe7df 4332 atsru->include_all = atsr->flags & 0x1;
2e455289
JL
4333 if (!atsru->include_all) {
4334 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
4335 (void *)atsr + atsr->header.length,
4336 &atsru->devices_cnt);
4337 if (atsru->devices_cnt && atsru->devices == NULL) {
4338 kfree(atsru);
4339 return -ENOMEM;
4340 }
4341 }
318fe7df 4342
0e242612 4343 list_add_rcu(&atsru->list, &dmar_atsr_units);
318fe7df
SS
4344
4345 return 0;
4346}
4347
9bdc531e
JL
4348static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
4349{
4350 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
4351 kfree(atsru);
4352}
4353
6b197249
JL
4354int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4355{
4356 struct acpi_dmar_atsr *atsr;
4357 struct dmar_atsr_unit *atsru;
4358
4359 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4360 atsru = dmar_find_atsr(atsr);
4361 if (atsru) {
4362 list_del_rcu(&atsru->list);
4363 synchronize_rcu();
4364 intel_iommu_free_atsr(atsru);
4365 }
4366
4367 return 0;
4368}
4369
4370int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4371{
4372 int i;
4373 struct device *dev;
4374 struct acpi_dmar_atsr *atsr;
4375 struct dmar_atsr_unit *atsru;
4376
4377 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4378 atsru = dmar_find_atsr(atsr);
4379 if (!atsru)
4380 return 0;
4381
194dc870 4382 if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
6b197249
JL
4383 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
4384 i, dev)
4385 return -EBUSY;
194dc870 4386 }
6b197249
JL
4387
4388 return 0;
4389}
4390
ffebeb46
JL
4391static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
4392{
4393 int sp, ret = 0;
4394 struct intel_iommu *iommu = dmaru->iommu;
4395
4396 if (g_iommus[iommu->seq_id])
4397 return 0;
4398
4399 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
9f10e5bf 4400 pr_warn("%s: Doesn't support hardware pass through.\n",
ffebeb46
JL
4401 iommu->name);
4402 return -ENXIO;
4403 }
4404 if (!ecap_sc_support(iommu->ecap) &&
4405 domain_update_iommu_snooping(iommu)) {
9f10e5bf 4406 pr_warn("%s: Doesn't support snooping.\n",
ffebeb46
JL
4407 iommu->name);
4408 return -ENXIO;
4409 }
4410 sp = domain_update_iommu_superpage(iommu) - 1;
4411 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
9f10e5bf 4412 pr_warn("%s: Doesn't support large page.\n",
ffebeb46
JL
4413 iommu->name);
4414 return -ENXIO;
4415 }
4416
4417 /*
4418 * Disable translation if already enabled prior to OS handover.
4419 */
4420 if (iommu->gcmd & DMA_GCMD_TE)
4421 iommu_disable_translation(iommu);
4422
4423 g_iommus[iommu->seq_id] = iommu;
4424 ret = iommu_init_domains(iommu);
4425 if (ret == 0)
4426 ret = iommu_alloc_root_entry(iommu);
4427 if (ret)
4428 goto out;
4429
8a94ade4
DW
4430#ifdef CONFIG_INTEL_IOMMU_SVM
4431 if (pasid_enabled(iommu))
4432 intel_svm_alloc_pasid_tables(iommu);
4433#endif
4434
ffebeb46
JL
4435 if (dmaru->ignored) {
4436 /*
4437 * we always have to disable PMRs or DMA may fail on this device
4438 */
4439 if (force_on)
4440 iommu_disable_protect_mem_regions(iommu);
4441 return 0;
4442 }
4443
4444 intel_iommu_init_qi(iommu);
4445 iommu_flush_write_buffer(iommu);
a222a7f0
DW
4446
4447#ifdef CONFIG_INTEL_IOMMU_SVM
4448 if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
4449 ret = intel_svm_enable_prq(iommu);
4450 if (ret)
4451 goto disable_iommu;
4452 }
4453#endif
ffebeb46
JL
4454 ret = dmar_set_interrupt(iommu);
4455 if (ret)
4456 goto disable_iommu;
4457
4458 iommu_set_root_entry(iommu);
4459 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
4460 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4461 iommu_enable_translation(iommu);
4462
ffebeb46
JL
4463 iommu_disable_protect_mem_regions(iommu);
4464 return 0;
4465
4466disable_iommu:
4467 disable_dmar_iommu(iommu);
4468out:
4469 free_dmar_iommu(iommu);
4470 return ret;
4471}
4472
6b197249
JL
4473int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
4474{
ffebeb46
JL
4475 int ret = 0;
4476 struct intel_iommu *iommu = dmaru->iommu;
4477
4478 if (!intel_iommu_enabled)
4479 return 0;
4480 if (iommu == NULL)
4481 return -EINVAL;
4482
4483 if (insert) {
4484 ret = intel_iommu_add(dmaru);
4485 } else {
4486 disable_dmar_iommu(iommu);
4487 free_dmar_iommu(iommu);
4488 }
4489
4490 return ret;
6b197249
JL
4491}
4492
9bdc531e
JL
4493static void intel_iommu_free_dmars(void)
4494{
4495 struct dmar_rmrr_unit *rmrru, *rmrr_n;
4496 struct dmar_atsr_unit *atsru, *atsr_n;
4497
4498 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
4499 list_del(&rmrru->list);
4500 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
0659b8dc 4501 kfree(rmrru->resv);
9bdc531e 4502 kfree(rmrru);
318fe7df
SS
4503 }
4504
9bdc531e
JL
4505 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
4506 list_del(&atsru->list);
4507 intel_iommu_free_atsr(atsru);
4508 }
318fe7df
SS
4509}
4510
4511int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4512{
b683b230 4513 int i, ret = 1;
318fe7df 4514 struct pci_bus *bus;
832bd858
DW
4515 struct pci_dev *bridge = NULL;
4516 struct device *tmp;
318fe7df
SS
4517 struct acpi_dmar_atsr *atsr;
4518 struct dmar_atsr_unit *atsru;
4519
4520 dev = pci_physfn(dev);
318fe7df 4521 for (bus = dev->bus; bus; bus = bus->parent) {
b5f82ddf 4522 bridge = bus->self;
d14053b3
DW
4523 /* If it's an integrated device, allow ATS */
4524 if (!bridge)
4525 return 1;
4526 /* Connected via non-PCIe: no ATS */
4527 if (!pci_is_pcie(bridge) ||
62f87c0e 4528 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
318fe7df 4529 return 0;
d14053b3 4530 /* If we found the root port, look it up in the ATSR */
b5f82ddf 4531 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
318fe7df 4532 break;
318fe7df
SS
4533 }
4534
0e242612 4535 rcu_read_lock();
b5f82ddf
JL
4536 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4537 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4538 if (atsr->segment != pci_domain_nr(dev->bus))
4539 continue;
4540
b683b230 4541 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
832bd858 4542 if (tmp == &bridge->dev)
b683b230 4543 goto out;
b5f82ddf
JL
4544
4545 if (atsru->include_all)
b683b230 4546 goto out;
b5f82ddf 4547 }
b683b230
JL
4548 ret = 0;
4549out:
0e242612 4550 rcu_read_unlock();
318fe7df 4551
b683b230 4552 return ret;
318fe7df
SS
4553}
4554
59ce0515
JL
4555int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4556{
4557 int ret = 0;
4558 struct dmar_rmrr_unit *rmrru;
4559 struct dmar_atsr_unit *atsru;
4560 struct acpi_dmar_atsr *atsr;
4561 struct acpi_dmar_reserved_memory *rmrr;
4562
4563 if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
4564 return 0;
4565
4566 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4567 rmrr = container_of(rmrru->hdr,
4568 struct acpi_dmar_reserved_memory, header);
4569 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4570 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4571 ((void *)rmrr) + rmrr->header.length,
4572 rmrr->segment, rmrru->devices,
4573 rmrru->devices_cnt);
27e24950 4574 if (ret < 0)
59ce0515 4575 return ret;
e6a8c9b3 4576 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
27e24950
JL
4577 dmar_remove_dev_scope(info, rmrr->segment,
4578 rmrru->devices, rmrru->devices_cnt);
59ce0515
JL
4579 }
4580 }
4581
4582 list_for_each_entry(atsru, &dmar_atsr_units, list) {
4583 if (atsru->include_all)
4584 continue;
4585
4586 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4587 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4588 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4589 (void *)atsr + atsr->header.length,
4590 atsr->segment, atsru->devices,
4591 atsru->devices_cnt);
4592 if (ret > 0)
4593 break;
4595 else if (ret < 0)
4595 return ret;
e6a8c9b3 4596 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
59ce0515
JL
4597 if (dmar_remove_dev_scope(info, atsr->segment,
4598 atsru->devices, atsru->devices_cnt))
4599 break;
4600 }
4601 }
4602
4603 return 0;
4604}
4605
99dcaded
FY
4606/*
4607 * Here we only respond to a device being unbound from its driver.
4608 *
4609 * A newly added device is not attached to its DMAR domain here yet. That will
4610 * happen when the device is mapped to an iova.
4611 */
4612static int device_notifier(struct notifier_block *nb,
4613 unsigned long action, void *data)
4614{
4615 struct device *dev = data;
99dcaded
FY
4616 struct dmar_domain *domain;
4617
3d89194a 4618 if (iommu_dummy(dev))
44cd613c
DW
4619 return 0;
4620
1196c2fb 4621 if (action != BUS_NOTIFY_REMOVED_DEVICE)
7e7dfab7
JL
4622 return 0;
4623
1525a29a 4624 domain = find_domain(dev);
99dcaded
FY
4625 if (!domain)
4626 return 0;
4627
e6de0f8d 4628 dmar_remove_one_dev_info(domain, dev);
ab8dfe25 4629 if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
7e7dfab7 4630 domain_exit(domain);
a97590e5 4631
99dcaded
FY
4632 return 0;
4633}
4634
4635static struct notifier_block device_nb = {
4636 .notifier_call = device_notifier,
4637};
4638
75f05569
JL
4639static int intel_iommu_memory_notifier(struct notifier_block *nb,
4640 unsigned long val, void *v)
4641{
4642 struct memory_notify *mhp = v;
4643 unsigned long long start, end;
4644 unsigned long start_vpfn, last_vpfn;
4645
4646 switch (val) {
4647 case MEM_GOING_ONLINE:
4648 start = mhp->start_pfn << PAGE_SHIFT;
4649 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4650 if (iommu_domain_identity_map(si_domain, start, end)) {
9f10e5bf 4651 pr_warn("Failed to build identity map for [%llx-%llx]\n",
75f05569
JL
4652 start, end);
4653 return NOTIFY_BAD;
4654 }
4655 break;
4656
4657 case MEM_OFFLINE:
4658 case MEM_CANCEL_ONLINE:
4659 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4660 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4661 while (start_vpfn <= last_vpfn) {
4662 struct iova *iova;
4663 struct dmar_drhd_unit *drhd;
4664 struct intel_iommu *iommu;
ea8ea460 4665 struct page *freelist;
75f05569
JL
4666
4667 iova = find_iova(&si_domain->iovad, start_vpfn);
4668 if (iova == NULL) {
9f10e5bf 4669 pr_debug("Failed get IOVA for PFN %lx\n",
75f05569
JL
4670 start_vpfn);
4671 break;
4672 }
4673
4674 iova = split_and_remove_iova(&si_domain->iovad, iova,
4675 start_vpfn, last_vpfn);
4676 if (iova == NULL) {
9f10e5bf 4677 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
75f05569
JL
4678 start_vpfn, last_vpfn);
4679 return NOTIFY_BAD;
4680 }
4681
ea8ea460
DW
4682 freelist = domain_unmap(si_domain, iova->pfn_lo,
4683 iova->pfn_hi);
4684
75f05569
JL
4685 rcu_read_lock();
4686 for_each_active_iommu(iommu, drhd)
a1ddcbe9 4687 iommu_flush_iotlb_psi(iommu, si_domain,
a156ef99 4688 iova->pfn_lo, iova_size(iova),
ea8ea460 4689 !freelist, 0);
75f05569 4690 rcu_read_unlock();
ea8ea460 4691 dma_free_pagelist(freelist);
75f05569
JL
4692
4693 start_vpfn = iova->pfn_hi + 1;
4694 free_iova_mem(iova);
4695 }
4696 break;
4697 }
4698
4699 return NOTIFY_OK;
4700}
4701
4702static struct notifier_block intel_iommu_memory_nb = {
4703 .notifier_call = intel_iommu_memory_notifier,
4704 .priority = 0
4705};
4706
22e2f9fa
OP
4707static void free_all_cpu_cached_iovas(unsigned int cpu)
4708{
4709 int i;
4710
4711 for (i = 0; i < g_num_of_iommus; i++) {
4712 struct intel_iommu *iommu = g_iommus[i];
4713 struct dmar_domain *domain;
0caa7616 4714 int did;
22e2f9fa
OP
4715
4716 if (!iommu)
4717 continue;
4718
3bd4f911 4719 for (did = 0; did < cap_ndoms(iommu->cap); did++) {
0caa7616 4720 domain = get_iommu_domain(iommu, (u16)did);
22e2f9fa
OP
4721
4722 if (!domain)
4723 continue;
4724 free_cpu_cached_iovas(cpu, &domain->iovad);
4725 }
4726 }
4727}
4728
21647615 4729static int intel_iommu_cpu_dead(unsigned int cpu)
aa473240 4730{
21647615
AMG
4731 free_all_cpu_cached_iovas(cpu);
4732 flush_unmaps_timeout(cpu);
4733 return 0;
aa473240
OP
4734}
4735
161b28aa
JR
4736static void intel_disable_iommus(void)
4737{
4738 struct intel_iommu *iommu = NULL;
4739 struct dmar_drhd_unit *drhd;
4740
4741 for_each_iommu(iommu, drhd)
4742 iommu_disable_translation(iommu);
4743}
4744
a7fdb6e6
JR
4745static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
4746{
4747 return container_of(dev, struct intel_iommu, iommu.dev);
4748}
4749
a5459cfe
AW
4750static ssize_t intel_iommu_show_version(struct device *dev,
4751 struct device_attribute *attr,
4752 char *buf)
4753{
a7fdb6e6 4754 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
a5459cfe
AW
4755 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4756 return sprintf(buf, "%d:%d\n",
4757 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4758}
4759static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4760
4761static ssize_t intel_iommu_show_address(struct device *dev,
4762 struct device_attribute *attr,
4763 char *buf)
4764{
a7fdb6e6 4765 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
a5459cfe
AW
4766 return sprintf(buf, "%llx\n", iommu->reg_phys);
4767}
4768static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4769
4770static ssize_t intel_iommu_show_cap(struct device *dev,
4771 struct device_attribute *attr,
4772 char *buf)
4773{
a7fdb6e6 4774 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
a5459cfe
AW
4775 return sprintf(buf, "%llx\n", iommu->cap);
4776}
4777static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4778
4779static ssize_t intel_iommu_show_ecap(struct device *dev,
4780 struct device_attribute *attr,
4781 char *buf)
4782{
a7fdb6e6 4783 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
a5459cfe
AW
4784 return sprintf(buf, "%llx\n", iommu->ecap);
4785}
4786static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4787
2238c082
AW
4788static ssize_t intel_iommu_show_ndoms(struct device *dev,
4789 struct device_attribute *attr,
4790 char *buf)
4791{
a7fdb6e6 4792 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
2238c082
AW
4793 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4794}
4795static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4796
4797static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4798 struct device_attribute *attr,
4799 char *buf)
4800{
a7fdb6e6 4801 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
2238c082
AW
4802 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4803 cap_ndoms(iommu->cap)));
4804}
4805static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
4806
a5459cfe
AW
4807static struct attribute *intel_iommu_attrs[] = {
4808 &dev_attr_version.attr,
4809 &dev_attr_address.attr,
4810 &dev_attr_cap.attr,
4811 &dev_attr_ecap.attr,
2238c082
AW
4812 &dev_attr_domains_supported.attr,
4813 &dev_attr_domains_used.attr,
a5459cfe
AW
4814 NULL,
4815};
4816
4817static struct attribute_group intel_iommu_group = {
4818 .name = "intel-iommu",
4819 .attrs = intel_iommu_attrs,
4820};
4821
4822const struct attribute_group *intel_iommu_groups[] = {
4823 &intel_iommu_group,
4824 NULL,
4825};
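/*
 * These attributes are exposed through the iommu class devices registered in
 * intel_iommu_init() below; on a typical system they show up under e.g.
 * /sys/class/iommu/dmar0/intel-iommu/{version,address,cap,ecap,...}.
 */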
4826
ba395927
KA
4827int __init intel_iommu_init(void)
4828{
9bdc531e 4829 int ret = -ENODEV;
3a93c841 4830 struct dmar_drhd_unit *drhd;
7c919779 4831 struct intel_iommu *iommu;
ba395927 4832
a59b50e9
JC
4833 /* VT-d is required for a TXT/tboot launch, so enforce that */
4834 force_on = tboot_force_iommu();
4835
3a5670e8
JL
4836 if (iommu_init_mempool()) {
4837 if (force_on)
4838 panic("tboot: Failed to initialize iommu memory\n");
4839 return -ENOMEM;
4840 }
4841
4842 down_write(&dmar_global_lock);
a59b50e9
JC
4843 if (dmar_table_init()) {
4844 if (force_on)
4845 panic("tboot: Failed to initialize DMAR table\n");
9bdc531e 4846 goto out_free_dmar;
a59b50e9 4847 }
ba395927 4848
c2c7286a 4849 if (dmar_dev_scope_init() < 0) {
a59b50e9
JC
4850 if (force_on)
4851 panic("tboot: Failed to initialize DMAR device scope\n");
9bdc531e 4852 goto out_free_dmar;
a59b50e9 4853 }
1886e8a9 4854
161b28aa 4855 if (no_iommu || dmar_disabled) {
bfd20f1c
SL
4856 /*
4857 * We exit the function here to ensure the IOMMU's remapping and
4858 * mempool aren't set up, which means that the IOMMU's PMRs
4859 * won't be disabled via the call to init_dmars(). So disable
4860 * them explicitly here. The PMRs were set up by tboot prior to
4861 * calling SENTER, but the kernel is expected to reset/tear
4862 * down the PMRs.
4863 */
4864 if (intel_iommu_tboot_noforce) {
4865 for_each_iommu(iommu, drhd)
4866 iommu_disable_protect_mem_regions(iommu);
4867 }
4868
161b28aa
JR
4869 /*
4870 * Make sure the IOMMUs are switched off, even when we
4871 * boot into a kexec kernel and the previous kernel left
4872 * them enabled
4873 */
4874 intel_disable_iommus();
9bdc531e 4875 goto out_free_dmar;
161b28aa 4876 }
2ae21010 4877
318fe7df 4878 if (list_empty(&dmar_rmrr_units))
9f10e5bf 4879 pr_info("No RMRR found\n");
318fe7df
SS
4880
4881 if (list_empty(&dmar_atsr_units))
9f10e5bf 4882 pr_info("No ATSR found\n");
318fe7df 4883
51a63e67
JC
4884 if (dmar_init_reserved_ranges()) {
4885 if (force_on)
4886 panic("tboot: Failed to reserve iommu ranges\n");
3a5670e8 4887 goto out_free_reserved_range;
51a63e67 4888 }
ba395927
KA
4889
4890 init_no_remapping_devices();
4891
b779260b 4892 ret = init_dmars();
ba395927 4893 if (ret) {
a59b50e9
JC
4894 if (force_on)
4895 panic("tboot: Failed to initialize DMARs\n");
9f10e5bf 4896 pr_err("Initialization failed\n");
9bdc531e 4897 goto out_free_reserved_range;
ba395927 4898 }
3a5670e8 4899 up_write(&dmar_global_lock);
9f10e5bf 4900 pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
ba395927 4901
75f1cdf1
FT
4902#ifdef CONFIG_SWIOTLB
4903 swiotlb = 0;
4904#endif
19943b0e 4905 dma_ops = &intel_dma_ops;
4ed0d3e6 4906
134fac3f 4907 init_iommu_pm_ops();
a8bcbb0d 4908
39ab9555
JR
4909 for_each_active_iommu(iommu, drhd) {
4910 iommu_device_sysfs_add(&iommu->iommu, NULL,
4911 intel_iommu_groups,
4912 "%s", iommu->name);
4913 iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
4914 iommu_device_register(&iommu->iommu);
4915 }
a5459cfe 4916
4236d97d 4917 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
99dcaded 4918 bus_register_notifier(&pci_bus_type, &device_nb);
75f05569
JL
4919 if (si_domain && !hw_pass_through)
4920 register_memory_notifier(&intel_iommu_memory_nb);
21647615
AMG
4921 cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
4922 intel_iommu_cpu_dead);
8bc1f85c
ED
4923 intel_iommu_enabled = 1;
4924
ba395927 4925 return 0;
9bdc531e
JL
4926
4927out_free_reserved_range:
4928 put_iova_domain(&reserved_iova_list);
9bdc531e
JL
4929out_free_dmar:
4930 intel_iommu_free_dmars();
3a5670e8
JL
4931 up_write(&dmar_global_lock);
4932 iommu_exit_mempool();
9bdc531e 4933 return ret;
ba395927 4934}
e820482c 4935
2452d9db 4936static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
579305f7
AW
4937{
4938 struct intel_iommu *iommu = opaque;
4939
2452d9db 4940 domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
579305f7
AW
4941 return 0;
4942}
4943
4944/*
4945 * NB - intel-iommu lacks any sort of reference counting for the users of
4946 * dependent devices. If multiple endpoints have intersecting dependent
4947 * devices, unbinding the driver from any one of them will possibly leave
4948 * the others unable to operate.
4949 */
2452d9db 4950static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
3199aa6b 4951{
0bcb3e28 4952 if (!iommu || !dev || !dev_is_pci(dev))
3199aa6b
HW
4953 return;
4954
2452d9db 4955 pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
3199aa6b
HW
4956}
4957
127c7615 4958static void __dmar_remove_one_dev_info(struct device_domain_info *info)
c7151a8d 4959{
c7151a8d
WH
4960 struct intel_iommu *iommu;
4961 unsigned long flags;
c7151a8d 4962
55d94043
JR
4963 assert_spin_locked(&device_domain_lock);
4964
127c7615 4965 if (WARN_ON(!info))
c7151a8d
WH
4966 return;
4967
127c7615 4968 iommu = info->iommu;
c7151a8d 4969
127c7615
JR
4970 if (info->dev) {
4971 iommu_disable_dev_iotlb(info);
4972 domain_context_clear(iommu, info->dev);
4973 }
c7151a8d 4974
b608ac3b 4975 unlink_domain_info(info);
c7151a8d 4976
d160aca5 4977 spin_lock_irqsave(&iommu->lock, flags);
127c7615 4978 domain_detach_iommu(info->domain, iommu);
d160aca5 4979 spin_unlock_irqrestore(&iommu->lock, flags);
c7151a8d 4980
127c7615 4981 free_devinfo_mem(info);
c7151a8d 4982}
c7151a8d 4983
55d94043
JR
4984static void dmar_remove_one_dev_info(struct dmar_domain *domain,
4985 struct device *dev)
4986{
127c7615 4987 struct device_domain_info *info;
55d94043 4988 unsigned long flags;
3e7abe25 4989
55d94043 4990 spin_lock_irqsave(&device_domain_lock, flags);
127c7615
JR
4991 info = dev->archdata.iommu;
4992 __dmar_remove_one_dev_info(info);
55d94043 4993 spin_unlock_irqrestore(&device_domain_lock, flags);
c7151a8d
WH
4994}
4995
2c2e2c38 4996static int md_domain_init(struct dmar_domain *domain, int guest_width)
5e98c4b1
WH
4997{
4998 int adjust_width;
4999
0fb5fe87
RM
5000 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
5001 DMA_32BIT_PFN);
5e98c4b1
WH
5002 domain_reserve_special_ranges(domain);
5003
5004 /* calculate AGAW */
5005 domain->gaw = guest_width;
5006 adjust_width = guestwidth_to_adjustwidth(guest_width);
5007 domain->agaw = width_to_agaw(adjust_width);
5008
5e98c4b1 5009 domain->iommu_coherency = 0;
c5b15255 5010 domain->iommu_snooping = 0;
6dd9a7c7 5011 domain->iommu_superpage = 0;
fe40f1e0 5012 domain->max_addr = 0;
5e98c4b1
WH
5013
5014 /* always allocate the top pgd */
4c923d47 5015 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
5e98c4b1
WH
5016 if (!domain->pgd)
5017 return -ENOMEM;
5018 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
5019 return 0;
5020}
5021
00a77deb 5022static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
38717946 5023{
5d450806 5024 struct dmar_domain *dmar_domain;
00a77deb
JR
5025 struct iommu_domain *domain;
5026
5027 if (type != IOMMU_DOMAIN_UNMANAGED)
5028 return NULL;
38717946 5029
ab8dfe25 5030 dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
5d450806 5031 if (!dmar_domain) {
9f10e5bf 5032 pr_err("Can't allocate dmar_domain\n");
00a77deb 5033 return NULL;
38717946 5034 }
2c2e2c38 5035 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
9f10e5bf 5036 pr_err("Domain initialization failed\n");
92d03cc8 5037 domain_exit(dmar_domain);
00a77deb 5038 return NULL;
38717946 5039 }
8140a95d 5040 domain_update_iommu_cap(dmar_domain);
faa3d6f5 5041
00a77deb 5042 domain = &dmar_domain->domain;
8a0e715b
JR
5043 domain->geometry.aperture_start = 0;
5044 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
5045 domain->geometry.force_aperture = true;
5046
00a77deb 5047 return domain;
38717946 5048}
38717946 5049
00a77deb 5050static void intel_iommu_domain_free(struct iommu_domain *domain)
38717946 5051{
00a77deb 5052 domain_exit(to_dmar_domain(domain));
38717946 5053}
38717946 5054
4c5478c9
JR
5055static int intel_iommu_attach_device(struct iommu_domain *domain,
5056 struct device *dev)
38717946 5057{
00a77deb 5058 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
fe40f1e0
WH
5059 struct intel_iommu *iommu;
5060 int addr_width;
156baca8 5061 u8 bus, devfn;
faa3d6f5 5062
c875d2c1
AW
5063 if (device_is_rmrr_locked(dev)) {
5064 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
5065 return -EPERM;
5066 }
5067
7207d8f9
DW
5068 /* normally dev is not mapped */
5069 if (unlikely(domain_context_mapped(dev))) {
faa3d6f5
WH
5070 struct dmar_domain *old_domain;
5071
1525a29a 5072 old_domain = find_domain(dev);
faa3d6f5 5073 if (old_domain) {
d160aca5 5074 rcu_read_lock();
de7e8886 5075 dmar_remove_one_dev_info(old_domain, dev);
d160aca5 5076 rcu_read_unlock();
62c22167
JR
5077
5078 if (!domain_type_is_vm_or_si(old_domain) &&
5079 list_empty(&old_domain->devices))
5080 domain_exit(old_domain);
faa3d6f5
WH
5081 }
5082 }
5083
156baca8 5084 iommu = device_to_iommu(dev, &bus, &devfn);
fe40f1e0
WH
5085 if (!iommu)
5086 return -ENODEV;
5087
5088 /* check if this iommu agaw is sufficient for max mapped address */
5089 addr_width = agaw_to_width(iommu->agaw);
a99c47a2
TL
5090 if (addr_width > cap_mgaw(iommu->cap))
5091 addr_width = cap_mgaw(iommu->cap);
5092
5093 if (dmar_domain->max_addr > (1LL << addr_width)) {
9f10e5bf 5094 pr_err("%s: iommu width (%d) is not "
fe40f1e0 5095 "sufficient for the mapped address (%llx)\n",
a99c47a2 5096 __func__, addr_width, dmar_domain->max_addr);
fe40f1e0
WH
5097 return -EFAULT;
5098 }
a99c47a2
TL
5099 dmar_domain->gaw = addr_width;
5100
5101 /*
5102 * Knock out extra levels of page tables if necessary
5103 */
5104 while (iommu->agaw < dmar_domain->agaw) {
5105 struct dma_pte *pte;
5106
5107 pte = dmar_domain->pgd;
5108 if (dma_pte_present(pte)) {
25cbff16
SY
5109 dmar_domain->pgd = (struct dma_pte *)
5110 phys_to_virt(dma_pte_addr(pte));
7a661013 5111 free_pgtable_page(pte);
a99c47a2
TL
5112 }
5113 dmar_domain->agaw--;
5114 }
fe40f1e0 5115
28ccce0d 5116 return domain_add_dev_info(dmar_domain, dev);
38717946 5117}
38717946 5118
4c5478c9
JR
5119static void intel_iommu_detach_device(struct iommu_domain *domain,
5120 struct device *dev)
38717946 5121{
e6de0f8d 5122 dmar_remove_one_dev_info(to_dmar_domain(domain), dev);
faa3d6f5 5123}
c7151a8d 5124
b146a1c9
JR
5125static int intel_iommu_map(struct iommu_domain *domain,
5126 unsigned long iova, phys_addr_t hpa,
5009065d 5127 size_t size, int iommu_prot)
faa3d6f5 5128{
00a77deb 5129 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
fe40f1e0 5130 u64 max_addr;
dde57a21 5131 int prot = 0;
faa3d6f5 5132 int ret;
fe40f1e0 5133
dde57a21
JR
5134 if (iommu_prot & IOMMU_READ)
5135 prot |= DMA_PTE_READ;
5136 if (iommu_prot & IOMMU_WRITE)
5137 prot |= DMA_PTE_WRITE;
9cf06697
SY
5138 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
5139 prot |= DMA_PTE_SNP;
dde57a21 5140
163cc52c 5141 max_addr = iova + size;
dde57a21 5142 if (dmar_domain->max_addr < max_addr) {
fe40f1e0
WH
5143 u64 end;
5144
5145 /* check if minimum agaw is sufficient for mapped address */
8954da1f 5146 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
fe40f1e0 5147 if (end < max_addr) {
9f10e5bf 5148 pr_err("%s: iommu width (%d) is not "
fe40f1e0 5149 "sufficient for the mapped address (%llx)\n",
8954da1f 5150 __func__, dmar_domain->gaw, max_addr);
fe40f1e0
WH
5151 return -EFAULT;
5152 }
dde57a21 5153 dmar_domain->max_addr = max_addr;
fe40f1e0 5154 }
ad051221
DW
5155 /* Round size up to the next multiple of PAGE_SIZE if it, together with
5156 the low bits of hpa, would take us onto the next page */
88cb6a74 5157 size = aligned_nrpages(hpa, size);
ad051221
DW
5158 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
5159 hpa >> VTD_PAGE_SHIFT, size, prot);
faa3d6f5 5160 return ret;
38717946 5161}
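/*
 * Illustrative sketch (not part of this driver) of how an external user such
 * as VFIO reaches the domain ops above through the generic IOMMU API; the
 * variable names are hypothetical:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
 *
 *	iommu_attach_device(dom, &pdev->dev);
 *	iommu_map(dom, iova, phys, VTD_PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
 *
 * which lands in intel_iommu_domain_alloc(), intel_iommu_attach_device() and
 * intel_iommu_map() via intel_iommu_ops.
 */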
38717946 5162
5009065d 5163static size_t intel_iommu_unmap(struct iommu_domain *domain,
ea8ea460 5164 unsigned long iova, size_t size)
38717946 5165{
00a77deb 5166 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
ea8ea460
DW
5167 struct page *freelist = NULL;
5168 struct intel_iommu *iommu;
5169 unsigned long start_pfn, last_pfn;
5170 unsigned int npages;
42e8c186 5171 int iommu_id, level = 0;
5cf0a76f
DW
5172
5173 /* Cope with horrid API which requires us to unmap more than the
5174 size argument if it happens to be a large-page mapping. */
dc02e46e 5175 BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
5cf0a76f
DW
5176
5177 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
5178 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
4b99d352 5179
ea8ea460
DW
5180 start_pfn = iova >> VTD_PAGE_SHIFT;
5181 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
5182
5183 freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
5184
5185 npages = last_pfn - start_pfn + 1;
5186
29a27719 5187 for_each_domain_iommu(iommu_id, dmar_domain) {
a1ddcbe9 5188 iommu = g_iommus[iommu_id];
ea8ea460 5189
42e8c186
JR
5190 iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
5191 start_pfn, npages, !freelist, 0);
ea8ea460
DW
5192 }
5193
5194 dma_free_pagelist(freelist);
fe40f1e0 5195
163cc52c
DW
5196 if (dmar_domain->max_addr == iova + size)
5197 dmar_domain->max_addr = iova;
b146a1c9 5198
5cf0a76f 5199 return size;
38717946 5200}
38717946 5201
d14d6577 5202static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
bb5547ac 5203 dma_addr_t iova)
38717946 5204{
00a77deb 5205 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
38717946 5206 struct dma_pte *pte;
5cf0a76f 5207 int level = 0;
faa3d6f5 5208 u64 phys = 0;
38717946 5209
5cf0a76f 5210 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
38717946 5211 if (pte)
faa3d6f5 5212 phys = dma_pte_addr(pte);
38717946 5213
faa3d6f5 5214 return phys;
38717946 5215}
a8bcbb0d 5216
5d587b8d 5217static bool intel_iommu_capable(enum iommu_cap cap)
dbb9fd86 5218{
dbb9fd86 5219 if (cap == IOMMU_CAP_CACHE_COHERENCY)
5d587b8d 5220 return domain_update_iommu_snooping(NULL) == 1;
323f99cb 5221 if (cap == IOMMU_CAP_INTR_REMAP)
5d587b8d 5222 return irq_remapping_enabled == 1;
dbb9fd86 5223
5d587b8d 5224 return false;
dbb9fd86
SY
5225}
5226
abdfdde2
AW
5227static int intel_iommu_add_device(struct device *dev)
5228{
a5459cfe 5229 struct intel_iommu *iommu;
abdfdde2 5230 struct iommu_group *group;
156baca8 5231 u8 bus, devfn;
70ae6f0d 5232
a5459cfe
AW
5233 iommu = device_to_iommu(dev, &bus, &devfn);
5234 if (!iommu)
70ae6f0d
AW
5235 return -ENODEV;
5236
e3d10af1 5237 iommu_device_link(&iommu->iommu, dev);
a4ff1fc2 5238
e17f9ff4 5239 group = iommu_group_get_for_dev(dev);
783f157b 5240
e17f9ff4
AW
5241 if (IS_ERR(group))
5242 return PTR_ERR(group);
bcb71abe 5243
abdfdde2 5244 iommu_group_put(group);
e17f9ff4 5245 return 0;
abdfdde2 5246}
70ae6f0d 5247
abdfdde2
AW
5248static void intel_iommu_remove_device(struct device *dev)
5249{
a5459cfe
AW
5250 struct intel_iommu *iommu;
5251 u8 bus, devfn;
5252
5253 iommu = device_to_iommu(dev, &bus, &devfn);
5254 if (!iommu)
5255 return;
5256
abdfdde2 5257 iommu_group_remove_device(dev);
a5459cfe 5258
e3d10af1 5259 iommu_device_unlink(&iommu->iommu, dev);
70ae6f0d
AW
5260}
5261
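/*
 * Build the list of reserved regions for a device: any RMRR range whose
 * device scope includes this device, plus the IOAPIC/MSI window, which
 * must never be handed out for ordinary DMA mappings.
 */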
0659b8dc
EA
5262static void intel_iommu_get_resv_regions(struct device *device,
5263 struct list_head *head)
5264{
5265 struct iommu_resv_region *reg;
5266 struct dmar_rmrr_unit *rmrr;
5267 struct device *i_dev;
5268 int i;
5269
5270 rcu_read_lock();
5271 for_each_rmrr_units(rmrr) {
5272 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
5273 i, i_dev) {
5274 if (i_dev != device)
5275 continue;
5276
5277 list_add_tail(&rmrr->resv->list, head);
5278 }
5279 }
5280 rcu_read_unlock();
5281
5282 reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
5283 IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
9d3a4de4 5284 0, IOMMU_RESV_MSI);
0659b8dc
EA
5285 if (!reg)
5286 return;
5287 list_add_tail(&reg->list, head);
5288}
5289
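/*
 * Release the per-device reserved-region list.  Only entries of type
 * IOMMU_RESV_RESERVED are owned by the caller and freed here; regions
 * that belong to the RMRR units themselves are left alone.
 */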
5290static void intel_iommu_put_resv_regions(struct device *dev,
5291 struct list_head *head)
5292{
5293 struct iommu_resv_region *entry, *next;
5294
5295 list_for_each_entry_safe(entry, next, head, list) {
5296 if (entry->type == IOMMU_RESV_RESERVED)
5297 kfree(entry);
5298 }
70ae6f0d
AW
5299}
5300
2f26e0a9 5301#ifdef CONFIG_INTEL_IOMMU_SVM
65ca7f5f
JP
5302#define MAX_NR_PASID_BITS (20)
5303static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
5304{
5305 /*
5306 * Convert ecap_pss to the extended context entry pts encoding, and also
5307 * respect the soft pasid_max value set by the iommu.
5308 * - number of PASID bits = ecap_pss + 1
5309 * - number of PASID table entries = 2^(pts + 5)
5310 * Therefore, pts = ecap_pss - 4
5311 * e.g. KBL ecap_pss = 0x13, PASID has 20 bits, pts = 15
5312 */
5313 if (ecap_pss(iommu->ecap) < 5)
5314 return 0;
5315
5316 /* pasid_max is encoded as the actual number of entries, not the number of bits */
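/* e.g. (illustrative) pasid_max = 0x4000, i.e. 16384 entries: the lowest
 * set bit is 14, so pts = 14 - 5 = 9, and 2^(9 + 5) gives back the
 * 16384 entries. */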
5317 return find_first_bit((unsigned long *)&iommu->pasid_max,
5318 MAX_NR_PASID_BITS) - 5;
5319}
5320
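/*
 * Set up a device's extended context entry so that it may issue
 * requests-with-PASID: point the entry at the PASID (and, if present,
 * PASID-state) tables, switch pass-through entries to a PASID-capable
 * translation type, flush the context cache, and record the domain ID,
 * source ID and device-IOTLB parameters in @sdev for later use.
 */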
2f26e0a9
DW
5321int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
5322{
5323 struct device_domain_info *info;
5324 struct context_entry *context;
5325 struct dmar_domain *domain;
5326 unsigned long flags;
5327 u64 ctx_lo;
5328 int ret;
5329
5330 domain = get_valid_domain_for_dev(sdev->dev);
5331 if (!domain)
5332 return -EINVAL;
5333
5334 spin_lock_irqsave(&device_domain_lock, flags);
5335 spin_lock(&iommu->lock);
5336
5337 ret = -EINVAL;
5338 info = sdev->dev->archdata.iommu;
5339 if (!info || !info->pasid_supported)
5340 goto out;
5341
5342 context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
5343 if (WARN_ON(!context))
5344 goto out;
5345
5346 ctx_lo = context[0].lo;
5347
5348 sdev->did = domain->iommu_did[iommu->seq_id];
5349 sdev->sid = PCI_DEVID(info->bus, info->devfn);
5350
5351 if (!(ctx_lo & CONTEXT_PASIDE)) {
11b93ebf
AR
5352 if (iommu->pasid_state_table)
5353 context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
65ca7f5f
JP
5354 context[1].lo = (u64)virt_to_phys(iommu->pasid_table) |
5355 intel_iommu_get_pts(iommu);
5356
2f26e0a9
DW
5357 wmb();
 5358 /* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both
 5359 * extended to permit requests-with-PASID if the PASIDE bit
 5360 * is set, which makes sense. For CONTEXT_TT_PASS_THROUGH,
 5361 * however, the PASIDE bit is ignored and requests-with-PASID
 5362 * are unconditionally blocked, which makes less sense.
 5363 * So convert from CONTEXT_TT_PASS_THROUGH to one of the new
 5364 * "guest mode" translation types, depending on whether ATS
 5365 * is available or not. Annoyingly, we can't use the new
 5366 * modes *unless* PASIDE is set. */
5367 if ((ctx_lo & CONTEXT_TT_MASK) == (CONTEXT_TT_PASS_THROUGH << 2)) {
5368 ctx_lo &= ~CONTEXT_TT_MASK;
5369 if (info->ats_supported)
5370 ctx_lo |= CONTEXT_TT_PT_PASID_DEV_IOTLB << 2;
5371 else
5372 ctx_lo |= CONTEXT_TT_PT_PASID << 2;
5373 }
5374 ctx_lo |= CONTEXT_PASIDE;
907fea34
DW
5375 if (iommu->pasid_state_table)
5376 ctx_lo |= CONTEXT_DINVE;
a222a7f0
DW
5377 if (info->pri_supported)
5378 ctx_lo |= CONTEXT_PRS;
2f26e0a9
DW
5379 context[0].lo = ctx_lo;
5380 wmb();
5381 iommu->flush.flush_context(iommu, sdev->did, sdev->sid,
5382 DMA_CCMD_MASK_NOBIT,
5383 DMA_CCMD_DEVICE_INVL);
5384 }
5385
5386 /* Enable PASID support in the device, if it wasn't already */
5387 if (!info->pasid_enabled)
5388 iommu_enable_dev_iotlb(info);
5389
5390 if (info->ats_enabled) {
5391 sdev->dev_iotlb = 1;
5392 sdev->qdep = info->ats_qdep;
5393 if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
5394 sdev->qdep = 0;
5395 }
5396 ret = 0;
5397
5398 out:
5399 spin_unlock(&iommu->lock);
5400 spin_unlock_irqrestore(&device_domain_lock, flags);
5401
5402 return ret;
5403}
5404
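/*
 * SVM helper: return the IOMMU behind @dev, or NULL (with a message) if
 * the device has no usable translation unit or its IOMMU has no PASID
 * table set up.
 */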
5405struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
5406{
5407 struct intel_iommu *iommu;
5408 u8 bus, devfn;
5409
5410 if (iommu_dummy(dev)) {
5411 dev_warn(dev,
5412 "No IOMMU translation for device; cannot enable SVM\n");
5413 return NULL;
5414 }
5415
5416 iommu = device_to_iommu(dev, &bus, &devfn);
5417 if (!iommu) {
b9997e38 5418 dev_err(dev, "No IOMMU for device; cannot enable SVM\n");
2f26e0a9
DW
5419 return NULL;
5420 }
5421
5422 if (!iommu->pasid_table) {
b9997e38 5423 dev_err(dev, "PASID not enabled on IOMMU; cannot enable SVM\n");
2f26e0a9
DW
5424 return NULL;
5425 }
5426
5427 return iommu;
5428}
5429#endif /* CONFIG_INTEL_IOMMU_SVM */
5430
b0119e87 5431const struct iommu_ops intel_iommu_ops = {
0659b8dc
EA
5432 .capable = intel_iommu_capable,
5433 .domain_alloc = intel_iommu_domain_alloc,
5434 .domain_free = intel_iommu_domain_free,
5435 .attach_dev = intel_iommu_attach_device,
5436 .detach_dev = intel_iommu_detach_device,
5437 .map = intel_iommu_map,
5438 .unmap = intel_iommu_unmap,
5439 .map_sg = default_iommu_map_sg,
5440 .iova_to_phys = intel_iommu_iova_to_phys,
5441 .add_device = intel_iommu_add_device,
5442 .remove_device = intel_iommu_remove_device,
5443 .get_resv_regions = intel_iommu_get_resv_regions,
5444 .put_resv_regions = intel_iommu_put_resv_regions,
5445 .device_group = pci_device_group,
5446 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
a8bcbb0d 5447};
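/*
 * Illustrative only (not part of this file): callers reach the ops above
 * through the generic IOMMU API, roughly as follows (error handling
 * omitted; pdev, iova and paddr are made-up names):
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
 *
 *	iommu_attach_device(dom, &pdev->dev);	// -> intel_iommu_attach_device
 *	iommu_map(dom, iova, paddr, SZ_4K,
 *		  IOMMU_READ | IOMMU_WRITE);	// -> intel_iommu_map
 *	iommu_iova_to_phys(dom, iova);		// -> intel_iommu_iova_to_phys
 *	iommu_unmap(dom, iova, SZ_4K);		// -> intel_iommu_unmap
 *	iommu_detach_device(dom, &pdev->dev);	// -> intel_iommu_detach_device
 *	iommu_domain_free(dom);			// -> intel_iommu_domain_free
 */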
9af88143 5448
9452618e
DV
5449static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
5450{
5451 /* G4x/GM45 integrated gfx dmar support is totally busted. */
9f10e5bf 5452 pr_info("Disabling IOMMU for graphics on this chipset\n");
9452618e
DV
5453 dmar_map_gfx = 0;
5454}
5455
5456DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
5457DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
5458DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
5459DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
5460DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
5461DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
5462DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
5463
d34d6517 5464static void quirk_iommu_rwbf(struct pci_dev *dev)
9af88143
DW
5465{
5466 /*
5467 * Mobile 4 Series Chipset neglects to set RWBF capability,
210561ff 5468 * but needs it. Same seems to hold for the desktop versions.
9af88143 5469 */
9f10e5bf 5470 pr_info("Forcing write-buffer flush capability\n");
9af88143
DW
5471 rwbf_quirk = 1;
5472}
5473
5474DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
210561ff
DV
5475DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
5476DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
5477DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
5478DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
5479DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
5480DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
e0fc7e0b 5481
eecfd57f
AJ
5482#define GGC 0x52
5483#define GGC_MEMORY_SIZE_MASK (0xf << 8)
5484#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
5485#define GGC_MEMORY_SIZE_1M (0x1 << 8)
5486#define GGC_MEMORY_SIZE_2M (0x3 << 8)
5487#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
5488#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
5489#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
5490#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
5491
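/*
 * GGC appears to be the graphics control register in the integrated GPU's
 * config space (an assumption based on the masks above); the GGC_MEMORY_*
 * values encode how much memory the BIOS set aside for the GTT with VT
 * enabled.  The quirk below disables the graphics IOMMU when none was
 * reserved.
 */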
d34d6517 5492static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
9eecabcb
DW
5493{
5494 unsigned short ggc;
5495
eecfd57f 5496 if (pci_read_config_word(dev, GGC, &ggc))
9eecabcb
DW
5497 return;
5498
eecfd57f 5499 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
9f10e5bf 5500 pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
9eecabcb 5501 dmar_map_gfx = 0;
6fbcfb3e
DW
5502 } else if (dmar_map_gfx) {
5503 /* we have to ensure the gfx device is idle before we flush */
9f10e5bf 5504 pr_info("Disabling batched IOTLB flush on Ironlake\n");
6fbcfb3e
DW
5505 intel_iommu_strict = 1;
5506 }
9eecabcb
DW
5507}
5508DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
5509DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
5510DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
5511DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
5512
e0fc7e0b
DW
5513/* On Tylersburg chipsets, some BIOSes have been known to enable the
5514 ISOCH DMAR unit for the Azalia sound device, but not give it any
5515 TLB entries, which causes it to deadlock. Check for that. We do
5516 this in a function called from init_dmars(), instead of in a PCI
5517 quirk, because we don't want to print the obnoxious "BIOS broken"
5518 message if VT-d is actually disabled.
5519*/
5520static void __init check_tylersburg_isoch(void)
5521{
5522 struct pci_dev *pdev;
5523 uint32_t vtisochctrl;
5524
5525 /* If there's no Azalia in the system anyway, forget it. */
5526 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
5527 if (!pdev)
5528 return;
5529 pci_dev_put(pdev);
5530
5531 /* System Management Registers. Might be hidden, in which case
5532 we can't do the sanity check. But that's OK, because the
5533 known-broken BIOSes _don't_ actually hide it, so far. */
5534 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
5535 if (!pdev)
5536 return;
5537
5538 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
5539 pci_dev_put(pdev);
5540 return;
5541 }
5542
5543 pci_dev_put(pdev);
5544
5545 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
5546 if (vtisochctrl & 1)
5547 return;
5548
5549 /* Drop all bits other than the number of TLB entries */
5550 vtisochctrl &= 0x1c;
5551
5552 /* If we have the recommended number of TLB entries (16), fine. */
5553 if (vtisochctrl == 0x10)
5554 return;
5555
5556 /* Zero TLB entries? The unit will simply deadlock, so warn loudly and identity-map Azalia instead. */
5557 if (!vtisochctrl) {
5558 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
5559 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
5560 dmi_get_system_info(DMI_BIOS_VENDOR),
5561 dmi_get_system_info(DMI_BIOS_VERSION),
5562 dmi_get_system_info(DMI_PRODUCT_VERSION));
5563 iommu_identity_mapping |= IDENTMAP_AZALIA;
5564 return;
5565 }
9f10e5bf
JR
5566
5567 pr_warn("The recommended number of TLB entries for the ISOCH unit is 16; your BIOS set %d\n",
e0fc7e0b
DW
5568 vtisochctrl);
5569}