/*
 * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/iommu-helper.h>
#include <linux/iommu.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/amd_iommu_proto.h>
#include <asm/amd_iommu_types.h>
#include <asm/amd_iommu.h>

#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
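/*
 * Illustrative note (not from the original source): the command type
 * occupies bits 31:28 of the second command dword, which is why
 * CMD_SET_TYPE() only ever ORs into data[1]. For a COMPLETION_WAIT
 * command (type 0x01 in this driver) it sets data[1] |= 0x10000000.
 */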
#define EXIT_LOOP_COUNT 10000000

static DEFINE_RWLOCK(amd_iommu_devtable_lock);

/* A list of preallocated protection domains */
static LIST_HEAD(iommu_pd_list);
static DEFINE_SPINLOCK(iommu_pd_list_lock);
/*
 * Domain for untranslated devices - only allocated
 * if iommu=pt is passed on the kernel cmd line.
 */
static struct protection_domain *pt_domain;

static struct iommu_ops amd_iommu_ops;

/*
 * general struct to manage commands sent to an IOMMU
 */
struct iommu_cmd {
        u32 data[4];
};

static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
                             struct unity_map_entry *e);
static struct dma_ops_domain *find_protection_domain(u16 devid);
static u64 *alloc_pte(struct protection_domain *domain,
                      unsigned long address, int end_lvl,
                      u64 **pte_page, gfp_t gfp);
static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
                                      unsigned long start_page,
                                      unsigned int pages);
static void reset_iommu_command_buffer(struct amd_iommu *iommu);
static u64 *fetch_pte(struct protection_domain *domain,
                      unsigned long address, int map_size);
static void update_domain(struct protection_domain *domain);
#ifdef CONFIG_AMD_IOMMU_STATS

/*
 * Initialization code for statistics collection
 */

DECLARE_STATS_COUNTER(compl_wait);
DECLARE_STATS_COUNTER(cnt_map_single);
DECLARE_STATS_COUNTER(cnt_unmap_single);
DECLARE_STATS_COUNTER(cnt_map_sg);
DECLARE_STATS_COUNTER(cnt_unmap_sg);
DECLARE_STATS_COUNTER(cnt_alloc_coherent);
DECLARE_STATS_COUNTER(cnt_free_coherent);
DECLARE_STATS_COUNTER(cross_page);
DECLARE_STATS_COUNTER(domain_flush_single);
DECLARE_STATS_COUNTER(domain_flush_all);
DECLARE_STATS_COUNTER(alloced_io_mem);
DECLARE_STATS_COUNTER(total_map_requests);

static struct dentry *stats_dir;
static struct dentry *de_isolate;
static struct dentry *de_fflush;

static void amd_iommu_stats_add(struct __iommu_counter *cnt)
{
        if (stats_dir == NULL)
                return;

        cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir,
                                       &cnt->value);
}

static void amd_iommu_stats_init(void)
{
        stats_dir = debugfs_create_dir("amd-iommu", NULL);
        if (stats_dir == NULL)
                return;

        de_isolate = debugfs_create_bool("isolation", 0444, stats_dir,
                                         (u32 *)&amd_iommu_isolate);

        de_fflush  = debugfs_create_bool("fullflush", 0444, stats_dir,
                                         (u32 *)&amd_iommu_unmap_flush);

        amd_iommu_stats_add(&compl_wait);
        amd_iommu_stats_add(&cnt_map_single);
        amd_iommu_stats_add(&cnt_unmap_single);
        amd_iommu_stats_add(&cnt_map_sg);
        amd_iommu_stats_add(&cnt_unmap_sg);
        amd_iommu_stats_add(&cnt_alloc_coherent);
        amd_iommu_stats_add(&cnt_free_coherent);
        amd_iommu_stats_add(&cross_page);
        amd_iommu_stats_add(&domain_flush_single);
        amd_iommu_stats_add(&domain_flush_all);
        amd_iommu_stats_add(&alloced_io_mem);
        amd_iommu_stats_add(&total_map_requests);
}

#endif
/****************************************************************************
 *
 * Interrupt handling functions
 *
 ****************************************************************************/

static void dump_dte_entry(u16 devid)
{
        int i;

        for (i = 0; i < 8; ++i)
                pr_err("AMD-Vi: DTE[%d]: %08x\n", i,
                        amd_iommu_dev_table[devid].data[i]);
}

static void dump_command(unsigned long phys_addr)
{
        struct iommu_cmd *cmd = phys_to_virt(phys_addr);
        int i;

        for (i = 0; i < 4; ++i)
                pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]);
}
static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
{
        u32 *event = __evt;
        int type  = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
        int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
        int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
        int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
        u64 address = (u64)(((u64)event[3]) << 32) | event[2];

        printk(KERN_ERR "AMD-Vi: Event logged [");

        switch (type) {
        case EVENT_TYPE_ILL_DEV:
                printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
                       "address=0x%016llx flags=0x%04x]\n",
                       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                       address, flags);
                dump_dte_entry(devid);
                break;
        case EVENT_TYPE_IO_FAULT:
                printk("IO_PAGE_FAULT device=%02x:%02x.%x "
                       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
                       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                       domid, address, flags);
                break;
        case EVENT_TYPE_DEV_TAB_ERR:
                printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
                       "address=0x%016llx flags=0x%04x]\n",
                       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                       address, flags);
                break;
        case EVENT_TYPE_PAGE_TAB_ERR:
                printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
                       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
                       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                       domid, address, flags);
                break;
        case EVENT_TYPE_ILL_CMD:
                printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
                reset_iommu_command_buffer(iommu);
                dump_command(address);
                break;
        case EVENT_TYPE_CMD_HARD_ERR:
                printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
                       "flags=0x%04x]\n", address, flags);
                break;
        case EVENT_TYPE_IOTLB_INV_TO:
                printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
                       "address=0x%016llx]\n",
                       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                       address);
                break;
        case EVENT_TYPE_INV_DEV_REQ:
                printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
                       "address=0x%016llx flags=0x%04x]\n",
                       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                       address, flags);
                break;
        default:
                printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
        }
}
static void iommu_poll_events(struct amd_iommu *iommu)
{
        u32 head, tail;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);

        head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
        tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

        while (head != tail) {
                iommu_print_event(iommu, iommu->evt_buf + head);
                head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
        }

        writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);

        spin_unlock_irqrestore(&iommu->lock, flags);
}
irqreturn_t amd_iommu_int_handler(int irq, void *data)
{
        struct amd_iommu *iommu;

        for_each_iommu(iommu)
                iommu_poll_events(iommu);

        return IRQ_HANDLED;
}
/****************************************************************************
 *
 * IOMMU command queuing functions
 *
 ****************************************************************************/

/*
 * Writes the command to the IOMMU's command buffer and informs the
 * hardware about the new command. Must be called with iommu->lock held.
 */
static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
        u32 tail, head;
        u8 *target;

        tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
        target = iommu->cmd_buf + tail;
        memcpy_toio(target, cmd, sizeof(*cmd));
        tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
        head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
        if (tail == head)
                return -ENOMEM;
        writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

        return 0;
}
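/*
 * Illustrative sketch of the ring arithmetic above (added note): the
 * command buffer is a ring of cmd_buf_size bytes holding 16-byte
 * commands, so the tail wraps to 0 modulo the buffer size after the
 * memcpy_toio(). The ring is treated as full one slot early: when the
 * incremented tail catches up with the head, -ENOMEM is returned
 * before the new tail is made visible in the tail register.
 */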
/*
 * General queuing function for commands. Takes iommu->lock and calls
 * __iommu_queue_command().
 */
static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&iommu->lock, flags);
        ret = __iommu_queue_command(iommu, cmd);
        if (!ret)
                iommu->need_sync = true;
        spin_unlock_irqrestore(&iommu->lock, flags);

        return ret;
}
/*
 * This function waits until an IOMMU has completed a completion
 * wait command
 */
static void __iommu_wait_for_completion(struct amd_iommu *iommu)
{
        int ready = 0;
        unsigned status = 0;
        unsigned long i = 0;

        INC_STATS_COUNTER(compl_wait);

        while (!ready && (i < EXIT_LOOP_COUNT)) {
                ++i;
                /* wait for the bit to become one */
                status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
                ready = status & MMIO_STATUS_COM_WAIT_INT_MASK;
        }

        /* set bit back to zero */
        status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
        writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);

        if (unlikely(i == EXIT_LOOP_COUNT)) {
                spin_unlock(&iommu->lock);
                reset_iommu_command_buffer(iommu);
                spin_lock(&iommu->lock);
        }
}

/*
 * This function queues a completion wait command into the command
 * buffer of an IOMMU
 */
static int __iommu_completion_wait(struct amd_iommu *iommu)
{
        struct iommu_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
        CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);

        return __iommu_queue_command(iommu, &cmd);
}
/*
 * This function is called whenever we need to ensure that the IOMMU has
 * completed execution of all commands we sent. It sends a
 * COMPLETION_WAIT command and waits for it to finish. The IOMMU informs
 * us about that by writing a value to a physical address we pass with
 * the command.
 */
static int iommu_completion_wait(struct amd_iommu *iommu)
{
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);

        if (!iommu->need_sync)
                goto out;

        ret = __iommu_completion_wait(iommu);

        iommu->need_sync = false;

        if (ret)
                goto out;

        __iommu_wait_for_completion(iommu);

out:
        spin_unlock_irqrestore(&iommu->lock, flags);

        return 0;
}
static void iommu_flush_complete(struct protection_domain *domain)
{
        int i;

        for (i = 0; i < amd_iommus_present; ++i) {
                if (!domain->dev_iommu[i])
                        continue;

                /*
                 * Devices of this domain are behind this IOMMU
                 * We need to wait for completion of all commands.
                 */
                iommu_completion_wait(amd_iommus[i]);
        }
}
/*
 * Command send function for invalidating a device table entry
 */
static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
{
        struct iommu_cmd cmd;
        int ret;

        BUG_ON(iommu == NULL);

        memset(&cmd, 0, sizeof(cmd));
        CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
        cmd.data[0] = devid;

        ret = iommu_queue_command(iommu, &cmd);

        return ret;
}
static void __iommu_build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
                                          u16 domid, int pde, int s)
{
        memset(cmd, 0, sizeof(*cmd));
        address &= PAGE_MASK;
        CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
        cmd->data[1] |= domid;
        cmd->data[2] = lower_32_bits(address);
        cmd->data[3] = upper_32_bits(address);
        if (s) /* size bit - we flush more than one 4kb page */
                cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
        if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
                cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
}
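/*
 * Illustrative example (added note): a full-domain flush, as issued by
 * flush_all_domains_on_iommu() further down, builds this command with
 * address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS and both bits set
 * (s=1, pde=1), telling the IOMMU to drop all cached PTEs and page
 * directory entries it holds for that domain.
 */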
/*
 * Generic command send function for invalidating TLB entries
 */
static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
                u64 address, u16 domid, int pde, int s)
{
        struct iommu_cmd cmd;
        int ret;

        __iommu_build_inv_iommu_pages(&cmd, address, domid, pde, s);

        ret = iommu_queue_command(iommu, &cmd);

        return ret;
}
/*
 * TLB invalidation function which is called from the mapping functions.
 * It invalidates a single PTE if the range to flush is within a single
 * page. Otherwise it flushes the whole TLB of the IOMMU.
 */
static void __iommu_flush_pages(struct protection_domain *domain,
                                u64 address, size_t size, int pde)
{
        int s = 0, i;
        unsigned long pages = iommu_num_pages(address, size, PAGE_SIZE);

        address &= PAGE_MASK;

        if (pages > 1) {
                /*
                 * If we have to flush more than one page, flush all
                 * TLB entries for this domain
                 */
                address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
                s = 1;
        }

        for (i = 0; i < amd_iommus_present; ++i) {
                if (!domain->dev_iommu[i])
                        continue;

                /*
                 * Devices of this domain are behind this IOMMU
                 * We need a TLB flush
                 */
                iommu_queue_inv_iommu_pages(amd_iommus[i], address,
                                            domain->id, pde, s);
        }

        return;
}

static void iommu_flush_pages(struct protection_domain *domain,
                              u64 address, size_t size)
{
        __iommu_flush_pages(domain, address, size, 0);
}
/* Flush the whole IO/TLB for a given protection domain */
static void iommu_flush_tlb(struct protection_domain *domain)
{
        __iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
}

/* Flush the whole IO/TLB for a given protection domain - including PDE */
static void iommu_flush_tlb_pde(struct protection_domain *domain)
{
        __iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
}
/*
 * This function flushes all domains that have devices on the given IOMMU
 */
static void flush_all_domains_on_iommu(struct amd_iommu *iommu)
{
        u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
        struct protection_domain *domain;
        unsigned long flags;

        spin_lock_irqsave(&amd_iommu_pd_lock, flags);

        list_for_each_entry(domain, &amd_iommu_pd_list, list) {
                if (domain->dev_iommu[iommu->index] == 0)
                        continue;

                spin_lock(&domain->lock);
                iommu_queue_inv_iommu_pages(iommu, address, domain->id, 1, 1);
                iommu_flush_complete(domain);
                spin_unlock(&domain->lock);
        }

        spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
}
/*
 * This function uses heavy locking and may disable irqs for some time. But
 * this is no issue because it is only called during resume.
 */
void amd_iommu_flush_all_domains(void)
{
        struct protection_domain *domain;
        unsigned long flags;

        spin_lock_irqsave(&amd_iommu_pd_lock, flags);

        list_for_each_entry(domain, &amd_iommu_pd_list, list) {
                spin_lock(&domain->lock);
                iommu_flush_tlb_pde(domain);
                iommu_flush_complete(domain);
                spin_unlock(&domain->lock);
        }

        spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
}
static void flush_all_devices_for_iommu(struct amd_iommu *iommu)
{
        int i;

        for (i = 0; i <= amd_iommu_last_bdf; ++i) {
                if (iommu != amd_iommu_rlookup_table[i])
                        continue;

                iommu_queue_inv_dev_entry(iommu, i);
                iommu_completion_wait(iommu);
        }
}
static void flush_devices_by_domain(struct protection_domain *domain)
{
        struct amd_iommu *iommu;
        int i;

        for (i = 0; i <= amd_iommu_last_bdf; ++i) {
                if ((domain == NULL && amd_iommu_pd_table[i] == NULL) ||
                    (amd_iommu_pd_table[i] != domain))
                        continue;

                iommu = amd_iommu_rlookup_table[i];
                if (!iommu)
                        continue;

                iommu_queue_inv_dev_entry(iommu, i);
                iommu_completion_wait(iommu);
        }
}
static void reset_iommu_command_buffer(struct amd_iommu *iommu)
{
        pr_err("AMD-Vi: Resetting IOMMU command buffer\n");

        if (iommu->reset_in_progress)
                panic("AMD-Vi: ILLEGAL_COMMAND_ERROR while resetting command buffer\n");

        iommu->reset_in_progress = true;

        amd_iommu_reset_cmd_buffer(iommu);
        flush_all_devices_for_iommu(iommu);
        flush_all_domains_on_iommu(iommu);

        iommu->reset_in_progress = false;
}

void amd_iommu_flush_all_devices(void)
{
        flush_devices_by_domain(NULL);
}
/****************************************************************************
 *
 * The functions below are used to create the page table mappings for
 * unity mapped regions.
 *
 ****************************************************************************/

/*
 * Generic mapping function. It maps a physical address into a DMA
 * address space. It allocates the page table pages if necessary.
 * In the future it can be extended to a generic mapping function
 * supporting all features of AMD IOMMU page tables like level skipping
 * and full 64 bit address spaces.
 */
static int iommu_map_page(struct protection_domain *dom,
                          unsigned long bus_addr,
                          unsigned long phys_addr,
                          int prot,
                          int map_size)
{
        u64 __pte, *pte;

        bus_addr  = PAGE_ALIGN(bus_addr);
        phys_addr = PAGE_ALIGN(phys_addr);

        BUG_ON(!PM_ALIGNED(map_size, bus_addr));
        BUG_ON(!PM_ALIGNED(map_size, phys_addr));

        if (!(prot & IOMMU_PROT_MASK))
                return -EINVAL;

        pte = alloc_pte(dom, bus_addr, map_size, NULL, GFP_KERNEL);

        if (IOMMU_PTE_PRESENT(*pte))
                return -EBUSY;

        __pte = phys_addr | IOMMU_PTE_P;
        if (prot & IOMMU_PROT_IR)
                __pte |= IOMMU_PTE_IR;
        if (prot & IOMMU_PROT_IW)
                __pte |= IOMMU_PTE_IW;

        *pte = __pte;

        update_domain(dom);

        return 0;
}
static void iommu_unmap_page(struct protection_domain *dom,
                             unsigned long bus_addr, int map_size)
{
        u64 *pte = fetch_pte(dom, bus_addr, map_size);

        if (pte)
                *pte = 0;
}
/*
 * This function checks if a specific unity mapping entry is needed for
 * this specific IOMMU.
 */
static int iommu_for_unity_map(struct amd_iommu *iommu,
                               struct unity_map_entry *entry)
{
        u16 bdf, i;

        for (i = entry->devid_start; i <= entry->devid_end; ++i) {
                bdf = amd_iommu_alias_table[i];
                if (amd_iommu_rlookup_table[bdf] == iommu)
                        return 1;
        }

        return 0;
}

/*
 * Init the unity mappings for a specific IOMMU in the system
 *
 * Basically iterates over all unity mapping entries and applies them to
 * the default DMA domain of that IOMMU if necessary.
 */
static int iommu_init_unity_mappings(struct amd_iommu *iommu)
{
        struct unity_map_entry *entry;
        int ret;

        list_for_each_entry(entry, &amd_iommu_unity_map, list) {
                if (!iommu_for_unity_map(iommu, entry))
                        continue;
                ret = dma_ops_unity_map(iommu->default_dom, entry);
                if (ret)
                        return ret;
        }

        return 0;
}
/*
 * This function actually applies the mapping to the page table of the
 * dma_ops domain.
 */
static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
                             struct unity_map_entry *e)
{
        u64 addr;
        int ret;

        for (addr = e->address_start; addr < e->address_end;
             addr += PAGE_SIZE) {
                ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot,
                                     PM_MAP_4k);
                if (ret)
                        return ret;
                /*
                 * if unity mapping is in aperture range mark the page
                 * as allocated in the aperture
                 */
                if (addr < dma_dom->aperture_size)
                        __set_bit(addr >> PAGE_SHIFT,
                                  dma_dom->aperture[0]->bitmap);
        }

        return 0;
}
/*
 * Inits the unity mappings required for a specific device
 */
static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
                                          u16 devid)
{
        struct unity_map_entry *e;
        int ret;

        list_for_each_entry(e, &amd_iommu_unity_map, list) {
                if (!(devid >= e->devid_start && devid <= e->devid_end))
                        continue;
                ret = dma_ops_unity_map(dma_dom, e);
                if (ret)
                        return ret;
        }

        return 0;
}
/****************************************************************************
 *
 * The next functions belong to the address allocator for the dma_ops
 * interface functions. They work like the allocators in the other IOMMU
 * drivers. It's basically a bitmap which marks the allocated pages in
 * the aperture. Maybe it could be enhanced in the future to a more
 * efficient allocator.
 *
 ****************************************************************************/

/*
 * The address allocator core functions.
 *
 * called with domain->lock held
 */

/*
 * This function checks if there is a PTE for a given dma address. If
 * there is one, it returns the pointer to it.
 */
static u64 *fetch_pte(struct protection_domain *domain,
                      unsigned long address, int map_size)
{
        int level;
        u64 *pte;

        level = domain->mode - 1;
        pte   = &domain->pt_root[PM_LEVEL_INDEX(level, address)];

        while (level > map_size) {
                if (!IOMMU_PTE_PRESENT(*pte))
                        return NULL;

                level -= 1;

                pte = IOMMU_PTE_PAGE(*pte);
                pte = &pte[PM_LEVEL_INDEX(level, address)];

                if ((PM_PTE_LEVEL(*pte) == 0) && level != map_size) {
                        pte = NULL;
                        break;
                }
        }

        return pte;
}
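/*
 * Illustrative address decomposition (added note, assuming the usual
 * nine index bits per level on top of the 4kb page offset): with a
 * 3-level table, PM_LEVEL_INDEX(2, addr) selects bits 38:30 of the
 * DMA address, PM_LEVEL_INDEX(1, addr) bits 29:21 and
 * PM_LEVEL_INDEX(0, addr) bits 20:12, which is exactly the walk
 * fetch_pte() performs above.
 */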
/*
 * This function is used to add a new aperture range to an existing
 * aperture in case of dma_ops domain allocation or address allocation
 * failure.
 */
static int alloc_new_range(struct amd_iommu *iommu,
                           struct dma_ops_domain *dma_dom,
                           bool populate, gfp_t gfp)
{
        int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
        int i;

#ifdef CONFIG_IOMMU_STRESS
        populate = false;
#endif

        if (index >= APERTURE_MAX_RANGES)
                return -ENOMEM;

        dma_dom->aperture[index] = kzalloc(sizeof(struct aperture_range), gfp);
        if (!dma_dom->aperture[index])
                return -ENOMEM;

        dma_dom->aperture[index]->bitmap = (void *)get_zeroed_page(gfp);
        if (!dma_dom->aperture[index]->bitmap)
                goto out_free;

        dma_dom->aperture[index]->offset = dma_dom->aperture_size;

        if (populate) {
                unsigned long address = dma_dom->aperture_size;
                int i, num_ptes = APERTURE_RANGE_PAGES / 512;
                u64 *pte, *pte_page;

                for (i = 0; i < num_ptes; ++i) {
                        pte = alloc_pte(&dma_dom->domain, address, PM_MAP_4k,
                                        &pte_page, gfp);
                        if (!pte)
                                goto out_free;

                        dma_dom->aperture[index]->pte_pages[i] = pte_page;

                        address += APERTURE_RANGE_SIZE / 64;
                }
        }

        dma_dom->aperture_size += APERTURE_RANGE_SIZE;

        /* Initialize the exclusion range if necessary */
        if (iommu->exclusion_start &&
            iommu->exclusion_start >= dma_dom->aperture[index]->offset &&
            iommu->exclusion_start < dma_dom->aperture_size) {
                unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
                int pages = iommu_num_pages(iommu->exclusion_start,
                                            iommu->exclusion_length,
                                            PAGE_SIZE);
                dma_ops_reserve_addresses(dma_dom, startpage, pages);
        }

        /*
         * Check for areas already mapped as present in the new aperture
         * range and mark those pages as reserved in the allocator. Such
         * mappings may already exist as a result of requested unity
         * mappings for devices.
         */
        for (i = dma_dom->aperture[index]->offset;
             i < dma_dom->aperture_size;
             i += PAGE_SIZE) {
                u64 *pte = fetch_pte(&dma_dom->domain, i, PM_MAP_4k);
                if (!pte || !IOMMU_PTE_PRESENT(*pte))
                        continue;

                /* i is a byte address; reserve takes a page number */
                dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, 1);
        }

        update_domain(&dma_dom->domain);

        return 0;

out_free:
        update_domain(&dma_dom->domain);

        free_page((unsigned long)dma_dom->aperture[index]->bitmap);

        kfree(dma_dom->aperture[index]);
        dma_dom->aperture[index] = NULL;

        return -ENOMEM;
}
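/*
 * Worked numbers for the populate path above (illustrative, derived
 * from the constants used there): one pte page holds 512 entries
 * mapping 4kb each, i.e. covers 2 MB, so one 128 MB aperture range
 * needs APERTURE_RANGE_PAGES / 512 = 64 pte pages, matching the
 * address step of APERTURE_RANGE_SIZE / 64 = 2 MB per loop iteration.
 */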
static unsigned long dma_ops_area_alloc(struct device *dev,
                                        struct dma_ops_domain *dom,
                                        unsigned int pages,
                                        unsigned long align_mask,
                                        u64 dma_mask,
                                        unsigned long start)
{
        unsigned long next_bit = dom->next_address % APERTURE_RANGE_SIZE;
        int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT;
        int i = start >> APERTURE_RANGE_SHIFT;
        unsigned long boundary_size;
        unsigned long address = -1;
        unsigned long limit;

        next_bit >>= PAGE_SHIFT;

        boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                        PAGE_SIZE) >> PAGE_SHIFT;

        for (;i < max_index; ++i) {
                unsigned long offset = dom->aperture[i]->offset >> PAGE_SHIFT;

                if (dom->aperture[i]->offset >= dma_mask)
                        break;

                limit = iommu_device_max_index(APERTURE_RANGE_PAGES, offset,
                                               dma_mask >> PAGE_SHIFT);

                address = iommu_area_alloc(dom->aperture[i]->bitmap,
                                           limit, next_bit, pages, 0,
                                           boundary_size, align_mask);
                if (address != -1) {
                        address = dom->aperture[i]->offset +
                                  (address << PAGE_SHIFT);
                        dom->next_address = address + (pages << PAGE_SHIFT);
                        break;
                }

                next_bit = 0;
        }

        return address;
}
static unsigned long dma_ops_alloc_addresses(struct device *dev,
                                             struct dma_ops_domain *dom,
                                             unsigned int pages,
                                             unsigned long align_mask,
                                             u64 dma_mask)
{
        unsigned long address;

#ifdef CONFIG_IOMMU_STRESS
        dom->next_address = 0;
        dom->need_flush = true;
#endif

        address = dma_ops_area_alloc(dev, dom, pages, align_mask,
                                     dma_mask, dom->next_address);

        if (address == -1) {
                dom->next_address = 0;
                address = dma_ops_area_alloc(dev, dom, pages, align_mask,
                                             dma_mask, 0);
                dom->need_flush = true;
        }

        if (unlikely(address == -1))
                address = DMA_ERROR_CODE;

        WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);

        return address;
}
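/*
 * Note on the strategy above (added comment): this is a next-fit
 * allocator. The first pass starts at dom->next_address; if it fails,
 * the search wraps around to address 0 and need_flush is set, because
 * addresses freed earlier may now be handed out again while stale
 * IO/TLB entries for them could still be cached by the IOMMU.
 */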
/*
 * The address free function.
 *
 * called with domain->lock held
 */
static void dma_ops_free_addresses(struct dma_ops_domain *dom,
                                   unsigned long address,
                                   unsigned int pages)
{
        unsigned i = address >> APERTURE_RANGE_SHIFT;
        struct aperture_range *range = dom->aperture[i];

        BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL);

#ifdef CONFIG_IOMMU_STRESS
        if (i < 4)
                return;
#endif

        if (address >= dom->next_address)
                dom->need_flush = true;

        address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;

        iommu_area_free(range->bitmap, address, pages);
}
/****************************************************************************
 *
 * The next functions belong to the domain allocation. A domain is
 * allocated for every IOMMU as the default domain. If device isolation
 * is enabled, every device gets its own domain. The most important thing
 * about domains is the page table mapping the DMA address space they
 * contain.
 *
 ****************************************************************************/

/*
 * This function adds a protection domain to the global protection domain list
 */
static void add_domain_to_list(struct protection_domain *domain)
{
        unsigned long flags;

        spin_lock_irqsave(&amd_iommu_pd_lock, flags);
        list_add(&domain->list, &amd_iommu_pd_list);
        spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
}

/*
 * This function removes a protection domain from the global
 * protection domain list
 */
static void del_domain_from_list(struct protection_domain *domain)
{
        unsigned long flags;

        spin_lock_irqsave(&amd_iommu_pd_lock, flags);
        list_del(&domain->list);
        spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
}
static u16 domain_id_alloc(void)
{
        unsigned long flags;
        int id;

        write_lock_irqsave(&amd_iommu_devtable_lock, flags);
        id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
        BUG_ON(id == 0);
        if (id > 0 && id < MAX_DOMAIN_ID)
                __set_bit(id, amd_iommu_pd_alloc_bitmap);
        else
                id = 0;
        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

        return id;
}

static void domain_id_free(int id)
{
        unsigned long flags;

        write_lock_irqsave(&amd_iommu_devtable_lock, flags);
        if (id > 0 && id < MAX_DOMAIN_ID)
                __clear_bit(id, amd_iommu_pd_alloc_bitmap);
        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
}
/*
 * Used to reserve address ranges in the aperture (e.g. for exclusion
 * ranges).
 */
static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
                                      unsigned long start_page,
                                      unsigned int pages)
{
        unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT;

        if (start_page + pages > last_page)
                pages = last_page - start_page;

        for (i = start_page; i < start_page + pages; ++i) {
                int index = i / APERTURE_RANGE_PAGES;
                int page  = i % APERTURE_RANGE_PAGES;
                __set_bit(page, dom->aperture[index]->bitmap);
        }
}
static void free_pagetable(struct protection_domain *domain)
{
        int i, j;
        u64 *p1, *p2, *p3;

        p1 = domain->pt_root;

        if (!p1)
                return;

        for (i = 0; i < 512; ++i) {
                if (!IOMMU_PTE_PRESENT(p1[i]))
                        continue;

                p2 = IOMMU_PTE_PAGE(p1[i]);
                for (j = 0; j < 512; ++j) {
                        if (!IOMMU_PTE_PRESENT(p2[j]))
                                continue;
                        p3 = IOMMU_PTE_PAGE(p2[j]);
                        free_page((unsigned long)p3);
                }

                free_page((unsigned long)p2);
        }

        free_page((unsigned long)p1);

        domain->pt_root = NULL;
}
/*
 * Free a domain, only used if something went wrong in the
 * allocation path and we need to free an already allocated page table
 */
static void dma_ops_domain_free(struct dma_ops_domain *dom)
{
        int i;

        if (!dom)
                return;

        del_domain_from_list(&dom->domain);

        free_pagetable(&dom->domain);

        for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
                if (!dom->aperture[i])
                        continue;
                free_page((unsigned long)dom->aperture[i]->bitmap);
                kfree(dom->aperture[i]);
        }

        kfree(dom);
}
/*
 * Allocates a new protection domain usable for the dma_ops functions.
 * It also initializes the page table and the address allocator data
 * structures required for the dma_ops interface
 */
static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu)
{
        struct dma_ops_domain *dma_dom;

        dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
        if (!dma_dom)
                return NULL;

        spin_lock_init(&dma_dom->domain.lock);

        dma_dom->domain.id = domain_id_alloc();
        if (dma_dom->domain.id == 0)
                goto free_dma_dom;
        dma_dom->domain.mode = PAGE_MODE_2_LEVEL;
        dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
        dma_dom->domain.flags = PD_DMA_OPS_MASK;
        dma_dom->domain.priv = dma_dom;
        if (!dma_dom->domain.pt_root)
                goto free_dma_dom;

        dma_dom->need_flush = false;
        dma_dom->target_dev = 0xffff;

        add_domain_to_list(&dma_dom->domain);

        if (alloc_new_range(iommu, dma_dom, true, GFP_KERNEL))
                goto free_dma_dom;

        /*
         * mark the first page as allocated so we never return 0 as
         * a valid dma-address. So we can use 0 as error value
         */
        dma_dom->aperture[0]->bitmap[0] = 1;
        dma_dom->next_address = 0;

        return dma_dom;

free_dma_dom:
        dma_ops_domain_free(dma_dom);

        return NULL;
}
/*
 * little helper function to check whether a given protection domain is a
 * dma_ops domain
 */
static bool dma_ops_domain(struct protection_domain *domain)
{
        return domain->flags & PD_DMA_OPS_MASK;
}
/*
 * Find out the protection domain structure for a given PCI device. This
 * will give us the pointer to the page table root for example.
 */
static struct protection_domain *domain_for_device(u16 devid)
{
        struct protection_domain *dom;
        unsigned long flags;

        read_lock_irqsave(&amd_iommu_devtable_lock, flags);
        dom = amd_iommu_pd_table[devid];
        read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

        return dom;
}
static void set_dte_entry(u16 devid, struct protection_domain *domain)
{
        u64 pte_root = virt_to_phys(domain->pt_root);

        pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
                    << DEV_ENTRY_MODE_SHIFT;
        pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;

        amd_iommu_dev_table[devid].data[2] = domain->id;
        amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
        amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);

        amd_iommu_pd_table[devid] = domain;
}
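/*
 * Resulting DTE layout (illustrative summary of the code above):
 * data[0] holds the low bits of the page table root together with the
 * paging mode and the V/TV/IR/IW flags, data[1] the high bits of the
 * root pointer, and data[2] the 16-bit domain id the IOMMU uses to
 * tag its IO/TLB entries for this device.
 */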
/*
 * If a device is not yet associated with a domain, this function
 * assigns it to the given domain and makes it visible to the hardware
 */
static void __attach_device(struct amd_iommu *iommu,
                            struct protection_domain *domain,
                            u16 devid)
{
        /* lock domain */
        spin_lock(&domain->lock);

        /* update DTE entry */
        set_dte_entry(devid, domain);

        /* Do reference counting */
        domain->dev_iommu[iommu->index] += 1;
        domain->dev_cnt                 += 1;

        /* ready */
        spin_unlock(&domain->lock);
}
/*
 * Same as __attach_device(), but takes the device table lock and also
 * flushes the DTE and the domain TLB afterwards
 */
static void attach_device(struct amd_iommu *iommu,
                          struct protection_domain *domain,
                          u16 devid)
{
        unsigned long flags;

        write_lock_irqsave(&amd_iommu_devtable_lock, flags);
        __attach_device(iommu, domain, devid);
        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

        /*
         * We might boot into a crash-kernel here. The crashed kernel
         * left the caches in the IOMMU dirty. So we have to flush
         * here to evict all dirty stuff.
         */
        iommu_queue_inv_dev_entry(iommu, devid);
        iommu_flush_tlb_pde(domain);
}
/*
 * Removes a device from a protection domain (unlocked)
 */
static void __detach_device(struct protection_domain *domain, u16 devid)
{
        struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

        BUG_ON(!iommu);

        /* lock domain */
        spin_lock(&domain->lock);

        /* remove domain from the lookup table */
        amd_iommu_pd_table[devid] = NULL;

        /* remove entry from the device table seen by the hardware */
        amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
        amd_iommu_dev_table[devid].data[1] = 0;
        amd_iommu_dev_table[devid].data[2] = 0;

        amd_iommu_apply_erratum_63(devid);

        /* decrease reference counters */
        domain->dev_iommu[iommu->index] -= 1;
        domain->dev_cnt                 -= 1;

        /* ready */
        spin_unlock(&domain->lock);

        /*
         * If we run in passthrough mode the device must be assigned to the
         * passthrough domain if it is detached from any other domain
         */
        if (iommu_pass_through) {
                struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
                __attach_device(iommu, pt_domain, devid);
        }
}

/*
 * Removes a device from a protection domain (with devtable_lock held)
 */
static void detach_device(struct protection_domain *domain, u16 devid)
{
        unsigned long flags;

        /* lock device table */
        write_lock_irqsave(&amd_iommu_devtable_lock, flags);
        __detach_device(domain, devid);
        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
}
static int device_change_notifier(struct notifier_block *nb,
                                  unsigned long action, void *data)
{
        struct device *dev = data;
        struct pci_dev *pdev = to_pci_dev(dev);
        u16 devid = calc_devid(pdev->bus->number, pdev->devfn);
        struct protection_domain *domain;
        struct dma_ops_domain *dma_domain;
        struct amd_iommu *iommu;
        unsigned long flags;

        if (devid > amd_iommu_last_bdf)
                goto out;

        devid = amd_iommu_alias_table[devid];

        iommu = amd_iommu_rlookup_table[devid];
        if (iommu == NULL)
                goto out;

        domain = domain_for_device(devid);

        if (domain && !dma_ops_domain(domain))
                WARN_ONCE(1, "AMD IOMMU WARNING: device %s already bound "
                          "to a non-dma-ops domain\n", dev_name(dev));

        switch (action) {
        case BUS_NOTIFY_UNBOUND_DRIVER:
                if (!domain)
                        goto out;
                if (iommu_pass_through)
                        break;
                detach_device(domain, devid);
                break;
        case BUS_NOTIFY_ADD_DEVICE:
                /* allocate a protection domain if a device is added */
                dma_domain = find_protection_domain(devid);
                if (dma_domain)
                        goto out;
                dma_domain = dma_ops_domain_alloc(iommu);
                if (!dma_domain)
                        goto out;
                dma_domain->target_dev = devid;

                spin_lock_irqsave(&iommu_pd_list_lock, flags);
                list_add_tail(&dma_domain->list, &iommu_pd_list);
                spin_unlock_irqrestore(&iommu_pd_list_lock, flags);

                break;
        default:
                goto out;
        }

        iommu_queue_inv_dev_entry(iommu, devid);
        iommu_completion_wait(iommu);

out:
        return 0;
}

static struct notifier_block device_nb = {
        .notifier_call = device_change_notifier,
};
/*****************************************************************************
 *
 * The next functions belong to the dma_ops mapping/unmapping code.
 *
 *****************************************************************************/

/*
 * This function checks if the driver got a valid device from the caller to
 * avoid dereferencing invalid pointers.
 */
static bool check_device(struct device *dev)
{
        u16 bdf;
        struct pci_dev *pcidev;

        if (!dev || !dev->dma_mask)
                return false;

        /* No device or no PCI device */
        if (!dev || dev->bus != &pci_bus_type)
                return false;

        pcidev = to_pci_dev(dev);

        bdf = calc_devid(pcidev->bus->number, pcidev->devfn);

        /* Out of our scope? */
        if (bdf > amd_iommu_last_bdf)
                return false;

        if (amd_iommu_rlookup_table[bdf] == NULL)
                return false;

        return true;
}
/*
 * In this function the list of preallocated protection domains is traversed to
 * find the domain for a specific device
 */
static struct dma_ops_domain *find_protection_domain(u16 devid)
{
        struct dma_ops_domain *entry, *ret = NULL;
        unsigned long flags;

        if (list_empty(&iommu_pd_list))
                return NULL;

        spin_lock_irqsave(&iommu_pd_list_lock, flags);

        list_for_each_entry(entry, &iommu_pd_list, list) {
                if (entry->target_dev == devid) {
                        ret = entry;
                        break;
                }
        }

        spin_unlock_irqrestore(&iommu_pd_list_lock, flags);

        return ret;
}
/*
 * In the dma_ops path we only have the struct device. This function
 * finds the corresponding IOMMU, the protection domain and the
 * requestor id for a given device.
 * If the device is not yet associated with a domain this is also done
 * in this function.
 */
static bool get_device_resources(struct device *dev,
                                 struct amd_iommu **iommu,
                                 struct protection_domain **domain,
                                 u16 *bdf)
{
        struct dma_ops_domain *dma_dom;
        struct pci_dev *pcidev;
        u16 _bdf;

        if (!check_device(dev))
                return false;

        pcidev  = to_pci_dev(dev);
        _bdf    = calc_devid(pcidev->bus->number, pcidev->devfn);
        *bdf    = amd_iommu_alias_table[_bdf];
        *iommu  = amd_iommu_rlookup_table[*bdf];
        *domain = domain_for_device(*bdf);

        if (*domain == NULL) {
                dma_dom = find_protection_domain(*bdf);
                if (!dma_dom)
                        dma_dom = (*iommu)->default_dom;
                *domain = &dma_dom->domain;
                attach_device(*iommu, *domain, *bdf);
                DUMP_printk("Using protection domain %d for device %s\n",
                            (*domain)->id, dev_name(dev));
        }

        if (domain_for_device(_bdf) == NULL)
                attach_device(*iommu, *domain, _bdf);

        return true;
}
static void update_device_table(struct protection_domain *domain)
{
        unsigned long flags;
        int i;

        for (i = 0; i <= amd_iommu_last_bdf; ++i) {
                if (amd_iommu_pd_table[i] != domain)
                        continue;
                write_lock_irqsave(&amd_iommu_devtable_lock, flags);
                set_dte_entry(i, domain);
                write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
        }
}

static void update_domain(struct protection_domain *domain)
{
        if (!domain->updated)
                return;

        update_device_table(domain);
        flush_devices_by_domain(domain);
        iommu_flush_tlb_pde(domain);

        domain->updated = false;
}
/*
 * This function is used to add another level to an IO page table. Adding
 * another level increases the size of the address space by 9 bits to a size up
 * to 64 bits.
 */
static bool increase_address_space(struct protection_domain *domain,
                                   gfp_t gfp)
{
        u64 *pte;

        if (domain->mode == PAGE_MODE_6_LEVEL)
                /* address space already 64 bit large */
                return false;

        pte = (void *)get_zeroed_page(gfp);
        if (!pte)
                return false;

        *pte             = PM_LEVEL_PDE(domain->mode,
                                        virt_to_phys(domain->pt_root));
        domain->pt_root  = pte;
        domain->mode    += 1;
        domain->updated  = true;

        return true;
}
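/*
 * Example (added note): a fresh dma_ops domain starts in 2-level mode,
 * which covers 2 * 9 + 12 = 30 address bits, i.e. a 1GB address space.
 * The first mapping above 1GB therefore grows the table to
 * PAGE_MODE_3_LEVEL via increase_address_space() above.
 */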
static u64 *alloc_pte(struct protection_domain *domain,
                      unsigned long address,
                      int end_lvl,
                      u64 **pte_page,
                      gfp_t gfp)
{
        u64 *pte, *page;
        int level;

        while (address > PM_LEVEL_SIZE(domain->mode))
                increase_address_space(domain, gfp);

        level = domain->mode - 1;
        pte   = &domain->pt_root[PM_LEVEL_INDEX(level, address)];

        while (level > end_lvl) {
                if (!IOMMU_PTE_PRESENT(*pte)) {
                        page = (u64 *)get_zeroed_page(gfp);
                        if (!page)
                                return NULL;
                        *pte = PM_LEVEL_PDE(level, virt_to_phys(page));
                }

                level -= 1;

                pte = IOMMU_PTE_PAGE(*pte);

                if (pte_page && level == end_lvl)
                        *pte_page = pte;

                pte = &pte[PM_LEVEL_INDEX(level, address)];
        }

        return pte;
}
/*
 * This function fetches the PTE for a given address in the aperture
 */
static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
                            unsigned long address)
{
        struct aperture_range *aperture;
        u64 *pte, *pte_page;

        aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
        if (!aperture)
                return NULL;

        pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
        if (!pte) {
                pte = alloc_pte(&dom->domain, address, PM_MAP_4k, &pte_page,
                                GFP_ATOMIC);
                aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page;
        } else
                pte += PM_LEVEL_INDEX(0, address);

        update_domain(&dom->domain);

        return pte;
}
/*
 * This is the generic map function. It maps one 4kb page at paddr to
 * the given address in the DMA address space for the domain.
 */
static dma_addr_t dma_ops_domain_map(struct dma_ops_domain *dom,
                                     unsigned long address,
                                     phys_addr_t paddr,
                                     int direction)
{
        u64 *pte, __pte;

        WARN_ON(address > dom->aperture_size);

        paddr &= PAGE_MASK;

        pte  = dma_ops_get_pte(dom, address);
        if (!pte)
                return DMA_ERROR_CODE;

        __pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;

        if (direction == DMA_TO_DEVICE)
                __pte |= IOMMU_PTE_IR;
        else if (direction == DMA_FROM_DEVICE)
                __pte |= IOMMU_PTE_IW;
        else if (direction == DMA_BIDIRECTIONAL)
                __pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;

        WARN_ON(*pte);

        *pte = __pte;

        return (dma_addr_t)address;
}
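/*
 * Note (added): IOMMU_PTE_IR/IW are read/write permissions as seen
 * from the device, so a DMA_TO_DEVICE buffer only needs to be
 * readable (IR) by the device and a DMA_FROM_DEVICE buffer only
 * writable (IW); DMA_BIDIRECTIONAL gets both bits.
 */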
/*
 * The generic unmapping function for one page in the DMA address space.
 */
static void dma_ops_domain_unmap(struct dma_ops_domain *dom,
                                 unsigned long address)
{
        struct aperture_range *aperture;
        u64 *pte;

        if (address >= dom->aperture_size)
                return;

        aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
        if (!aperture)
                return;

        pte  = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
        if (!pte)
                return;

        pte += PM_LEVEL_INDEX(0, address);

        WARN_ON(!*pte);

        *pte = 0ULL;
}
/*
 * This function contains common code for mapping of a physically
 * contiguous memory region into DMA address space. It is used by all
 * mapping functions provided with this IOMMU driver.
 * Must be called with the domain lock held.
 */
static dma_addr_t __map_single(struct device *dev,
                               struct amd_iommu *iommu,
                               struct dma_ops_domain *dma_dom,
                               phys_addr_t paddr,
                               size_t size,
                               int dir,
                               bool align,
                               u64 dma_mask)
{
        dma_addr_t offset = paddr & ~PAGE_MASK;
        dma_addr_t address, start, ret;
        unsigned int pages;
        unsigned long align_mask = 0;
        int i;

        pages = iommu_num_pages(paddr, size, PAGE_SIZE);
        paddr &= PAGE_MASK;

        INC_STATS_COUNTER(total_map_requests);

        if (pages > 1)
                INC_STATS_COUNTER(cross_page);

        if (align)
                align_mask = (1UL << get_order(size)) - 1;

retry:
        address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
                                          dma_mask);
        if (unlikely(address == DMA_ERROR_CODE)) {
                /*
                 * setting next_address here will let the address
                 * allocator only scan the new allocated range in the
                 * first run. This is a small optimization.
                 */
                dma_dom->next_address = dma_dom->aperture_size;

                if (alloc_new_range(iommu, dma_dom, false, GFP_ATOMIC))
                        goto out;

                /*
                 * aperture was successfully enlarged by 128 MB, try
                 * allocation again
                 */
                goto retry;
        }

        start = address;
        for (i = 0; i < pages; ++i) {
                ret = dma_ops_domain_map(dma_dom, start, paddr, dir);
                if (ret == DMA_ERROR_CODE)
                        goto out_unmap;

                paddr += PAGE_SIZE;
                start += PAGE_SIZE;
        }
        address += offset;

        ADD_STATS_COUNTER(alloced_io_mem, size);

        if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
                iommu_flush_tlb(&dma_dom->domain);
                dma_dom->need_flush = false;
        } else if (unlikely(amd_iommu_np_cache))
                iommu_flush_pages(&dma_dom->domain, address, size);

out:
        return address;

out_unmap:

        for (--i; i >= 0; --i) {
                start -= PAGE_SIZE;
                dma_ops_domain_unmap(dma_dom, start);
        }

        dma_ops_free_addresses(dma_dom, address, pages);

        return DMA_ERROR_CODE;
}
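/*
 * Illustrative flow through __map_single() (added note): a 4-page
 * request that finds no free range first bumps next_address, grows
 * the aperture by one 128 MB range with alloc_new_range() and retries.
 * On success the four PTEs are written back-to-back; afterwards either
 * the whole domain TLB is flushed (lazy unmap case) or, if the IOMMU
 * caches non-present entries (amd_iommu_np_cache), just the pages
 * mapped here.
 */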
/*
 * Does the reverse of the __map_single function. Must be called with
 * the domain lock held too
 */
static void __unmap_single(struct amd_iommu *iommu,
                           struct dma_ops_domain *dma_dom,
                           dma_addr_t dma_addr,
                           size_t size,
                           int dir)
{
        dma_addr_t i, start;
        unsigned int pages;

        if ((dma_addr == DMA_ERROR_CODE) ||
            (dma_addr + size > dma_dom->aperture_size))
                return;

        pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
        dma_addr &= PAGE_MASK;
        start = dma_addr;

        for (i = 0; i < pages; ++i) {
                dma_ops_domain_unmap(dma_dom, start);
                start += PAGE_SIZE;
        }

        SUB_STATS_COUNTER(alloced_io_mem, size);

        dma_ops_free_addresses(dma_dom, dma_addr, pages);

        if (amd_iommu_unmap_flush || dma_dom->need_flush) {
                iommu_flush_pages(&dma_dom->domain, dma_addr, size);
                dma_dom->need_flush = false;
        }
}
/*
 * The exported map_single function for dma_ops.
 */
static dma_addr_t map_page(struct device *dev, struct page *page,
                           unsigned long offset, size_t size,
                           enum dma_data_direction dir,
                           struct dma_attrs *attrs)
{
        unsigned long flags;
        struct amd_iommu *iommu;
        struct protection_domain *domain;
        u16 devid;
        dma_addr_t addr;
        u64 dma_mask;
        phys_addr_t paddr = page_to_phys(page) + offset;

        INC_STATS_COUNTER(cnt_map_single);

        if (!get_device_resources(dev, &iommu, &domain, &devid))
                /* device not handled by any AMD IOMMU */
                return (dma_addr_t)paddr;

        dma_mask = *dev->dma_mask;

        if (!dma_ops_domain(domain))
                return DMA_ERROR_CODE;

        spin_lock_irqsave(&domain->lock, flags);
        addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false,
                            dma_mask);
        if (addr == DMA_ERROR_CODE)
                goto out;

        iommu_flush_complete(domain);

out:
        spin_unlock_irqrestore(&domain->lock, flags);

        return addr;
}
431b2a20
JR
1810/*
1811 * The exported unmap_single function for dma_ops.
1812 */
51491367
FT
1813static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
1814 enum dma_data_direction dir, struct dma_attrs *attrs)
4da70b9e
JR
1815{
1816 unsigned long flags;
1817 struct amd_iommu *iommu;
1818 struct protection_domain *domain;
1819 u16 devid;
1820
146a6917
JR
1821 INC_STATS_COUNTER(cnt_unmap_single);
1822
f99c0f1c 1823 if (!get_device_resources(dev, &iommu, &domain, &devid))
431b2a20 1824 /* device not handled by any AMD IOMMU */
4da70b9e
JR
1825 return;
1826
5b28df6f
JR
1827 if (!dma_ops_domain(domain))
1828 return;
1829
4da70b9e
JR
1830 spin_lock_irqsave(&domain->lock, flags);
1831
1832 __unmap_single(iommu, domain->priv, dma_addr, size, dir);
1833
0518a3a4 1834 iommu_flush_complete(domain);
4da70b9e
JR
1835
1836 spin_unlock_irqrestore(&domain->lock, flags);
1837}

/*
 * This is a special map_sg function which is used when the device is not
 * handled by any AMD IOMMU in the system. It simply returns the physical
 * addresses, i.e. an identity mapping.
 */
static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
			   int nelems, int dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sglist, s, nelems, i) {
		s->dma_address = (dma_addr_t)sg_phys(s);
		s->dma_length  = s->length;
	}

	return nelems;
}

/*
 * The exported map_sg function for dma_ops (handles scatter-gather
 * lists).
 */
static int map_sg(struct device *dev, struct scatterlist *sglist,
		  int nelems, enum dma_data_direction dir,
		  struct dma_attrs *attrs)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;
	int i;
	struct scatterlist *s;
	phys_addr_t paddr;
	int mapped_elems = 0;
	u64 dma_mask;

	INC_STATS_COUNTER(cnt_map_sg);

	if (!get_device_resources(dev, &iommu, &domain, &devid))
		return map_sg_no_iommu(dev, sglist, nelems, dir);

	dma_mask = *dev->dma_mask;

	if (!dma_ops_domain(domain))
		return 0;

	spin_lock_irqsave(&domain->lock, flags);

	for_each_sg(sglist, s, nelems, i) {
		paddr = sg_phys(s);

		s->dma_address = __map_single(dev, iommu, domain->priv,
					      paddr, s->length, dir, false,
					      dma_mask);

		if (s->dma_address) {
			s->dma_length = s->length;
			mapped_elems++;
		} else
			goto unmap;
	}

	iommu_flush_complete(domain);

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return mapped_elems;
unmap:
	for_each_sg(sglist, s, mapped_elems, i) {
		if (s->dma_address)
			__unmap_single(iommu, domain->priv, s->dma_address,
				       s->dma_length, dir);
		s->dma_address = s->dma_length = 0;
	}

	mapped_elems = 0;

	goto out;
}
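
/*
 * Usage sketch (hypothetical, for illustration only): drivers reach
 * map_sg() through dma_map_sg(). The returned count is 0 on failure
 * here, and dma_unmap_sg() must be called with the original nelems:
 *
 *	int count = dma_map_sg(&pdev->dev, sglist, nelems, DMA_FROM_DEVICE);
 *	if (count == 0)
 *		return -ENOMEM;
 *	... program the device with the 'count' mapped segments ...
 *	dma_unmap_sg(&pdev->dev, sglist, nelems, DMA_FROM_DEVICE);
 */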

/*
 * The exported unmap_sg function for dma_ops (undoes a previously
 * established scatter-gather mapping).
 */
static void unmap_sg(struct device *dev, struct scatterlist *sglist,
		     int nelems, enum dma_data_direction dir,
		     struct dma_attrs *attrs)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	struct scatterlist *s;
	u16 devid;
	int i;

	INC_STATS_COUNTER(cnt_unmap_sg);

	if (!get_device_resources(dev, &iommu, &domain, &devid))
		return;

	if (!dma_ops_domain(domain))
		return;

	spin_lock_irqsave(&domain->lock, flags);

	for_each_sg(sglist, s, nelems, i) {
		__unmap_single(iommu, domain->priv, s->dma_address,
			       s->dma_length, dir);
		s->dma_address = s->dma_length = 0;
	}

	iommu_flush_complete(domain);

	spin_unlock_irqrestore(&domain->lock, flags);
}

/*
 * The exported alloc_coherent function for dma_ops.
 */
static void *alloc_coherent(struct device *dev, size_t size,
			    dma_addr_t *dma_addr, gfp_t flag)
{
	unsigned long flags;
	void *virt_addr;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;
	phys_addr_t paddr;
	u64 dma_mask = dev->coherent_dma_mask;

	INC_STATS_COUNTER(cnt_alloc_coherent);

	if (!get_device_resources(dev, &iommu, &domain, &devid)) {
		virt_addr = (void *)__get_free_pages(flag, get_order(size));
		*dma_addr = __pa(virt_addr);
		return virt_addr;
	}

	flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
	flag |= __GFP_ZERO;

	virt_addr = (void *)__get_free_pages(flag, get_order(size));
	if (!virt_addr)
		return NULL;

	paddr = virt_to_phys(virt_addr);

	if (!dma_ops_domain(domain))
		goto out_free;

	if (!dma_mask)
		dma_mask = *dev->dma_mask;

	spin_lock_irqsave(&domain->lock, flags);

	*dma_addr = __map_single(dev, iommu, domain->priv, paddr,
				 size, DMA_BIDIRECTIONAL, true, dma_mask);

	if (*dma_addr == DMA_ERROR_CODE) {
		spin_unlock_irqrestore(&domain->lock, flags);
		goto out_free;
	}

	iommu_flush_complete(domain);

	spin_unlock_irqrestore(&domain->lock, flags);

	return virt_addr;

out_free:

	free_pages((unsigned long)virt_addr, get_order(size));

	return NULL;
}
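
/*
 * Usage sketch (hypothetical): the coherent pair here is reached via
 * dma_alloc_coherent()/dma_free_coherent(). The mapping is created
 * DMA_BIDIRECTIONAL, so the buffer works for both transfer directions:
 *
 *	void *cpu_addr;
 *	dma_addr_t dma_handle;
 *
 *	cpu_addr = dma_alloc_coherent(&pdev->dev, 4096, &dma_handle,
 *				      GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, 4096, cpu_addr, dma_handle);
 */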

/*
 * The exported free_coherent function for dma_ops.
 */
static void free_coherent(struct device *dev, size_t size,
			  void *virt_addr, dma_addr_t dma_addr)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;

	INC_STATS_COUNTER(cnt_free_coherent);

	if (!get_device_resources(dev, &iommu, &domain, &devid))
		goto free_mem;

	if (!dma_ops_domain(domain))
		goto free_mem;

	spin_lock_irqsave(&domain->lock, flags);

	__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);

	iommu_flush_complete(domain);

	spin_unlock_irqrestore(&domain->lock, flags);

free_mem:
	free_pages((unsigned long)virt_addr, get_order(size));
}

/*
 * This function is called by the DMA layer to find out if we can handle a
 * particular device. It is part of the dma_ops.
 */
static int amd_iommu_dma_supported(struct device *dev, u64 mask)
{
	return check_device(dev);
}

/*
 * The function for pre-allocating protection domains.
 *
 * Once the driver core informs the DMA layer when a driver grabs a
 * device, we won't need to preallocate the protection domains anymore.
 * For now we have to.
 */
static void prealloc_protection_domains(void)
{
	struct pci_dev *dev = NULL;
	struct dma_ops_domain *dma_dom;
	struct amd_iommu *iommu;
	u16 devid, __devid;

	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
		__devid = devid = calc_devid(dev->bus->number, dev->devfn);
		if (devid > amd_iommu_last_bdf)
			continue;
		devid = amd_iommu_alias_table[devid];
		if (domain_for_device(devid))
			continue;
		iommu = amd_iommu_rlookup_table[devid];
		if (!iommu)
			continue;
		dma_dom = dma_ops_domain_alloc(iommu);
		if (!dma_dom)
			continue;
		init_unity_mappings_for_device(dma_dom, devid);
		dma_dom->target_dev = devid;

		attach_device(iommu, &dma_dom->domain, devid);
		if (__devid != devid)
			attach_device(iommu, &dma_dom->domain, __devid);

		list_add_tail(&dma_dom->list, &iommu_pd_list);
	}
}
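
/*
 * For reference: calc_devid() packs a PCI bus/devfn pair into the 16-bit
 * device id used to index the IOMMU tables, i.e. (bus << 8) | devfn.
 * Worked example (illustrative values): bus 0x05, device 0x02, function 1
 * gives devfn 0x11 and thus devid 0x0511. The alias table lookup above
 * then maps that requestor id onto the devid the IOMMU actually sees.
 */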

static struct dma_map_ops amd_iommu_dma_ops = {
	.alloc_coherent = alloc_coherent,
	.free_coherent  = free_coherent,
	.map_page       = map_page,
	.unmap_page     = unmap_page,
	.map_sg         = map_sg,
	.unmap_sg       = unmap_sg,
	.dma_supported  = amd_iommu_dma_supported,
};
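
/*
 * How the dispatch works (sketch, assuming the x86 dma-mapping glue of
 * this kernel generation): the inline DMA API helpers look up the ops
 * via get_dma_ops(dev) - the per-device archdata ops if set, otherwise
 * the global dma_ops pointer - and call through the table above, e.g.:
 *
 *	struct dma_map_ops *ops = get_dma_ops(dev);
 *
 *	addr = ops->map_page(dev, page, offset, size, dir, NULL);
 */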

/*
 * The function which clues the AMD IOMMU driver into dma_ops.
 */
int __init amd_iommu_init_dma_ops(void)
{
	struct amd_iommu *iommu;
	int ret;

	/*
	 * First allocate a default protection domain for every IOMMU we
	 * found in the system. Devices not assigned to any other
	 * protection domain will be assigned to the default one.
	 */
	for_each_iommu(iommu) {
		iommu->default_dom = dma_ops_domain_alloc(iommu);
		if (iommu->default_dom == NULL)
			return -ENOMEM;
		iommu->default_dom->domain.flags |= PD_DEFAULT_MASK;
		ret = iommu_init_unity_mappings(iommu);
		if (ret)
			goto free_domains;
	}

	/*
	 * If device isolation is enabled, pre-allocate the protection
	 * domains for each device.
	 */
	if (amd_iommu_isolate)
		prealloc_protection_domains();

	iommu_detected = 1;
	swiotlb = 0;
#ifdef CONFIG_GART_IOMMU
	gart_iommu_aperture_disabled = 1;
	gart_iommu_aperture = 0;
#endif

	/* Make our dma_ops finally visible to the drivers */
	dma_ops = &amd_iommu_dma_ops;

	register_iommu(&amd_iommu_ops);

	bus_register_notifier(&pci_bus_type, &device_nb);

	amd_iommu_stats_init();

	return 0;

free_domains:

	for_each_iommu(iommu) {
		if (iommu->default_dom)
			dma_ops_domain_free(iommu->default_dom);
	}

	return ret;
}

/*****************************************************************************
 *
 * The following functions belong to the exported interface of AMD IOMMU
 *
 * This interface allows access to lower level functions of the IOMMU
 * like protection domain handling and assignment of devices to domains
 * which is not possible with the dma_ops interface.
 *
 *****************************************************************************/

static void cleanup_domain(struct protection_domain *domain)
{
	unsigned long flags;
	u16 devid;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
		if (amd_iommu_pd_table[devid] == domain)
			__detach_device(domain, devid);

	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
}

static void protection_domain_free(struct protection_domain *domain)
{
	if (!domain)
		return;

	del_domain_from_list(domain);

	if (domain->id)
		domain_id_free(domain->id);

	kfree(domain);
}

static struct protection_domain *protection_domain_alloc(void)
{
	struct protection_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	spin_lock_init(&domain->lock);
	domain->id = domain_id_alloc();
	if (!domain->id)
		goto out_err;

	add_domain_to_list(domain);

	return domain;

out_err:
	kfree(domain);

	return NULL;
}

static int amd_iommu_domain_init(struct iommu_domain *dom)
{
	struct protection_domain *domain;

	domain = protection_domain_alloc();
	if (!domain)
		goto out_free;

	domain->mode    = PAGE_MODE_3_LEVEL;
	domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
	if (!domain->pt_root)
		goto out_free;

	dom->priv = domain;

	return 0;

out_free:
	protection_domain_free(domain);

	return -ENOMEM;
}

static void amd_iommu_domain_destroy(struct iommu_domain *dom)
{
	struct protection_domain *domain = dom->priv;

	if (!domain)
		return;

	if (domain->dev_cnt > 0)
		cleanup_domain(domain);

	BUG_ON(domain->dev_cnt != 0);

	free_pagetable(domain);

	domain_id_free(domain->id);

	kfree(domain);

	dom->priv = NULL;
}

static void amd_iommu_detach_device(struct iommu_domain *dom,
				    struct device *dev)
{
	struct protection_domain *domain = dom->priv;
	struct amd_iommu *iommu;
	struct pci_dev *pdev;
	u16 devid;

	if (dev->bus != &pci_bus_type)
		return;

	pdev = to_pci_dev(dev);

	devid = calc_devid(pdev->bus->number, pdev->devfn);

	if (devid > 0)
		detach_device(domain, devid);

	iommu = amd_iommu_rlookup_table[devid];
	if (!iommu)
		return;

	iommu_queue_inv_dev_entry(iommu, devid);
	iommu_completion_wait(iommu);
}

static int amd_iommu_attach_device(struct iommu_domain *dom,
				   struct device *dev)
{
	struct protection_domain *domain = dom->priv;
	struct protection_domain *old_domain;
	struct amd_iommu *iommu;
	struct pci_dev *pdev;
	u16 devid;

	if (dev->bus != &pci_bus_type)
		return -EINVAL;

	pdev = to_pci_dev(dev);

	devid = calc_devid(pdev->bus->number, pdev->devfn);

	if (devid >= amd_iommu_last_bdf ||
	    devid != amd_iommu_alias_table[devid])
		return -EINVAL;

	iommu = amd_iommu_rlookup_table[devid];
	if (!iommu)
		return -EINVAL;

	old_domain = domain_for_device(devid);
	if (old_domain)
		detach_device(old_domain, devid);

	attach_device(iommu, domain, devid);

	iommu_completion_wait(iommu);

	return 0;
}

static int amd_iommu_map_range(struct iommu_domain *dom,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int iommu_prot)
{
	struct protection_domain *domain = dom->priv;
	unsigned long i, npages = iommu_num_pages(paddr, size, PAGE_SIZE);
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= IOMMU_PROT_IR;
	if (iommu_prot & IOMMU_WRITE)
		prot |= IOMMU_PROT_IW;

	iova  &= PAGE_MASK;
	paddr &= PAGE_MASK;

	for (i = 0; i < npages; ++i) {
		ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k);
		if (ret)
			return ret;

		iova  += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}

	return 0;
}
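
/*
 * Worked example for the page arithmetic above (illustrative numbers):
 * for paddr = 0x12345f00 and size = 0x300, iommu_num_pages() counts the
 * 4k pages touched by [paddr, paddr + size), here 0x12345000 and
 * 0x12346000, so npages = 2; the loop then maps both pages after the
 * addresses have been truncated with PAGE_MASK.
 */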

static void amd_iommu_unmap_range(struct iommu_domain *dom,
				  unsigned long iova, size_t size)
{
	struct protection_domain *domain = dom->priv;
	unsigned long i, npages = iommu_num_pages(iova, size, PAGE_SIZE);

	iova &= PAGE_MASK;

	for (i = 0; i < npages; ++i) {
		iommu_unmap_page(domain, iova, PM_MAP_4k);
		iova += PAGE_SIZE;
	}

	iommu_flush_tlb_pde(domain);
}

static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
					  unsigned long iova)
{
	struct protection_domain *domain = dom->priv;
	unsigned long offset = iova & ~PAGE_MASK;
	phys_addr_t paddr;
	u64 *pte;

	pte = fetch_pte(domain, iova, PM_MAP_4k);

	if (!pte || !IOMMU_PTE_PRESENT(*pte))
		return 0;

	paddr  = *pte & IOMMU_PAGE_MASK;
	paddr |= offset;

	return paddr;
}
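
/*
 * Translation sketch (illustrative values): for iova = 0x10002345 the
 * offset is 0x345 and fetch_pte() walks to the PTE covering page
 * 0x10002000. If that PTE is present and points at physical page
 * 0x80000000, the function returns 0x80000345; a missing or non-present
 * PTE yields 0.
 */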

static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}

static struct iommu_ops amd_iommu_ops = {
	.domain_init    = amd_iommu_domain_init,
	.domain_destroy = amd_iommu_domain_destroy,
	.attach_dev     = amd_iommu_attach_device,
	.detach_dev     = amd_iommu_detach_device,
	.map            = amd_iommu_map_range,
	.unmap          = amd_iommu_unmap_range,
	.iova_to_phys   = amd_iommu_iova_to_phys,
	.domain_has_cap = amd_iommu_domain_has_cap,
};
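
/*
 * Consumer sketch (hypothetical; device assignment to guests is the
 * typical user of this interface). Assuming the generic iommu-api
 * frontend of this kernel generation, the ops above are reached via:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc();
 *
 *	if (!dom)
 *		return -ENOMEM;
 *	if (iommu_attach_device(dom, &pdev->dev))
 *		goto fail;
 *	iommu_map_range(dom, iova, paddr, size, IOMMU_READ | IOMMU_WRITE);
 */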

/*****************************************************************************
 *
 * The next functions do a basic initialization of the IOMMU for
 * passthrough mode.
 *
 * In passthrough mode the IOMMU is initialized and enabled but not used for
 * DMA-API translation.
 *
 *****************************************************************************/

int __init amd_iommu_init_passthrough(void)
{
	struct pci_dev *dev = NULL;
	u16 devid, devid2;

	/* allocate passthrough domain */
	pt_domain = protection_domain_alloc();
	if (!pt_domain)
		return -ENOMEM;

	pt_domain->mode |= PAGE_MODE_NONE;

	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
		struct amd_iommu *iommu;

		devid = calc_devid(dev->bus->number, dev->devfn);
		if (devid > amd_iommu_last_bdf)
			continue;

		devid2 = amd_iommu_alias_table[devid];

		iommu = amd_iommu_rlookup_table[devid2];
		if (!iommu)
			continue;

		__attach_device(iommu, pt_domain, devid);
		__attach_device(iommu, pt_domain, devid2);
	}

	pr_info("AMD-Vi: Initialized for Passthrough Mode\n");

	return 0;
}
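
/*
 * Note: this initialization path is taken when the kernel is booted with
 * iommu=pt. Every device behind an IOMMU (and its alias) is attached to
 * pt_domain, whose PAGE_MODE_NONE setting lets DMA pass through
 * untranslated while the IOMMU hardware itself stays enabled.
 */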