drivers/iommu/amd_iommu_v2.c
1 /*
2 * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
3 * Author: Joerg Roedel <jroedel@suse.de>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19 #include <linux/mmu_notifier.h>
20 #include <linux/amd-iommu.h>
21 #include <linux/mm_types.h>
22 #include <linux/profile.h>
23 #include <linux/module.h>
24 #include <linux/sched.h>
25 #include <linux/sched/mm.h>
26 #include <linux/iommu.h>
27 #include <linux/wait.h>
28 #include <linux/pci.h>
29 #include <linux/gfp.h>
30
31 #include "amd_iommu_types.h"
32 #include "amd_iommu_proto.h"
33
34 MODULE_LICENSE("GPL v2");
35 MODULE_AUTHOR("Joerg Roedel <jroedel@suse.de>");
36
37 #define MAX_DEVICES 0x10000
38 #define PRI_QUEUE_SIZE 512
39
40 struct pri_queue {
41 atomic_t inflight;
42 bool finish;
43 int status;
44 };
45
46 struct pasid_state {
47 struct list_head list; /* For global state-list */
48 atomic_t count; /* Reference count */
49 unsigned mmu_notifier_count; /* Counting nested mmu_notifier
50 calls */
51 struct mm_struct *mm; /* mm_struct for the faults */
52 struct mmu_notifier mn; /* mmu_notifier handle */
53 struct pri_queue pri[PRI_QUEUE_SIZE]; /* PRI tag states */
54 struct device_state *device_state; /* Link to our device_state */
55 int pasid; /* PASID index */
56 bool invalid; /* Used during setup and
57 teardown of the pasid */
58 spinlock_t lock; /* Protect pri_queues and
59 						   mmu_notifier_count */
60 wait_queue_head_t wq; /* To wait for count == 0 */
61 };
62
63 struct device_state {
64 struct list_head list;
65 u16 devid;
66 atomic_t count;
67 struct pci_dev *pdev;
68 struct pasid_state **states;
69 struct iommu_domain *domain;
70 int pasid_levels;
71 int max_pasids;
72 amd_iommu_invalid_ppr_cb inv_ppr_cb;
73 amd_iommu_invalidate_ctx inv_ctx_cb;
74 spinlock_t lock;
75 wait_queue_head_t wq;
76 };
77
78 struct fault {
79 struct work_struct work;
80 struct device_state *dev_state;
81 struct pasid_state *state;
82 struct mm_struct *mm;
83 u64 address;
84 u16 devid;
85 u16 pasid;
86 u16 tag;
87 u16 finish;
88 u16 flags;
89 };
90
91 static LIST_HEAD(state_list);
92 static spinlock_t state_lock;
93
94 static struct workqueue_struct *iommu_wq;
95
96 static void free_pasid_states(struct device_state *dev_state);
97
98 static u16 device_id(struct pci_dev *pdev)
99 {
100 u16 devid;
101
102 devid = pdev->bus->number;
103 devid = (devid << 8) | pdev->devfn;
104
105 return devid;
106 }
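/*
 * The 16-bit device id built above is the PCI requester id as seen by
 * the IOMMU: bus number in the upper byte, devfn (slot/function) in the
 * lower byte, i.e. the same value PCI_DEVID(bus, devfn) yields.
 * Worked example for a made-up device 3a:00.4:
 *
 *	bus = 0x3a, devfn = PCI_DEVFN(0x00, 0x4) = 0x04
 *	devid = (0x3a << 8) | 0x04 = 0x3a04
 */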
107
108 static struct device_state *__get_device_state(u16 devid)
109 {
110 struct device_state *dev_state;
111
112 list_for_each_entry(dev_state, &state_list, list) {
113 if (dev_state->devid == devid)
114 return dev_state;
115 }
116
117 return NULL;
118 }
119
120 static struct device_state *get_device_state(u16 devid)
121 {
122 struct device_state *dev_state;
123 unsigned long flags;
124
125 spin_lock_irqsave(&state_lock, flags);
126 dev_state = __get_device_state(devid);
127 if (dev_state != NULL)
128 atomic_inc(&dev_state->count);
129 spin_unlock_irqrestore(&state_lock, flags);
130
131 return dev_state;
132 }
133
134 static void free_device_state(struct device_state *dev_state)
135 {
136 struct iommu_group *group;
137
138 /*
139 * First detach device from domain - No more PRI requests will arrive
140 * from that device after it is unbound from the IOMMUv2 domain.
141 */
142 group = iommu_group_get(&dev_state->pdev->dev);
143 if (WARN_ON(!group))
144 return;
145
146 iommu_detach_group(dev_state->domain, group);
147
148 iommu_group_put(group);
149
150 /* Everything is down now, free the IOMMUv2 domain */
151 iommu_domain_free(dev_state->domain);
152
153 /* Finally get rid of the device-state */
154 kfree(dev_state);
155 }
156
157 static void put_device_state(struct device_state *dev_state)
158 {
159 if (atomic_dec_and_test(&dev_state->count))
160 wake_up(&dev_state->wq);
161 }
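/*
 * Reference counting pattern used for both device_state and
 * pasid_state: the final put wakes the object's wait queue, and the
 * teardown paths (amd_iommu_free_device(), put_pasid_state_wait())
 * wait_event() for the count to reach zero before freeing the object.
 */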
162
163 /* Must be called under dev_state->lock */
164 static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
165 int pasid, bool alloc)
166 {
167 struct pasid_state **root, **ptr;
168 int level, index;
169
170 level = dev_state->pasid_levels;
171 root = dev_state->states;
172
173 while (true) {
174
175 index = (pasid >> (9 * level)) & 0x1ff;
176 ptr = &root[index];
177
178 if (level == 0)
179 break;
180
181 if (*ptr == NULL) {
182 if (!alloc)
183 return NULL;
184
185 *ptr = (void *)get_zeroed_page(GFP_ATOMIC);
186 if (*ptr == NULL)
187 return NULL;
188 }
189
190 root = (struct pasid_state **)*ptr;
191 level -= 1;
192 }
193
194 return ptr;
195 }
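/*
 * The per-device PASID table is a small radix tree: one page per
 * table, 512 (2^9) entries per level, with intermediate tables
 * allocated on demand when 'alloc' is true (GFP_ATOMIC, since callers
 * hold dev_state->lock with interrupts disabled).  Worked example with
 * one intermediate level (dev_state->pasid_levels == 1) and pasid
 * 0x1234:
 *
 *	level 1 index = (0x1234 >> 9) & 0x1ff = 0x009
 *	level 0 index =  0x1234       & 0x1ff = 0x034
 *
 * so the pasid_state pointer lives at root[0x009][0x034].
 */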
196
197 static int set_pasid_state(struct device_state *dev_state,
198 struct pasid_state *pasid_state,
199 int pasid)
200 {
201 struct pasid_state **ptr;
202 unsigned long flags;
203 int ret;
204
205 spin_lock_irqsave(&dev_state->lock, flags);
206 ptr = __get_pasid_state_ptr(dev_state, pasid, true);
207
208 ret = -ENOMEM;
209 if (ptr == NULL)
210 goto out_unlock;
211
212 ret = -ENOMEM;
213 if (*ptr != NULL)
214 goto out_unlock;
215
216 *ptr = pasid_state;
217
218 ret = 0;
219
220 out_unlock:
221 spin_unlock_irqrestore(&dev_state->lock, flags);
222
223 return ret;
224 }
225
226 static void clear_pasid_state(struct device_state *dev_state, int pasid)
227 {
228 struct pasid_state **ptr;
229 unsigned long flags;
230
231 spin_lock_irqsave(&dev_state->lock, flags);
232 ptr = __get_pasid_state_ptr(dev_state, pasid, true);
233
234 if (ptr == NULL)
235 goto out_unlock;
236
237 *ptr = NULL;
238
239 out_unlock:
240 spin_unlock_irqrestore(&dev_state->lock, flags);
241 }
242
243 static struct pasid_state *get_pasid_state(struct device_state *dev_state,
244 int pasid)
245 {
246 struct pasid_state **ptr, *ret = NULL;
247 unsigned long flags;
248
249 spin_lock_irqsave(&dev_state->lock, flags);
250 ptr = __get_pasid_state_ptr(dev_state, pasid, false);
251
252 if (ptr == NULL)
253 goto out_unlock;
254
255 ret = *ptr;
256 if (ret)
257 atomic_inc(&ret->count);
258
259 out_unlock:
260 spin_unlock_irqrestore(&dev_state->lock, flags);
261
262 return ret;
263 }
264
265 static void free_pasid_state(struct pasid_state *pasid_state)
266 {
267 kfree(pasid_state);
268 }
269
270 static void put_pasid_state(struct pasid_state *pasid_state)
271 {
272 if (atomic_dec_and_test(&pasid_state->count))
273 wake_up(&pasid_state->wq);
274 }
275
276 static void put_pasid_state_wait(struct pasid_state *pasid_state)
277 {
278 atomic_dec(&pasid_state->count);
279 wait_event(pasid_state->wq, !atomic_read(&pasid_state->count));
280 free_pasid_state(pasid_state);
281 }
282
283 static void unbind_pasid(struct pasid_state *pasid_state)
284 {
285 struct iommu_domain *domain;
286
287 domain = pasid_state->device_state->domain;
288
289 /*
290 	 * Mark pasid_state as invalid; no more faults will be added to the
291 * work queue after this is visible everywhere.
292 */
293 pasid_state->invalid = true;
294
295 /* Make sure this is visible */
296 smp_wmb();
297
298 /* After this the device/pasid can't access the mm anymore */
299 amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);
300
301 /* Make sure no more pending faults are in the queue */
302 flush_workqueue(iommu_wq);
303 }
304
305 static void free_pasid_states_level1(struct pasid_state **tbl)
306 {
307 int i;
308
309 for (i = 0; i < 512; ++i) {
310 if (tbl[i] == NULL)
311 continue;
312
313 free_page((unsigned long)tbl[i]);
314 }
315 }
316
317 static void free_pasid_states_level2(struct pasid_state **tbl)
318 {
319 struct pasid_state **ptr;
320 int i;
321
322 for (i = 0; i < 512; ++i) {
323 if (tbl[i] == NULL)
324 continue;
325
326 ptr = (struct pasid_state **)tbl[i];
327 free_pasid_states_level1(ptr);
328 }
329 }
330
331 static void free_pasid_states(struct device_state *dev_state)
332 {
333 struct pasid_state *pasid_state;
334 int i;
335
336 for (i = 0; i < dev_state->max_pasids; ++i) {
337 pasid_state = get_pasid_state(dev_state, i);
338 if (pasid_state == NULL)
339 continue;
340
341 put_pasid_state(pasid_state);
342
343 /*
344 * This will call the mn_release function and
345 * unbind the PASID
346 */
347 mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
348
349 put_pasid_state_wait(pasid_state); /* Reference taken in
350 amd_iommu_bind_pasid */
351
352 /* Drop reference taken in amd_iommu_bind_pasid */
353 put_device_state(dev_state);
354 }
355
356 if (dev_state->pasid_levels == 2)
357 free_pasid_states_level2(dev_state->states);
358 else if (dev_state->pasid_levels == 1)
359 free_pasid_states_level1(dev_state->states);
360 else
361 BUG_ON(dev_state->pasid_levels != 0);
362
363 free_page((unsigned long)dev_state->states);
364 }
365
366 static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
367 {
368 return container_of(mn, struct pasid_state, mn);
369 }
370
371 static void __mn_flush_page(struct mmu_notifier *mn,
372 unsigned long address)
373 {
374 struct pasid_state *pasid_state;
375 struct device_state *dev_state;
376
377 pasid_state = mn_to_state(mn);
378 dev_state = pasid_state->device_state;
379
380 amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, address);
381 }
382
383 static int mn_clear_flush_young(struct mmu_notifier *mn,
384 struct mm_struct *mm,
385 unsigned long start,
386 unsigned long end)
387 {
388 for (; start < end; start += PAGE_SIZE)
389 __mn_flush_page(mn, start);
390
391 return 0;
392 }
393
394 static void mn_invalidate_range(struct mmu_notifier *mn,
395 struct mm_struct *mm,
396 unsigned long start, unsigned long end)
397 {
398 struct pasid_state *pasid_state;
399 struct device_state *dev_state;
400
401 pasid_state = mn_to_state(mn);
402 dev_state = pasid_state->device_state;
403
404 if ((start ^ (end - 1)) < PAGE_SIZE)
405 amd_iommu_flush_page(dev_state->domain, pasid_state->pasid,
406 start);
407 else
408 amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid);
409 }
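/*
 * The (start ^ (end - 1)) < PAGE_SIZE check above tests whether the
 * first and last byte of the invalidated range share one page: if so,
 * all bits at or above PAGE_SHIFT are equal and XOR to zero, and a
 * single-page IOTLB flush suffices; otherwise the whole PASID TLB is
 * flushed.  Example assuming 4K pages (for illustration only):
 *
 *	start = 0x1000, end = 0x2000: 0x1000 ^ 0x1fff = 0x0fff < 0x1000
 *	start = 0x1000, end = 0x3000: 0x1000 ^ 0x2fff = 0x3fff >= 0x1000
 */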
410
411 static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
412 {
413 struct pasid_state *pasid_state;
414 struct device_state *dev_state;
415 bool run_inv_ctx_cb;
416
417 might_sleep();
418
419 pasid_state = mn_to_state(mn);
420 dev_state = pasid_state->device_state;
421 run_inv_ctx_cb = !pasid_state->invalid;
422
423 if (run_inv_ctx_cb && dev_state->inv_ctx_cb)
424 dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);
425
426 unbind_pasid(pasid_state);
427 }
428
429 static const struct mmu_notifier_ops iommu_mn = {
430 .release = mn_release,
431 .clear_flush_young = mn_clear_flush_young,
432 .invalidate_range = mn_invalidate_range,
433 };
434
435 static void set_pri_tag_status(struct pasid_state *pasid_state,
436 u16 tag, int status)
437 {
438 unsigned long flags;
439
440 spin_lock_irqsave(&pasid_state->lock, flags);
441 pasid_state->pri[tag].status = status;
442 spin_unlock_irqrestore(&pasid_state->lock, flags);
443 }
444
445 static void finish_pri_tag(struct device_state *dev_state,
446 struct pasid_state *pasid_state,
447 u16 tag)
448 {
449 unsigned long flags;
450
451 spin_lock_irqsave(&pasid_state->lock, flags);
452 if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
453 pasid_state->pri[tag].finish) {
454 amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
455 pasid_state->pri[tag].status, tag);
456 pasid_state->pri[tag].finish = false;
457 pasid_state->pri[tag].status = PPR_SUCCESS;
458 }
459 spin_unlock_irqrestore(&pasid_state->lock, flags);
460 }
461
462 static void handle_fault_error(struct fault *fault)
463 {
464 int status;
465
466 if (!fault->dev_state->inv_ppr_cb) {
467 set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
468 return;
469 }
470
471 status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
472 fault->pasid,
473 fault->address,
474 fault->flags);
475 switch (status) {
476 case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
477 set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
478 break;
479 case AMD_IOMMU_INV_PRI_RSP_INVALID:
480 set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
481 break;
482 case AMD_IOMMU_INV_PRI_RSP_FAIL:
483 set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
484 break;
485 default:
486 BUG();
487 }
488 }
489
490 static bool access_error(struct vm_area_struct *vma, struct fault *fault)
491 {
492 unsigned long requested = 0;
493
494 if (fault->flags & PPR_FAULT_EXEC)
495 requested |= VM_EXEC;
496
497 if (fault->flags & PPR_FAULT_READ)
498 requested |= VM_READ;
499
500 if (fault->flags & PPR_FAULT_WRITE)
501 requested |= VM_WRITE;
502
503 return (requested & ~vma->vm_flags) != 0;
504 }
505
506 static void do_fault(struct work_struct *work)
507 {
508 struct fault *fault = container_of(work, struct fault, work);
509 struct vm_area_struct *vma;
510 int ret = VM_FAULT_ERROR;
511 unsigned int flags = 0;
512 struct mm_struct *mm;
513 u64 address;
514
515 mm = fault->state->mm;
516 address = fault->address;
517
518 if (fault->flags & PPR_FAULT_USER)
519 flags |= FAULT_FLAG_USER;
520 if (fault->flags & PPR_FAULT_WRITE)
521 flags |= FAULT_FLAG_WRITE;
522 flags |= FAULT_FLAG_REMOTE;
523
524 down_read(&mm->mmap_sem);
525 vma = find_extend_vma(mm, address);
526 if (!vma || address < vma->vm_start)
527 /* failed to get a vma in the right range */
528 goto out;
529
530 /* Check if we have the right permissions on the vma */
531 if (access_error(vma, fault))
532 goto out;
533
534 ret = handle_mm_fault(vma, address, flags);
535 out:
536 up_read(&mm->mmap_sem);
537
538 if (ret & VM_FAULT_ERROR)
539 /* failed to service fault */
540 handle_fault_error(fault);
541
542 finish_pri_tag(fault->dev_state, fault->state, fault->tag);
543
544 put_pasid_state(fault->state);
545
546 kfree(fault);
547 }
548
549 static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
550 {
551 struct amd_iommu_fault *iommu_fault;
552 struct pasid_state *pasid_state;
553 struct device_state *dev_state;
554 unsigned long flags;
555 struct fault *fault;
556 bool finish;
557 u16 tag;
558 int ret;
559
560 iommu_fault = data;
561 tag = iommu_fault->tag & 0x1ff;
562 finish = (iommu_fault->tag >> 9) & 1;
563
564 ret = NOTIFY_DONE;
565 dev_state = get_device_state(iommu_fault->device_id);
566 if (dev_state == NULL)
567 goto out;
568
569 pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
570 if (pasid_state == NULL || pasid_state->invalid) {
571 /* We know the device but not the PASID -> send INVALID */
572 amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
573 PPR_INVALID, tag);
574 goto out_drop_state;
575 }
576
577 spin_lock_irqsave(&pasid_state->lock, flags);
578 atomic_inc(&pasid_state->pri[tag].inflight);
579 if (finish)
580 pasid_state->pri[tag].finish = true;
581 spin_unlock_irqrestore(&pasid_state->lock, flags);
582
583 fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
584 if (fault == NULL) {
585 /* We are OOM - send success and let the device re-fault */
586 finish_pri_tag(dev_state, pasid_state, tag);
587 goto out_drop_state;
588 }
589
590 fault->dev_state = dev_state;
591 fault->address = iommu_fault->address;
592 fault->state = pasid_state;
593 fault->tag = tag;
594 fault->finish = finish;
595 fault->pasid = iommu_fault->pasid;
596 fault->flags = iommu_fault->flags;
597 INIT_WORK(&fault->work, do_fault);
598
599 queue_work(iommu_wq, &fault->work);
600
601 ret = NOTIFY_OK;
602
603 out_drop_state:
604
605 if (ret != NOTIFY_OK && pasid_state)
606 put_pasid_state(pasid_state);
607
608 put_device_state(dev_state);
609
610 out:
611 return ret;
612 }
613
614 static struct notifier_block ppr_nb = {
615 .notifier_call = ppr_notifier,
616 };
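/*
 * PPR fault flow: ppr_notifier() is called from the IOMMU driver's
 * PPR-log notifier chain, where sleeping is not allowed (hence the
 * GFP_ATOMIC allocation above); it decodes the PRI tag (bits 0-8) and
 * the finish bit (bit 9) and queues a struct fault on iommu_wq.
 * do_fault() then resolves the fault in process context via
 * handle_mm_fault(), and finish_pri_tag() sends the PPR response once
 * the last in-flight request for the tag has completed.
 */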
617
618 int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
619 struct task_struct *task)
620 {
621 struct pasid_state *pasid_state;
622 struct device_state *dev_state;
623 struct mm_struct *mm;
624 u16 devid;
625 int ret;
626
627 might_sleep();
628
629 if (!amd_iommu_v2_supported())
630 return -ENODEV;
631
632 devid = device_id(pdev);
633 dev_state = get_device_state(devid);
634
635 if (dev_state == NULL)
636 return -EINVAL;
637
638 ret = -EINVAL;
639 if (pasid < 0 || pasid >= dev_state->max_pasids)
640 goto out;
641
642 ret = -ENOMEM;
643 pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
644 if (pasid_state == NULL)
645 goto out;
646
647
648 atomic_set(&pasid_state->count, 1);
649 init_waitqueue_head(&pasid_state->wq);
650 spin_lock_init(&pasid_state->lock);
651
652 mm = get_task_mm(task);
653 pasid_state->mm = mm;
654 pasid_state->device_state = dev_state;
655 pasid_state->pasid = pasid;
656 	pasid_state->invalid = true; /* Marked valid only once setup
657 					of the pasid is complete */
658 pasid_state->mn.ops = &iommu_mn;
659
660 if (pasid_state->mm == NULL)
661 goto out_free;
662
663 mmu_notifier_register(&pasid_state->mn, mm);
664
665 ret = set_pasid_state(dev_state, pasid_state, pasid);
666 if (ret)
667 goto out_unregister;
668
669 ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
670 __pa(pasid_state->mm->pgd));
671 if (ret)
672 goto out_clear_state;
673
674 /* Now we are ready to handle faults */
675 pasid_state->invalid = false;
676
677 /*
678 * Drop the reference to the mm_struct here. We rely on the
679 * mmu_notifier release call-back to inform us when the mm
680 * is going away.
681 */
682 mmput(mm);
683
684 return 0;
685
686 out_clear_state:
687 clear_pasid_state(dev_state, pasid);
688
689 out_unregister:
690 mmu_notifier_unregister(&pasid_state->mn, mm);
691 mmput(mm);
692
693 out_free:
694 free_pasid_state(pasid_state);
695
696 out:
697 put_device_state(dev_state);
698
699 return ret;
700 }
701 EXPORT_SYMBOL(amd_iommu_bind_pasid);
702
703 void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
704 {
705 struct pasid_state *pasid_state;
706 struct device_state *dev_state;
707 u16 devid;
708
709 might_sleep();
710
711 if (!amd_iommu_v2_supported())
712 return;
713
714 devid = device_id(pdev);
715 dev_state = get_device_state(devid);
716 if (dev_state == NULL)
717 return;
718
719 if (pasid < 0 || pasid >= dev_state->max_pasids)
720 goto out;
721
722 pasid_state = get_pasid_state(dev_state, pasid);
723 if (pasid_state == NULL)
724 goto out;
725 /*
726 * Drop reference taken here. We are safe because we still hold
727 * the reference taken in the amd_iommu_bind_pasid function.
728 */
729 put_pasid_state(pasid_state);
730
731 /* Clear the pasid state so that the pasid can be re-used */
732 clear_pasid_state(dev_state, pasid_state->pasid);
733
734 /*
735 * Call mmu_notifier_unregister to drop our reference
736 * to pasid_state->mm
737 */
738 mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
739
740 put_pasid_state_wait(pasid_state); /* Reference taken in
741 amd_iommu_bind_pasid */
742 out:
743 /* Drop reference taken in this function */
744 put_device_state(dev_state);
745
746 /* Drop reference taken in amd_iommu_bind_pasid */
747 put_device_state(dev_state);
748 }
749 EXPORT_SYMBOL(amd_iommu_unbind_pasid);
750
751 int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
752 {
753 struct device_state *dev_state;
754 struct iommu_group *group;
755 unsigned long flags;
756 int ret, tmp;
757 u16 devid;
758
759 might_sleep();
760
761 if (!amd_iommu_v2_supported())
762 return -ENODEV;
763
764 if (pasids <= 0 || pasids > (PASID_MASK + 1))
765 return -EINVAL;
766
767 devid = device_id(pdev);
768
769 dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
770 if (dev_state == NULL)
771 return -ENOMEM;
772
773 spin_lock_init(&dev_state->lock);
774 init_waitqueue_head(&dev_state->wq);
775 dev_state->pdev = pdev;
776 dev_state->devid = devid;
777
778 tmp = pasids;
779 for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
780 dev_state->pasid_levels += 1;
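	/*
	 * Each table level resolves 9 bits of the PASID, so the loop
	 * above counts the intermediate levels needed on top of the
	 * leaf table:
	 *
	 *	pasids = 512     -> pasid_levels = 0 (single leaf table)
	 *	pasids = 65536   -> pasid_levels = 1
	 *	pasids = 1048576 -> pasid_levels = 2 (the PASID_MASK + 1
	 *			    upper bound checked earlier)
	 */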
781
782 atomic_set(&dev_state->count, 1);
783 dev_state->max_pasids = pasids;
784
785 ret = -ENOMEM;
786 dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
787 if (dev_state->states == NULL)
788 goto out_free_dev_state;
789
790 dev_state->domain = iommu_domain_alloc(&pci_bus_type);
791 if (dev_state->domain == NULL)
792 goto out_free_states;
793
794 amd_iommu_domain_direct_map(dev_state->domain);
795
796 ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
797 if (ret)
798 goto out_free_domain;
799
800 group = iommu_group_get(&pdev->dev);
801 if (!group) {
802 ret = -EINVAL;
803 goto out_free_domain;
804 }
805
806 ret = iommu_attach_group(dev_state->domain, group);
807 if (ret != 0)
808 goto out_drop_group;
809
810 iommu_group_put(group);
811
812 spin_lock_irqsave(&state_lock, flags);
813
814 if (__get_device_state(devid) != NULL) {
815 spin_unlock_irqrestore(&state_lock, flags);
816 ret = -EBUSY;
817 goto out_free_domain;
818 }
819
820 list_add_tail(&dev_state->list, &state_list);
821
822 spin_unlock_irqrestore(&state_lock, flags);
823
824 return 0;
825
826 out_drop_group:
827 iommu_group_put(group);
828
829 out_free_domain:
830 iommu_domain_free(dev_state->domain);
831
832 out_free_states:
833 free_page((unsigned long)dev_state->states);
834
835 out_free_dev_state:
836 kfree(dev_state);
837
838 return ret;
839 }
840 EXPORT_SYMBOL(amd_iommu_init_device);
841
842 void amd_iommu_free_device(struct pci_dev *pdev)
843 {
844 struct device_state *dev_state;
845 unsigned long flags;
846 u16 devid;
847
848 if (!amd_iommu_v2_supported())
849 return;
850
851 devid = device_id(pdev);
852
853 spin_lock_irqsave(&state_lock, flags);
854
855 dev_state = __get_device_state(devid);
856 if (dev_state == NULL) {
857 spin_unlock_irqrestore(&state_lock, flags);
858 return;
859 }
860
861 list_del(&dev_state->list);
862
863 spin_unlock_irqrestore(&state_lock, flags);
864
865 /* Get rid of any remaining pasid states */
866 free_pasid_states(dev_state);
867
868 put_device_state(dev_state);
869 /*
870 * Wait until the last reference is dropped before freeing
871 * the device state.
872 */
873 wait_event(dev_state->wq, !atomic_read(&dev_state->count));
874 free_device_state(dev_state);
875 }
876 EXPORT_SYMBOL(amd_iommu_free_device);
877
878 int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
879 amd_iommu_invalid_ppr_cb cb)
880 {
881 struct device_state *dev_state;
882 unsigned long flags;
883 u16 devid;
884 int ret;
885
886 if (!amd_iommu_v2_supported())
887 return -ENODEV;
888
889 devid = device_id(pdev);
890
891 spin_lock_irqsave(&state_lock, flags);
892
893 ret = -EINVAL;
894 dev_state = __get_device_state(devid);
895 if (dev_state == NULL)
896 goto out_unlock;
897
898 dev_state->inv_ppr_cb = cb;
899
900 ret = 0;
901
902 out_unlock:
903 spin_unlock_irqrestore(&state_lock, flags);
904
905 return ret;
906 }
907 EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);
908
909 int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
910 amd_iommu_invalidate_ctx cb)
911 {
912 struct device_state *dev_state;
913 unsigned long flags;
914 u16 devid;
915 int ret;
916
917 if (!amd_iommu_v2_supported())
918 return -ENODEV;
919
920 devid = device_id(pdev);
921
922 spin_lock_irqsave(&state_lock, flags);
923
924 ret = -EINVAL;
925 dev_state = __get_device_state(devid);
926 if (dev_state == NULL)
927 goto out_unlock;
928
929 dev_state->inv_ctx_cb = cb;
930
931 ret = 0;
932
933 out_unlock:
934 spin_unlock_irqrestore(&state_lock, flags);
935
936 return ret;
937 }
938 EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);
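/*
 * Usage sketch for a hypothetical IOMMUv2-capable consumer driver
 * (the callback names are made up; the calls are the API exported from
 * this file via <linux/amd-iommu.h>):
 *
 *	ret = amd_iommu_init_device(pdev, 16);     (support up to 16 PASIDs)
 *	if (ret)
 *		return ret;
 *	amd_iommu_set_invalid_ppr_cb(pdev, my_inv_ppr_cb);
 *	amd_iommu_set_invalidate_ctx_cb(pdev, my_inv_ctx_cb);
 *
 *	ret = amd_iommu_bind_pasid(pdev, pasid, current);
 *	...device issues ATS/PRI traffic tagged with 'pasid'...
 *	amd_iommu_unbind_pasid(pdev, pasid);
 *
 *	amd_iommu_free_device(pdev);
 */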
939
940 static int __init amd_iommu_v2_init(void)
941 {
942 int ret;
943
944 pr_info("AMD IOMMUv2 driver by Joerg Roedel <jroedel@suse.de>\n");
945
946 if (!amd_iommu_v2_supported()) {
947 pr_info("AMD IOMMUv2 functionality not available on this system\n");
948 /*
949 * Load anyway to provide the symbols to other modules
950 * which may use AMD IOMMUv2 optionally.
951 */
952 return 0;
953 }
954
955 spin_lock_init(&state_lock);
956
957 ret = -ENOMEM;
958 iommu_wq = alloc_workqueue("amd_iommu_v2", WQ_MEM_RECLAIM, 0);
959 if (iommu_wq == NULL)
960 goto out;
961
962 amd_iommu_register_ppr_notifier(&ppr_nb);
963
964 return 0;
965
966 out:
967 return ret;
968 }
969
970 static void __exit amd_iommu_v2_exit(void)
971 {
972 struct device_state *dev_state;
973 int i;
974
975 if (!amd_iommu_v2_supported())
976 return;
977
978 amd_iommu_unregister_ppr_notifier(&ppr_nb);
979
980 flush_workqueue(iommu_wq);
981
982 /*
983 * The loop below might call flush_workqueue(), so call
984 * destroy_workqueue() after it
985 */
986 for (i = 0; i < MAX_DEVICES; ++i) {
987 dev_state = get_device_state(i);
988
989 if (dev_state == NULL)
990 continue;
991
992 WARN_ON_ONCE(1);
993
994 put_device_state(dev_state);
995 amd_iommu_free_device(dev_state->pdev);
996 }
997
998 destroy_workqueue(iommu_wq);
999 }
1000
1001 module_init(amd_iommu_v2_init);
1002 module_exit(amd_iommu_v2_exit);