/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include <linux/iova.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
static bool iova_rcache_insert(struct iova_domain *iovad,
			       unsigned long pfn,
			       unsigned long size);
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn);
static void init_iova_rcaches(struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn, unsigned long pfn_32bit)
{
	/*
	 * IOVA granularity will normally be equal to the smallest
	 * supported IOMMU page size; both *must* be capable of
	 * representing individual CPU pages exactly.
	 */
	BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));

	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached32_node = NULL;
	iovad->granule = granule;
	iovad->start_pfn = start_pfn;
	iovad->dma_32bit_pfn = pfn_32bit + 1;
	init_iova_rcaches(iovad);
}
EXPORT_SYMBOL_GPL(init_iova_domain);
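
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * pairs iova_cache_get() with init_iova_domain(), using the IOMMU page
 * size as the granule.  The helper name example_domain_setup() and the
 * choice of start pfn and 32-bit ceiling are assumptions for illustration.
 */
static int __maybe_unused example_domain_setup(struct iova_domain *iovad)
{
	/* last pfn that is still addressable with a 32-bit DMA mask */
	unsigned long pfn_32bit = ((1ULL << 32) >> PAGE_SHIFT) - 1;
	int ret;

	ret = iova_cache_get();		/* make sure the 'struct iova' slab exists */
	if (ret)
		return ret;

	/* granule == CPU page size; hand out IOVAs starting at pfn 1 */
	init_iova_domain(iovad, PAGE_SIZE, 1, pfn_32bit);
	return 0;
}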
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
	if ((*limit_pfn > iovad->dma_32bit_pfn) ||
		(iovad->cached32_node == NULL))
		return rb_last(&iovad->rbroot);
	else {
		struct rb_node *prev_node = rb_prev(iovad->cached32_node);
		struct iova *curr_iova =
			rb_entry(iovad->cached32_node, struct iova, node);
		*limit_pfn = curr_iova->pfn_lo;
		return prev_node;
	}
}
static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
	unsigned long limit_pfn, struct iova *new)
{
	if (limit_pfn != iovad->dma_32bit_pfn)
		return;
	iovad->cached32_node = &new->node;
}
static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;
	struct rb_node *curr;

	if (!iovad->cached32_node)
		return;
	curr = iovad->cached32_node;
	cached_iova = rb_entry(curr, struct iova, node);

	if (free->pfn_lo >= cached_iova->pfn_lo) {
		struct rb_node *node = rb_next(&free->node);
		struct iova *iova = rb_entry(node, struct iova, node);

		/* only cache if it's below 32bit pfn */
		if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
			iovad->cached32_node = node;
		else
			iovad->cached32_node = NULL;
	}
}
/* Insert the iova into domain rbtree by holding writer lock */
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova,
		   struct rb_node *start)
{
	struct rb_node **new, *parent = NULL;

	new = (start) ? &start : &(root->rb_node);
	/* Figure out where to put new node */
	while (*new) {
		struct iova *this = rb_entry(*new, struct iova, node);

		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else {
			WARN_ON(1);	/* this should not happen */
			return;
		}
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}
/*
 * Computes the padding size required, to make the start address
 * naturally aligned on the power-of-two order of its size
 */
static unsigned int
iova_get_pad_size(unsigned int size, unsigned int limit_pfn)
{
	return (limit_pfn - size) & (__roundup_pow_of_two(size) - 1);
}
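
/*
 * Worked example (added for illustration, not in the original file):
 * for size = 5 pages and limit_pfn = 0x103, __roundup_pow_of_two(5) = 8,
 * so pad_size = (0x103 - 5) & 7 = 0xfe & 7 = 6.  Allocating at
 * limit_pfn - (size + pad_size) = 0x103 - 11 = 0xf8 makes pfn_lo a
 * multiple of 8, i.e. naturally aligned on the rounded-up size.
 */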
static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
		struct iova *new, bool size_aligned)
{
	struct rb_node *prev, *curr = NULL;
	unsigned long flags;
	unsigned long saved_pfn;
	unsigned int pad_size = 0;

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	saved_pfn = limit_pfn;
	curr = __get_cached_rbnode(iovad, &limit_pfn);
	prev = curr;
	while (curr) {
		struct iova *curr_iova = rb_entry(curr, struct iova, node);

		if (limit_pfn <= curr_iova->pfn_lo) {
			goto move_left;
		} else if (limit_pfn > curr_iova->pfn_hi) {
			if (size_aligned)
				pad_size = iova_get_pad_size(size, limit_pfn);
			if ((curr_iova->pfn_hi + size + pad_size) < limit_pfn)
				break;	/* found a free slot */
		}
		limit_pfn = curr_iova->pfn_lo;
move_left:
		prev = curr;
		curr = rb_prev(curr);
	}

	if (!curr) {
		if (size_aligned)
			pad_size = iova_get_pad_size(size, limit_pfn);
		if ((iovad->start_pfn + size + pad_size) > limit_pfn) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			return -ENOMEM;
		}
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = limit_pfn - (size + pad_size);
	new->pfn_hi = new->pfn_lo + size - 1;

	/* If we have 'prev', it's a valid place to start the insertion. */
	iova_insert_rbtree(&iovad->rbroot, new, prev);
	__cached_rbnode_insert_update(iovad, saved_pfn, new);

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return 0;
}
static struct kmem_cache *iova_cache;
static unsigned int iova_cache_users;
static DEFINE_MUTEX(iova_cache_mutex);

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iova_cache, GFP_ATOMIC);
}
EXPORT_SYMBOL(alloc_iova_mem);

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iova_cache, iova);
}
EXPORT_SYMBOL(free_iova_mem);
int iova_cache_get(void)
{
	mutex_lock(&iova_cache_mutex);
	if (!iova_cache_users) {
		iova_cache = kmem_cache_create(
			"iommu_iova", sizeof(struct iova), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!iova_cache) {
			mutex_unlock(&iova_cache_mutex);
			printk(KERN_ERR "Couldn't create iova cache\n");
			return -ENOMEM;
		}
	}

	iova_cache_users++;
	mutex_unlock(&iova_cache_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(iova_cache_get);

void iova_cache_put(void)
{
	mutex_lock(&iova_cache_mutex);
	if (WARN_ON(!iova_cache_users)) {
		mutex_unlock(&iova_cache_mutex);
		return;
	}
	iova_cache_users--;
	if (!iova_cache_users)
		kmem_cache_destroy(iova_cache);
	mutex_unlock(&iova_cache_mutex);
}
EXPORT_SYMBOL_GPL(iova_cache_put);
/**
 * alloc_iova - allocates an iova
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @size_aligned: - set if size_aligned address range is required
 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
 * flag is set then the allocated address iova->pfn_lo will be naturally
 * aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1,
			new_iova, size_aligned);

	if (ret) {
		free_iova_mem(new_iova);
		return NULL;
	}

	return new_iova;
}
EXPORT_SYMBOL_GPL(alloc_iova);
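
/*
 * Illustrative sketch (not part of the original file): allocating a
 * size-aligned range below the 32-bit boundary and turning it into a DMA
 * address with the iova_align()/iova_shift()/iova_dma_addr() helpers from
 * <linux/iova.h>.  example_alloc_range() is a hypothetical caller.
 */
static dma_addr_t __maybe_unused example_alloc_range(struct iova_domain *iovad,
						     size_t size)
{
	unsigned long nrpages = iova_align(iovad, size) >> iova_shift(iovad);
	unsigned long limit_pfn = iovad->dma_32bit_pfn - 1;
	struct iova *iova;

	iova = alloc_iova(iovad, nrpages, limit_pfn, true);
	if (!iova)
		return 0;

	/* the caller would map nrpages pages here, and later release the
	 * range with __free_iova(iovad, find_iova(iovad, iova->pfn_lo)) */
	return iova_dma_addr(iovad, iova);
}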
static struct iova *private_find_iova(struct iova_domain *iovad,
		unsigned long pfn)
{
	struct rb_node *node = iovad->rbroot.rb_node;

	assert_spin_locked(&iovad->iova_rbtree_lock);

	while (node) {
		struct iova *iova = rb_entry(node, struct iova, node);

		/* If pfn falls within iova's range, return iova */
		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi))
			return iova;

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_lo)
			node = node->rb_right;
	}

	return NULL;
}

static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
{
	assert_spin_locked(&iovad->iova_rbtree_lock);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	free_iova_mem(iova);
}
/**
 * find_iova - finds an iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct iova *iova;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	iova = private_find_iova(iovad, pfn);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(find_iova);

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	private_free_iova(iovad, iova);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(__free_iova);

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct iova *iova = find_iova(iovad, pfn);

	if (iova)
		__free_iova(iovad, iova);
}
EXPORT_SYMBOL_GPL(free_iova);
/**
 * alloc_iova_fast - allocates an iova from rcache
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * This function tries to satisfy an iova allocation from the rcache,
 * and falls back to regular allocation on failure.
 */
unsigned long
alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
		unsigned long limit_pfn)
{
	bool flushed_rcache = false;
	unsigned long iova_pfn;
	struct iova *new_iova;

	iova_pfn = iova_rcache_get(iovad, size, limit_pfn);
	if (iova_pfn)
		return iova_pfn;

retry:
	new_iova = alloc_iova(iovad, size, limit_pfn, true);
	if (!new_iova) {
		unsigned int cpu;

		if (flushed_rcache)
			return 0;

		/* Try replenishing IOVAs by flushing rcache. */
		flushed_rcache = true;
		for_each_online_cpu(cpu)
			free_cpu_cached_iovas(cpu, iovad);
		goto retry;
	}

	return new_iova->pfn_lo;
}
EXPORT_SYMBOL_GPL(alloc_iova_fast);

/**
 * free_iova_fast - free iova pfn range into rcache
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * @size: - # of pages in range
 * This function frees an iova range by trying to put it into the rcache,
 * falling back to regular iova deallocation via free_iova() if this fails.
 */
void
free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
{
	if (iova_rcache_insert(iovad, pfn, size))
		return;

	free_iova(iovad, pfn);
}
EXPORT_SYMBOL_GPL(free_iova_fast);
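
/*
 * Illustrative sketch (not part of the original file): the per-CPU cached
 * fast path as a dma-iommu style caller might use it.  The helper names
 * and the 32-bit limit below are assumptions for illustration only.
 */
static unsigned long __maybe_unused example_fast_alloc(struct iova_domain *iovad,
							size_t size)
{
	unsigned long nrpages = iova_align(iovad, size) >> iova_shift(iovad);

	/* try the rcache first, fall back to the rbtree on a miss */
	return alloc_iova_fast(iovad, nrpages, iovad->dma_32bit_pfn - 1);
}

static void __maybe_unused example_fast_free(struct iova_domain *iovad,
					     unsigned long pfn, size_t size)
{
	unsigned long nrpages = iova_align(iovad, size) >> iova_shift(iovad);

	/* returns the range to the rcache, or to the rbtree if the cache is full */
	free_iova_fast(iovad, pfn, nrpages);
}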
/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
 * All the iova's in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct rb_node *node;
	unsigned long flags;

	free_iova_rcaches(iovad);
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = rb_first(&iovad->rbroot);
	while (node) {
		struct iova *iova = rb_entry(node, struct iova, node);

		rb_erase(node, &iovad->rbroot);
		free_iova_mem(iova);
		node = rb_first(&iovad->rbroot);
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(put_iova_domain);
static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = rb_entry(node, struct iova, node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}

static inline struct iova *
alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (iova) {
		iova->pfn_lo = pfn_lo;
		iova->pfn_hi = pfn_hi;
	}

	return iova;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_and_init_iova(pfn_lo, pfn_hi);
	if (iova)
		iova_insert_rbtree(&iovad->rbroot, iova, NULL);

	return iova;
}

static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}
/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: - iova domain pointer
 * @pfn_lo: - lower page frame address
 * @pfn_hi: - higher pfn address
 * This function allocates and reserves the address range from pfn_lo to
 * pfn_hi so that this range is not handed out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = rb_entry(node, struct iova, node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;
		} else if (overlap)
			break;
	}

	/* We are here either because this is the first reserved node
	 * or we need to insert the remaining non-overlapping address range
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(reserve_iova);
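
/*
 * Illustrative sketch (not part of the original file): carving a window
 * out of the allocatable space, e.g. an MSI doorbell or PCI I/O aperture,
 * so alloc_iova() never hands it out.  The address range used here is an
 * arbitrary example and example_reserve_window() is hypothetical.
 */
static void __maybe_unused example_reserve_window(struct iova_domain *iovad)
{
	dma_addr_t start = 0xfee00000;		/* hypothetical window base */
	dma_addr_t end   = 0xfeefffff;		/* hypothetical window end  */

	if (!reserve_iova(iovad, iova_pfn(iovad, start), iova_pfn(iovad, end)))
		pr_err("example: failed to reserve %pad..%pad\n", &start, &end);
}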
/**
 * copy_reserved_iova - copies the reserved iovas between domains
 * @from: - source domain from where to copy
 * @to: - destination domain where to copy
 * This function copies reserved iova's from one domain to
 * another domain.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
	unsigned long flags;
	struct rb_node *node;

	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
		struct iova *iova = rb_entry(node, struct iova, node);
		struct iova *new_iova;

		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
		if (!new_iova)
			printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
				iova->pfn_lo, iova->pfn_lo);
	}
	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(copy_reserved_iova);
struct iova *
split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
		      unsigned long pfn_lo, unsigned long pfn_hi)
{
	unsigned long flags;
	struct iova *prev = NULL, *next = NULL;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	if (iova->pfn_lo < pfn_lo) {
		prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
		if (prev == NULL)
			goto error;
	}
	if (iova->pfn_hi > pfn_hi) {
		next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
		if (next == NULL)
			goto error;
	}

	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);

	if (prev) {
		iova_insert_rbtree(&iovad->rbroot, prev, NULL);
		iova->pfn_lo = pfn_lo;
	}
	if (next) {
		iova_insert_rbtree(&iovad->rbroot, next, NULL);
		iova->pfn_hi = pfn_hi;
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return iova;

error:
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	if (prev)
		free_iova_mem(prev);
	return NULL;
}
/*
 * Magazine caches for IOVA ranges.  For an introduction to magazines,
 * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
 * Allocator to Many CPUs and Arbitrary Resources" by Bonwick and Adams.
 * For simplicity, we use a static magazine size and don't implement the
 * dynamic size tuning described in the paper.
 */

#define IOVA_MAG_SIZE 128

struct iova_magazine {
	unsigned long size;
	unsigned long pfns[IOVA_MAG_SIZE];
};

struct iova_cpu_rcache {
	spinlock_t lock;
	struct iova_magazine *loaded;
	struct iova_magazine *prev;
};

static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
{
	return kzalloc(sizeof(struct iova_magazine), flags);
}

static void iova_magazine_free(struct iova_magazine *mag)
{
	kfree(mag);
}

static void
iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
{
	unsigned long flags;
	int i;

	if (!mag)
		return;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);

	for (i = 0 ; i < mag->size; ++i) {
		struct iova *iova = private_find_iova(iovad, mag->pfns[i]);

		BUG_ON(!iova);
		private_free_iova(iovad, iova);
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	mag->size = 0;
}

static bool iova_magazine_full(struct iova_magazine *mag)
{
	return (mag && mag->size == IOVA_MAG_SIZE);
}

static bool iova_magazine_empty(struct iova_magazine *mag)
{
	return (!mag || mag->size == 0);
}

static unsigned long iova_magazine_pop(struct iova_magazine *mag,
				       unsigned long limit_pfn)
{
	BUG_ON(iova_magazine_empty(mag));

	if (mag->pfns[mag->size - 1] >= limit_pfn)
		return 0;

	return mag->pfns[--mag->size];
}

static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
{
	BUG_ON(iova_magazine_full(mag));

	mag->pfns[mag->size++] = pfn;
}
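
/*
 * Illustrative sketch (not part of the original file): the magazine is a
 * simple LIFO of pfns, and iova_magazine_pop() refuses to hand back a pfn
 * at or above the caller's DMA limit.  example_mag_demo() exists only to
 * make that behaviour concrete; the pfn values are arbitrary.
 */
static void __maybe_unused example_mag_demo(void)
{
	struct iova_magazine *mag = iova_magazine_alloc(GFP_KERNEL);

	if (!mag)
		return;

	iova_magazine_push(mag, 0x1000);	/* below a 0x100000 limit */
	iova_magazine_push(mag, 0x200000);	/* above a 0x100000 limit */

	/* the top entry (0x200000) violates the limit, so the pop fails */
	WARN_ON(iova_magazine_pop(mag, 0x100000) != 0);

	iova_magazine_free(mag);
}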
static void init_iova_rcaches(struct iova_domain *iovad)
{
	struct iova_cpu_rcache *cpu_rcache;
	struct iova_rcache *rcache;
	unsigned int cpu;
	int i;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		spin_lock_init(&rcache->lock);
		rcache->depot_size = 0;
		rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size());
		if (WARN_ON(!rcache->cpu_rcaches))
			continue;
		for_each_possible_cpu(cpu) {
			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
			spin_lock_init(&cpu_rcache->lock);
			cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL);
			cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL);
		}
	}
}
/*
 * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and
 * return true on success.  Can fail if rcache is full and we can't free
 * space, and free_iova() (our only caller) will then return the IOVA
 * range to the rbtree instead.
 */
static bool __iova_rcache_insert(struct iova_domain *iovad,
				 struct iova_rcache *rcache,
				 unsigned long iova_pfn)
{
	struct iova_magazine *mag_to_free = NULL;
	struct iova_cpu_rcache *cpu_rcache;
	bool can_insert = false;
	unsigned long flags;

	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
	spin_lock_irqsave(&cpu_rcache->lock, flags);

	if (!iova_magazine_full(cpu_rcache->loaded)) {
		can_insert = true;
	} else if (!iova_magazine_full(cpu_rcache->prev)) {
		swap(cpu_rcache->prev, cpu_rcache->loaded);
		can_insert = true;
	} else {
		struct iova_magazine *new_mag = iova_magazine_alloc(GFP_ATOMIC);

		if (new_mag) {
			spin_lock(&rcache->lock);
			if (rcache->depot_size < MAX_GLOBAL_MAGS) {
				rcache->depot[rcache->depot_size++] =
						cpu_rcache->loaded;
			} else {
				mag_to_free = cpu_rcache->loaded;
			}
			spin_unlock(&rcache->lock);

			cpu_rcache->loaded = new_mag;
			can_insert = true;
		}
	}

	if (can_insert)
		iova_magazine_push(cpu_rcache->loaded, iova_pfn);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);

	if (mag_to_free) {
		iova_magazine_free_pfns(mag_to_free, iovad);
		iova_magazine_free(mag_to_free);
	}

	return can_insert;
}
static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn,
			       unsigned long size)
{
	unsigned int log_size = order_base_2(size);

	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
		return false;

	return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn);
}
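
/*
 * Worked example (added for illustration, not in the original file):
 * the rcache is indexed by order_base_2(size), so with
 * IOVA_RANGE_CACHE_MAX_SIZE defined as 6 in <linux/iova.h> only ranges of
 * up to 2^5 = 32 pages are cached: an 8-page range maps to bucket 3 and
 * is cached, while a 128-page range maps to 7 and always takes the
 * rbtree path.
 */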
/*
 * Caller wants to allocate a new IOVA range from 'rcache'.  If we can
 * satisfy the request, return a matching non-NULL range and remove
 * it from the 'rcache'.
 */
static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
				       unsigned long limit_pfn)
{
	struct iova_cpu_rcache *cpu_rcache;
	unsigned long iova_pfn = 0;
	bool has_pfn = false;
	unsigned long flags;

	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
	spin_lock_irqsave(&cpu_rcache->lock, flags);

	if (!iova_magazine_empty(cpu_rcache->loaded)) {
		has_pfn = true;
	} else if (!iova_magazine_empty(cpu_rcache->prev)) {
		swap(cpu_rcache->prev, cpu_rcache->loaded);
		has_pfn = true;
	} else {
		spin_lock(&rcache->lock);
		if (rcache->depot_size > 0) {
			iova_magazine_free(cpu_rcache->loaded);
			cpu_rcache->loaded = rcache->depot[--rcache->depot_size];
			has_pfn = true;
		}
		spin_unlock(&rcache->lock);
	}

	if (has_pfn)
		iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);

	return iova_pfn;
}
/*
 * Try to satisfy IOVA allocation range from rcache.  Fail if requested
 * size is too big or the DMA limit we are given isn't satisfied by the
 * top element in the magazine.
 */
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn)
{
	unsigned int log_size = order_base_2(size);

	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
		return 0;

	return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn);
}
/*
 * Free a cpu's rcache.
 */
static void free_cpu_iova_rcache(unsigned int cpu, struct iova_domain *iovad,
				 struct iova_rcache *rcache)
{
	struct iova_cpu_rcache *cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
	unsigned long flags;

	spin_lock_irqsave(&cpu_rcache->lock, flags);

	iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
	iova_magazine_free(cpu_rcache->loaded);

	iova_magazine_free_pfns(cpu_rcache->prev, iovad);
	iova_magazine_free(cpu_rcache->prev);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);
}
/*
 * free rcache data structures.
 */
static void free_iova_rcaches(struct iova_domain *iovad)
{
	struct iova_rcache *rcache;
	unsigned long flags;
	unsigned int cpu;
	int i, j;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		for_each_possible_cpu(cpu)
			free_cpu_iova_rcache(cpu, iovad, rcache);
		spin_lock_irqsave(&rcache->lock, flags);
		free_percpu(rcache->cpu_rcaches);
		for (j = 0; j < rcache->depot_size; ++j) {
			iova_magazine_free_pfns(rcache->depot[j], iovad);
			iova_magazine_free(rcache->depot[j]);
		}
		spin_unlock_irqrestore(&rcache->lock, flags);
	}
}
/*
 * free all the IOVA ranges cached by a cpu (used when cpu is unplugged)
 */
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
{
	struct iova_cpu_rcache *cpu_rcache;
	struct iova_rcache *rcache;
	unsigned long flags;
	int i;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
		spin_lock_irqsave(&cpu_rcache->lock, flags);
		iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
		iova_magazine_free_pfns(cpu_rcache->prev, iovad);
		spin_unlock_irqrestore(&cpu_rcache->lock, flags);
	}
}
MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
MODULE_LICENSE("GPL");