/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include <linux/iova.h>
#include <linux/module.h>
#include <linux/slab.h>

void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn, unsigned long pfn_32bit)
{
	/*
	 * IOVA granularity will normally be equal to the smallest
	 * supported IOMMU page size; both *must* be capable of
	 * representing individual CPU pages exactly.
	 */
	BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));

	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached32_node = NULL;
	iovad->granule = granule;
	iovad->start_pfn = start_pfn;
	iovad->dma_32bit_pfn = pfn_32bit;
}
EXPORT_SYMBOL_GPL(init_iova_domain);
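
/*
 * Usage sketch (illustrative only, not called anywhere in this file): a
 * caller owns the iova_domain and typically sizes the granule to match
 * the IOMMU page size. The names and values below are hypothetical.
 */
static void __maybe_unused example_domain_setup(struct iova_domain *iovad)
{
	/* highest pfn reachable through a 32-bit DMA address at this granule */
	unsigned long dma_32bit_pfn = (1UL << (32 - PAGE_SHIFT)) - 1;

	/* CPU-page-sized granule; keep pfn 0 out of play by starting at 1 */
	init_iova_domain(iovad, PAGE_SIZE, 1, dma_32bit_pfn);
}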

/*
 * Find the rbtree node to start a top-down search from. When the caller's
 * limit is exactly the cached 32-bit boundary, resume just below the most
 * recently allocated 32-bit node instead of re-walking from the top.
 */
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
	if ((*limit_pfn != iovad->dma_32bit_pfn) ||
		(iovad->cached32_node == NULL))
		return rb_last(&iovad->rbroot);
	else {
		struct rb_node *prev_node = rb_prev(iovad->cached32_node);
		struct iova *curr_iova =
			container_of(iovad->cached32_node, struct iova, node);
		*limit_pfn = curr_iova->pfn_lo - 1;
		return prev_node;
	}
}

static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
	unsigned long limit_pfn, struct iova *new)
{
	if (limit_pfn != iovad->dma_32bit_pfn)
		return;
	iovad->cached32_node = &new->node;
}

/*
 * On free, keep the cache valid: if the freed range sits at or above the
 * cached node, point the cache at the node following the freed one when
 * that is still below the 32-bit boundary, otherwise drop the cache.
 */
static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;
	struct rb_node *curr;

	if (!iovad->cached32_node)
		return;
	curr = iovad->cached32_node;
	cached_iova = container_of(curr, struct iova, node);

	if (free->pfn_lo >= cached_iova->pfn_lo) {
		struct rb_node *node = rb_next(&free->node);
		struct iova *iova = container_of(node, struct iova, node);

		/* only cache if it's below 32bit pfn */
		if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
			iovad->cached32_node = node;
		else
			iovad->cached32_node = NULL;
	}
}

/*
 * Computes the padding size required to make the start address
 * naturally aligned on the power-of-two order of its size
 */
static unsigned int
iova_get_pad_size(unsigned int size, unsigned int limit_pfn)
{
	return (limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1);
}

static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
		struct iova *new, bool size_aligned)
{
	struct rb_node *prev, *curr = NULL;
	unsigned long flags;
	unsigned long saved_pfn;
	unsigned int pad_size = 0;

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	saved_pfn = limit_pfn;
	curr = __get_cached_rbnode(iovad, &limit_pfn);
	prev = curr;
	while (curr) {
		struct iova *curr_iova = container_of(curr, struct iova, node);

		if (limit_pfn < curr_iova->pfn_lo)
			goto move_left;
		else if (limit_pfn < curr_iova->pfn_hi)
			goto adjust_limit_pfn;
		else {
			if (size_aligned)
				pad_size = iova_get_pad_size(size, limit_pfn);
			if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
				break;	/* found a free slot */
		}
adjust_limit_pfn:
		limit_pfn = curr_iova->pfn_lo - 1;
move_left:
		prev = curr;
		curr = rb_prev(curr);
	}

	if (!curr) {
		if (size_aligned)
			pad_size = iova_get_pad_size(size, limit_pfn);
		if ((iovad->start_pfn + size + pad_size) > limit_pfn) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			return -ENOMEM;
		}
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = limit_pfn - (size + pad_size) + 1;
	new->pfn_hi = new->pfn_lo + size - 1;

	/* Insert the new_iova into domain rbtree by holding writer lock */
	{
		struct rb_node **entry, *parent = NULL;

		/* If we have 'prev', it's a valid place to start the
		   insertion. Otherwise, start from the root. */
		if (prev)
			entry = &prev;
		else
			entry = &iovad->rbroot.rb_node;

		/* Figure out where to put new node */
		while (*entry) {
			struct iova *this = container_of(*entry,
							struct iova, node);
			parent = *entry;

			if (new->pfn_lo < this->pfn_lo)
				entry = &((*entry)->rb_left);
			else if (new->pfn_lo > this->pfn_lo)
				entry = &((*entry)->rb_right);
			else
				BUG(); /* this should not happen */
		}

		/* Add new node and rebalance tree. */
		rb_link_node(&new->node, parent, entry);
		rb_insert_color(&new->node, &iovad->rbroot);
	}
	__cached_rbnode_insert_update(iovad, saved_pfn, new);

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return 0;
}

static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct iova *this = container_of(*new, struct iova, node);

		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else
			BUG(); /* this should not happen */
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}

static struct kmem_cache *iova_cache;
static unsigned int iova_cache_users;
static DEFINE_MUTEX(iova_cache_mutex);

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iova_cache, GFP_ATOMIC);
}
EXPORT_SYMBOL(alloc_iova_mem);

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iova_cache, iova);
}
EXPORT_SYMBOL(free_iova_mem);

int iova_cache_get(void)
{
	mutex_lock(&iova_cache_mutex);
	if (!iova_cache_users) {
		iova_cache = kmem_cache_create(
			"iommu_iova", sizeof(struct iova), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!iova_cache) {
			mutex_unlock(&iova_cache_mutex);
			printk(KERN_ERR "Couldn't create iova cache\n");
			return -ENOMEM;
		}
	}

	iova_cache_users++;
	mutex_unlock(&iova_cache_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(iova_cache_get);

void iova_cache_put(void)
{
	mutex_lock(&iova_cache_mutex);
	if (WARN_ON(!iova_cache_users)) {
		mutex_unlock(&iova_cache_mutex);
		return;
	}
	iova_cache_users--;
	if (!iova_cache_users)
		kmem_cache_destroy(iova_cache);
	mutex_unlock(&iova_cache_mutex);
}
EXPORT_SYMBOL_GPL(iova_cache_put);
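
/*
 * Usage sketch (hypothetical caller): every user of this allocator takes
 * a reference on the shared kmem_cache before allocating iovas and drops
 * it when done, so the cache lives exactly as long as someone needs it.
 */
static int __maybe_unused example_cache_user(void)
{
	int ret = iova_cache_get();

	if (ret)
		return ret;	/* cache could not be created */

	/* ... alloc_iova()/free_iova() calls would go here ... */

	iova_cache_put();
	return 0;
}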

/**
 * alloc_iova - allocates an iova
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @size_aligned: - set if size_aligned address range is required
 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
 * flag is set then the allocated address iova->pfn_lo will be naturally
 * aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
			new_iova, size_aligned);

	if (ret) {
		free_iova_mem(new_iova);
		return NULL;
	}

	return new_iova;
}
EXPORT_SYMBOL_GPL(alloc_iova);
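
/*
 * Mapping sketch (hypothetical DMA path): allocate a size-aligned range
 * below the 32-bit boundary and return it as a bus address. Assumes the
 * PAGE_SIZE granule from the setup sketch above; nrpages is illustrative.
 */
static dma_addr_t __maybe_unused example_map(struct iova_domain *iovad,
					     unsigned long nrpages)
{
	struct iova *iova;

	iova = alloc_iova(iovad, nrpages, iovad->dma_32bit_pfn, true);
	if (!iova)
		return 0;

	/* ... the IOMMU page tables would be programmed here ... */

	return (dma_addr_t)iova->pfn_lo << PAGE_SHIFT;
}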

/**
 * find_iova - finds an iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct rb_node *node;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = iovad->rbroot.rb_node;
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		/* If pfn falls within iova's range, return iova */
		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			/* We are not holding the lock while this iova
			 * is referenced by the caller as the same thread
			 * which called this function also calls __free_iova()
			 * and it is by design that only one thread can possibly
			 * reference a particular iova and hence no conflict.
			 */
			return iova;
		}

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_lo)
			node = node->rb_right;
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return NULL;
}
EXPORT_SYMBOL_GPL(find_iova);

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	free_iova_mem(iova);
}
EXPORT_SYMBOL_GPL(__free_iova);

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct iova *iova = find_iova(iovad, pfn);

	if (iova)
		__free_iova(iovad, iova);
}
EXPORT_SYMBOL_GPL(free_iova);
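
/*
 * Unmap sketch (hypothetical), pairing with the mapping sketch above:
 * convert the bus address back to a pfn and release the range. Assumes
 * the same PAGE_SIZE granule.
 */
static void __maybe_unused example_unmap(struct iova_domain *iovad,
					 dma_addr_t dma_addr)
{
	free_iova(iovad, dma_addr >> PAGE_SHIFT);
}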

/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
 * All the iova's in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = rb_first(&iovad->rbroot);
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		rb_erase(node, &iovad->rbroot);
		free_iova_mem(iova);
		node = rb_first(&iovad->rbroot);
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(put_iova_domain);
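
/*
 * Teardown sketch (hypothetical): a driver releasing a domain destroys
 * any remaining iovas, then drops the reference on the shared cache it
 * took via iova_cache_get().
 */
static void __maybe_unused example_domain_teardown(struct iova_domain *iovad)
{
	put_iova_domain(iovad);
	iova_cache_put();
}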

static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = container_of(node, struct iova, node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}

static inline struct iova *
alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (iova) {
		iova->pfn_lo = pfn_lo;
		iova->pfn_hi = pfn_hi;
	}

	return iova;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_and_init_iova(pfn_lo, pfn_hi);
	if (iova)
		iova_insert_rbtree(&iovad->rbroot, iova);

	return iova;
}

static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: - iova domain pointer
 * @pfn_lo: - lower page frame address
 * @pfn_hi: - higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this address is not dished out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = container_of(node, struct iova, node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;
		} else if (overlap)
			break;
	}

	/* We are here either because this is the first reserved node
	 * or we need to insert the remaining non-overlapping addr range
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(reserve_iova);
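
/*
 * Usage sketch (hypothetical): carve out an address window the device
 * must never be handed, e.g. a platform's MSI doorbell region. The pfn
 * values assume a PAGE_SIZE granule and are illustrative only.
 */
static void __maybe_unused example_reserve(struct iova_domain *iovad)
{
	unsigned long lo = 0xfee00000UL >> PAGE_SHIFT;
	unsigned long hi = 0xfeefffffUL >> PAGE_SHIFT;

	if (!reserve_iova(iovad, lo, hi))
		pr_warn("could not reserve doorbell window\n");
}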

/**
 * copy_reserved_iova - copies the reserved iovas between domains
 * @from: - source domain from where to copy
 * @to: - destination domain where to copy
 * This function copies reserved iova's from one domain to
 * another.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
	unsigned long flags;
	struct rb_node *node;

	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
		struct iova *iova = container_of(node, struct iova, node);
		struct iova *new_iova;

		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
		if (!new_iova)
			printk(KERN_ERR "Reserve iova range %lx-%lx failed\n",
				iova->pfn_lo, iova->pfn_hi);
	}
	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(copy_reserved_iova);

/*
 * Split the node covering [iova->pfn_lo, iova->pfn_hi] around the range
 * [pfn_lo, pfn_hi]: the middle part is detached from the tree and
 * returned to the caller, while any remainder below or above it is
 * re-inserted as a fresh node.
 */
struct iova *
split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	unsigned long flags;
	struct iova *prev = NULL, *next = NULL;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	if (iova->pfn_lo < pfn_lo) {
		prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
		if (prev == NULL)
			goto error;
	}
	if (iova->pfn_hi > pfn_hi) {
		next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
		if (next == NULL)
			goto error;
	}

	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);

	if (prev) {
		iova_insert_rbtree(&iovad->rbroot, prev);
		iova->pfn_lo = pfn_lo;
	}
	if (next) {
		iova_insert_rbtree(&iovad->rbroot, next);
		iova->pfn_hi = pfn_hi;
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return iova;

error:
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	if (prev)
		free_iova_mem(prev);
	return NULL;
}
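
/*
 * Worked example (hypothetical numbers): splitting a node covering pfns
 * [0x100, 0x1ff] around [0x140, 0x17f] re-inserts [0x100, 0x13f] and
 * [0x180, 0x1ff] as fresh nodes and returns the detached node, now
 * trimmed to [0x140, 0x17f].
 */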

MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
MODULE_LICENSE("GPL");