/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include <linux/iova.h>
#include <linux/slab.h>

void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn, unsigned long pfn_32bit)
{
	/*
	 * IOVA granularity will normally be equal to the smallest
	 * supported IOMMU page size; both *must* be capable of
	 * representing individual CPU pages exactly.
	 */
	BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));

	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached32_node = NULL;
	iovad->granule = granule;
	iovad->start_pfn = start_pfn;
	iovad->dma_32bit_pfn = pfn_32bit;
}
EXPORT_SYMBOL_GPL(init_iova_domain);
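
/*
 * Illustrative sketch of init-time usage, assuming a 4 KiB granule;
 * IOVA_START_PFN and DMA_32BIT_PFN are hypothetical caller-defined
 * constants, not part of this file:
 *
 *	init_iova_domain(&domain->iovad, SZ_4K,
 *			 IOVA_START_PFN, DMA_32BIT_PFN);
 */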

static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
	if ((*limit_pfn != iovad->dma_32bit_pfn) ||
		(iovad->cached32_node == NULL))
		return rb_last(&iovad->rbroot);
	else {
		struct rb_node *prev_node = rb_prev(iovad->cached32_node);
		struct iova *curr_iova =
			container_of(iovad->cached32_node, struct iova, node);
		*limit_pfn = curr_iova->pfn_lo - 1;
		return prev_node;
	}
}

static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
	unsigned long limit_pfn, struct iova *new)
{
	if (limit_pfn != iovad->dma_32bit_pfn)
		return;
	iovad->cached32_node = &new->node;
}

static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;
	struct rb_node *curr;

	if (!iovad->cached32_node)
		return;
	curr = iovad->cached32_node;
	cached_iova = container_of(curr, struct iova, node);

	if (free->pfn_lo >= cached_iova->pfn_lo) {
		struct rb_node *node = rb_next(&free->node);
		struct iova *iova = container_of(node, struct iova, node);

		/* only cache if it's below 32bit pfn */
		if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
			iovad->cached32_node = node;
		else
			iovad->cached32_node = NULL;
	}
}

/*
 * Computes the padding size required, to make the start address
 * naturally aligned on the power-of-two order of its size
 */
static unsigned int
iova_get_pad_size(unsigned int size, unsigned int limit_pfn)
{
	return (limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1);
}
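
/*
 * Worked example: for size = 5 and limit_pfn = 0xfffff,
 * __roundup_pow_of_two(5) is 8, so pad_size = (0x100000 - 5) & 7 = 3.
 * The caller then places pfn_lo = limit_pfn - (size + pad_size) + 1 =
 * 0xffff8, which is aligned on the rounded-up (8-page) boundary.
 */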
99 | ||
ddf02886 | 100 | static int __alloc_and_insert_iova_range(struct iova_domain *iovad, |
101 | unsigned long size, unsigned long limit_pfn, | |
102 | struct iova *new, bool size_aligned) | |
f8de50eb | 103 | { |
ddf02886 | 104 | struct rb_node *prev, *curr = NULL; |
f8de50eb KA |
105 | unsigned long flags; |
106 | unsigned long saved_pfn; | |
f76aec76 | 107 | unsigned int pad_size = 0; |
f8de50eb KA |
108 | |
109 | /* Walk the tree backwards */ | |
110 | spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); | |
111 | saved_pfn = limit_pfn; | |
112 | curr = __get_cached_rbnode(iovad, &limit_pfn); | |
ddf02886 | 113 | prev = curr; |
f8de50eb KA |
114 | while (curr) { |
115 | struct iova *curr_iova = container_of(curr, struct iova, node); | |
ddf02886 | 116 | |
f8de50eb KA |
117 | if (limit_pfn < curr_iova->pfn_lo) |
118 | goto move_left; | |
f76aec76 | 119 | else if (limit_pfn < curr_iova->pfn_hi) |
f8de50eb | 120 | goto adjust_limit_pfn; |
f76aec76 KA |
121 | else { |
122 | if (size_aligned) | |
123 | pad_size = iova_get_pad_size(size, limit_pfn); | |
124 | if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn) | |
125 | break; /* found a free slot */ | |
126 | } | |
f8de50eb KA |
127 | adjust_limit_pfn: |
128 | limit_pfn = curr_iova->pfn_lo - 1; | |
129 | move_left: | |
ddf02886 | 130 | prev = curr; |
f8de50eb KA |
131 | curr = rb_prev(curr); |
132 | } | |
133 | ||
f76aec76 KA |
134 | if (!curr) { |
135 | if (size_aligned) | |
136 | pad_size = iova_get_pad_size(size, limit_pfn); | |
1b722500 | 137 | if ((iovad->start_pfn + size + pad_size) > limit_pfn) { |
f76aec76 KA |
138 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); |
139 | return -ENOMEM; | |
140 | } | |
f8de50eb | 141 | } |
f76aec76 KA |
142 | |
143 | /* pfn_lo will point to size aligned address if size_aligned is set */ | |
144 | new->pfn_lo = limit_pfn - (size + pad_size) + 1; | |
145 | new->pfn_hi = new->pfn_lo + size - 1; | |
f8de50eb | 146 | |
ddf02886 | 147 | /* Insert the new_iova into domain rbtree by holding writer lock */ |
148 | /* Add new node and rebalance tree. */ | |
149 | { | |
a15a519e DW |
150 | struct rb_node **entry, *parent = NULL; |
151 | ||
152 | /* If we have 'prev', it's a valid place to start the | |
153 | insertion. Otherwise, start from the root. */ | |
154 | if (prev) | |
155 | entry = &prev; | |
156 | else | |
157 | entry = &iovad->rbroot.rb_node; | |
158 | ||
ddf02886 | 159 | /* Figure out where to put new node */ |
160 | while (*entry) { | |
161 | struct iova *this = container_of(*entry, | |
162 | struct iova, node); | |
163 | parent = *entry; | |
164 | ||
165 | if (new->pfn_lo < this->pfn_lo) | |
166 | entry = &((*entry)->rb_left); | |
167 | else if (new->pfn_lo > this->pfn_lo) | |
168 | entry = &((*entry)->rb_right); | |
169 | else | |
170 | BUG(); /* this should not happen */ | |
171 | } | |
172 | ||
173 | /* Add new node and rebalance tree. */ | |
174 | rb_link_node(&new->node, parent, entry); | |
175 | rb_insert_color(&new->node, &iovad->rbroot); | |
176 | } | |
177 | __cached_rbnode_insert_update(iovad, saved_pfn, new); | |
178 | ||
f8de50eb | 179 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); |
ddf02886 | 180 | |
181 | ||
f8de50eb KA |
182 | return 0; |
183 | } | |
184 | ||
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	/* Figure out where to put new node */
	while (*new) {
		struct iova *this = container_of(*new, struct iova, node);

		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else
			BUG(); /* this should not happen */
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}

static struct kmem_cache *iova_cache;
static unsigned int iova_cache_users;
static DEFINE_MUTEX(iova_cache_mutex);

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iova_cache, GFP_ATOMIC);
}
EXPORT_SYMBOL(alloc_iova_mem);

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iova_cache, iova);
}
EXPORT_SYMBOL(free_iova_mem);

int iova_cache_get(void)
{
	mutex_lock(&iova_cache_mutex);
	if (!iova_cache_users) {
		iova_cache = kmem_cache_create(
			"iommu_iova", sizeof(struct iova), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!iova_cache) {
			mutex_unlock(&iova_cache_mutex);
			printk(KERN_ERR "Couldn't create iova cache\n");
			return -ENOMEM;
		}
	}

	iova_cache_users++;
	mutex_unlock(&iova_cache_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(iova_cache_get);

void iova_cache_put(void)
{
	mutex_lock(&iova_cache_mutex);
	if (WARN_ON(!iova_cache_users)) {
		mutex_unlock(&iova_cache_mutex);
		return;
	}
	iova_cache_users--;
	if (!iova_cache_users)
		kmem_cache_destroy(iova_cache);
	mutex_unlock(&iova_cache_mutex);
}
EXPORT_SYMBOL_GPL(iova_cache_put);
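
/*
 * Illustrative sketch, not from any in-tree driver: users pin the shared
 * iova slab cache for as long as they may call alloc_iova_mem().  The
 * my_driver_init/my_driver_exit names below are hypothetical.
 *
 *	static int __init my_driver_init(void)
 *	{
 *		int ret = iova_cache_get();
 *
 *		if (ret)
 *			return ret;
 *		return 0;
 *	}
 *
 *	static void __exit my_driver_exit(void)
 *	{
 *		iova_cache_put();
 *	}
 */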

/**
 * alloc_iova - allocates an iova
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @size_aligned: - set if size_aligned address range is required
 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
 * flag is set then the allocated address iova->pfn_lo will be naturally
 * aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
			new_iova, size_aligned);

	if (ret) {
		free_iova_mem(new_iova);
		return NULL;
	}

	return new_iova;
}
EXPORT_SYMBOL_GPL(alloc_iova);
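
/*
 * Illustrative sketch, assuming a domain initialised by init_iova_domain()
 * and a hypothetical page count "nrpages": allocate a size-aligned range
 * below the 32-bit boundary, then release it with __free_iova().
 *
 *	struct iova *iova;
 *
 *	iova = alloc_iova(iovad, nrpages, iovad->dma_32bit_pfn, true);
 *	if (!iova)
 *		return -ENOMEM;
 *	...
 *	__free_iova(iovad, iova);
 */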

/**
 * find_iova - finds an iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct rb_node *node;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = iovad->rbroot.rb_node;
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		/* If pfn falls within iova's range, return iova */
		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			/* We are not holding the lock while this iova
			 * is referenced by the caller as the same thread
			 * which called this function also calls __free_iova()
			 * and it is by design that only one thread can possibly
			 * reference a particular iova and hence no conflict.
			 */
			return iova;
		}

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_lo)
			node = node->rb_right;
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return NULL;
}
EXPORT_SYMBOL_GPL(find_iova);

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	free_iova_mem(iova);
}
EXPORT_SYMBOL_GPL(__free_iova);

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct iova *iova = find_iova(iovad, pfn);

	if (iova)
		__free_iova(iovad, iova);
}
EXPORT_SYMBOL_GPL(free_iova);

/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
 * All the iova's in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = rb_first(&iovad->rbroot);
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		rb_erase(node, &iovad->rbroot);
		free_iova_mem(iova);
		node = rb_first(&iovad->rbroot);
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(put_iova_domain);

static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = container_of(node, struct iova, node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}

static inline struct iova *
alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (iova) {
		iova->pfn_lo = pfn_lo;
		iova->pfn_hi = pfn_hi;
	}

	return iova;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_and_init_iova(pfn_lo, pfn_hi);
	if (iova)
		iova_insert_rbtree(&iovad->rbroot, iova);

	return iova;
}

static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: - iova domain pointer
 * @pfn_lo: - lower page frame address
 * @pfn_hi: - higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that it is not handed out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = container_of(node, struct iova, node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;

		} else if (overlap)
			break;
	}

	/* We are here either because this is the first reserved node
	 * or we need to insert the remaining non-overlapping address range
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(reserve_iova);
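
/*
 * Illustrative sketch: carve a hardware window (for example an MSI
 * doorbell region) out of the allocatable space.  The address range is
 * hypothetical and the granule is assumed to equal PAGE_SIZE.
 *
 *	reserve_iova(iovad, 0xfee00000UL >> PAGE_SHIFT,
 *		     0xfeefffffUL >> PAGE_SHIFT);
 */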

/**
 * copy_reserved_iova - copies the reserved ranges between domains
 * @from: - source domain from where to copy
 * @to: - destination domain where to copy
 * This function copies reserved iova's from one domain to
 * the other.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
	unsigned long flags;
	struct rb_node *node;

	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
		struct iova *iova = container_of(node, struct iova, node);
		struct iova *new_iova;

		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
		if (!new_iova)
			printk(KERN_ERR "Reserve iova range %lx-%lx failed\n",
				iova->pfn_lo, iova->pfn_hi);
	}
	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(copy_reserved_iova);

struct iova *
split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	unsigned long flags;
	struct iova *prev = NULL, *next = NULL;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	if (iova->pfn_lo < pfn_lo) {
		prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
		if (prev == NULL)
			goto error;
	}
	if (iova->pfn_hi > pfn_hi) {
		next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
		if (next == NULL)
			goto error;
	}

	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);

	if (prev) {
		iova_insert_rbtree(&iovad->rbroot, prev);
		iova->pfn_lo = pfn_lo;
	}
	if (next) {
		iova_insert_rbtree(&iovad->rbroot, next);
		iova->pfn_hi = pfn_hi;
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return iova;

error:
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	if (prev)
		free_iova_mem(prev);
	return NULL;
}