/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include <linux/iova.h>
#include <linux/slab.h>

static struct kmem_cache *iommu_iova_cache;

int iommu_iova_cache_init(void)
{
        int ret = 0;

        iommu_iova_cache = kmem_cache_create("iommu_iova",
                                             sizeof(struct iova),
                                             0,
                                             SLAB_HWCACHE_ALIGN,
                                             NULL);
        if (!iommu_iova_cache) {
                pr_err("Couldn't create iova cache\n");
                ret = -ENOMEM;
        }

        return ret;
}

void iommu_iova_cache_destroy(void)
{
        kmem_cache_destroy(iommu_iova_cache);
}

struct iova *alloc_iova_mem(void)
{
        return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
        kmem_cache_free(iommu_iova_cache, iova);
}

void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
        unsigned long start_pfn, unsigned long pfn_32bit)
{
        /*
         * IOVA granularity will normally be equal to the smallest
         * supported IOMMU page size; both *must* be capable of
         * representing individual CPU pages exactly.
         */
        BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));

        spin_lock_init(&iovad->iova_rbtree_lock);
        iovad->rbroot = RB_ROOT;
        iovad->cached32_node = NULL;
        iovad->granule = granule;
        iovad->start_pfn = start_pfn;
        iovad->dma_32bit_pfn = pfn_32bit;
}

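/*
 * Find the rbtree node to start a backwards (top-down) search from. If
 * the caller is allocating below the 32-bit boundary and a node from a
 * previous 32-bit allocation is cached, resume from just below it and
 * tighten *limit_pfn accordingly; otherwise start from the rightmost
 * (highest) node in the tree.
 */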
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
        if ((*limit_pfn != iovad->dma_32bit_pfn) ||
                (iovad->cached32_node == NULL))
                return rb_last(&iovad->rbroot);
        else {
                struct rb_node *prev_node = rb_prev(iovad->cached32_node);
                struct iova *curr_iova =
                        container_of(iovad->cached32_node, struct iova, node);
                *limit_pfn = curr_iova->pfn_lo - 1;
                return prev_node;
        }
}

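/*
 * Remember the most recently allocated 32-bit-limited iova so that the
 * next 32-bit allocation can resume the search from there instead of
 * walking the whole tree from the top.
 */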
static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
        unsigned long limit_pfn, struct iova *new)
{
        if (limit_pfn != iovad->dma_32bit_pfn)
                return;
        iovad->cached32_node = &new->node;
}

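/*
 * Advance or invalidate the cached 32-bit node when an iova at or above
 * it is freed, so the cache never points at a node that has been erased
 * from the tree.
 */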
static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
        struct iova *cached_iova;
        struct rb_node *curr;

        if (!iovad->cached32_node)
                return;
        curr = iovad->cached32_node;
        cached_iova = container_of(curr, struct iova, node);

        if (free->pfn_lo >= cached_iova->pfn_lo) {
                struct rb_node *node = rb_next(&free->node);
                struct iova *iova = container_of(node, struct iova, node);

                /* only cache if it's below 32bit pfn */
                if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
                        iovad->cached32_node = node;
                else
                        iovad->cached32_node = NULL;
        }
}

/*
 * Computes the padding size required to make the start address
 * naturally aligned on the power-of-two order of its size.
 */
static unsigned int
iova_get_pad_size(unsigned int size, unsigned int limit_pfn)
{
        return (limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1);
}

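/*
 * Walk the tree backwards from limit_pfn looking for the first gap large
 * enough for @size pages (plus any alignment padding), then link the new
 * iova into the rbtree. As a worked example of the padding: allocating
 * size = 5 pages below limit_pfn = 0xfff rounds the alignment up to 8, so
 * pad_size = (0x1000 - 5) & 7 = 3 and pfn_lo = 0xfff - (5 + 3) + 1 = 0xff8,
 * which is aligned on an 8-page boundary.
 */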
static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
                unsigned long size, unsigned long limit_pfn,
                struct iova *new, bool size_aligned)
{
        struct rb_node *prev, *curr = NULL;
        unsigned long flags;
        unsigned long saved_pfn;
        unsigned int pad_size = 0;

        /* Walk the tree backwards */
        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        saved_pfn = limit_pfn;
        curr = __get_cached_rbnode(iovad, &limit_pfn);
        prev = curr;
        while (curr) {
                struct iova *curr_iova = container_of(curr, struct iova, node);

                if (limit_pfn < curr_iova->pfn_lo)
                        goto move_left;
                else if (limit_pfn < curr_iova->pfn_hi)
                        goto adjust_limit_pfn;
                else {
                        if (size_aligned)
                                pad_size = iova_get_pad_size(size, limit_pfn);
                        if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
                                break;  /* found a free slot */
                }
adjust_limit_pfn:
                limit_pfn = curr_iova->pfn_lo - 1;
move_left:
                prev = curr;
                curr = rb_prev(curr);
        }

        if (!curr) {
                if (size_aligned)
                        pad_size = iova_get_pad_size(size, limit_pfn);
                if ((iovad->start_pfn + size + pad_size) > limit_pfn) {
                        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
                        return -ENOMEM;
                }
        }

        /* pfn_lo will point to size aligned address if size_aligned is set */
        new->pfn_lo = limit_pfn - (size + pad_size) + 1;
        new->pfn_hi = new->pfn_lo + size - 1;

        /* Insert the new_iova into domain rbtree by holding writer lock */
        {
                struct rb_node **entry, *parent = NULL;

                /* If we have 'prev', it's a valid place to start the
                   insertion. Otherwise, start from the root. */
                if (prev)
                        entry = &prev;
                else
                        entry = &iovad->rbroot.rb_node;

                /* Figure out where to put new node */
                while (*entry) {
                        struct iova *this = container_of(*entry,
                                                        struct iova, node);
                        parent = *entry;

                        if (new->pfn_lo < this->pfn_lo)
                                entry = &((*entry)->rb_left);
                        else if (new->pfn_lo > this->pfn_lo)
                                entry = &((*entry)->rb_right);
                        else
                                BUG(); /* this should not happen */
                }

                /* Add new node and rebalance tree. */
                rb_link_node(&new->node, parent, entry);
                rb_insert_color(&new->node, &iovad->rbroot);
        }
        __cached_rbnode_insert_update(iovad, saved_pfn, new);

        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

        return 0;
}

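/*
 * Insert an iova into the rbtree, ordered by pfn_lo. The caller must
 * hold iovad->iova_rbtree_lock.
 */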
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;
        /* Figure out where to put new node */
        while (*new) {
                struct iova *this = container_of(*new, struct iova, node);

                parent = *new;

                if (iova->pfn_lo < this->pfn_lo)
                        new = &((*new)->rb_left);
                else if (iova->pfn_lo > this->pfn_lo)
                        new = &((*new)->rb_right);
                else
                        BUG(); /* this should not happen */
        }
        /* Add new node and rebalance tree. */
        rb_link_node(&iova->node, parent, new);
        rb_insert_color(&iova->node, root);
}

/**
 * alloc_iova - allocates an iova
 * @iovad: iova domain in question
 * @size: size of page frames to allocate
 * @limit_pfn: max limit address
 * @size_aligned: set if a size-aligned address range is required
 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
 * flag is set then the allocated address iova->pfn_lo will be naturally
 * aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
        unsigned long limit_pfn,
        bool size_aligned)
{
        struct iova *new_iova;
        int ret;

        new_iova = alloc_iova_mem();
        if (!new_iova)
                return NULL;

        ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
                        new_iova, size_aligned);

        if (ret) {
                free_iova_mem(new_iova);
                return NULL;
        }

        return new_iova;
}

/**
 * find_iova - finds an iova for a given pfn
 * @iovad: iova domain in question.
 * @pfn: page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
        unsigned long flags;
        struct rb_node *node;

        /* Take the lock so that no other thread is manipulating the rbtree */
        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        node = iovad->rbroot.rb_node;
        while (node) {
                struct iova *iova = container_of(node, struct iova, node);

                /* If pfn falls within iova's range, return iova */
                if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
                        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
                        /* We are not holding the lock while this iova
                         * is referenced by the caller as the same thread
                         * which called this function also calls __free_iova()
                         * and it is by design that only one thread can possibly
                         * reference a particular iova and hence no conflict.
                         */
                        return iova;
                }

                if (pfn < iova->pfn_lo)
                        node = node->rb_left;
                else if (pfn > iova->pfn_lo)
                        node = node->rb_right;
        }

        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        return NULL;
}

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
        unsigned long flags;

        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        __cached_rbnode_delete_update(iovad, iova);
        rb_erase(&iova->node, &iovad->rbroot);
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        free_iova_mem(iova);
}

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: iova domain in question.
 * @pfn: pfn that was allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
        struct iova *iova = find_iova(iovad, pfn);

        if (iova)
                __free_iova(iovad, iova);
}

/**
 * put_iova_domain - destroys the iova domain
 * @iovad: iova domain in question.
 * All the iovas in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
        struct rb_node *node;
        unsigned long flags;

        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        node = rb_first(&iovad->rbroot);
        while (node) {
                struct iova *iova = container_of(node, struct iova, node);

                rb_erase(node, &iovad->rbroot);
                free_iova_mem(iova);
                node = rb_first(&iovad->rbroot);
        }
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}

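/*
 * Returns 1 if the iova at @node overlaps the pfn range
 * [@pfn_lo, @pfn_hi], 0 otherwise.
 */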
static int
__is_range_overlap(struct rb_node *node,
        unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct iova *iova = container_of(node, struct iova, node);

        if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
                return 1;
        return 0;
}

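/*
 * Allocate an iova from the slab cache and initialise its pfn range;
 * returns NULL if the allocation fails.
 */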
static inline struct iova *
alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct iova *iova;

        iova = alloc_iova_mem();
        if (iova) {
                iova->pfn_lo = pfn_lo;
                iova->pfn_hi = pfn_hi;
        }

        return iova;
}

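/*
 * Allocate a new iova covering [@pfn_lo, @pfn_hi] and insert it into the
 * domain's rbtree. The caller must hold iovad->iova_rbtree_lock.
 */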
static struct iova *
__insert_new_range(struct iova_domain *iovad,
        unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct iova *iova;

        iova = alloc_and_init_iova(pfn_lo, pfn_hi);
        if (iova)
                iova_insert_rbtree(&iovad->rbroot, iova);

        return iova;
}

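/*
 * Grow @iova downwards to absorb the overlapping low end of the requested
 * range, then advance *pfn_lo past @iova so the caller only has to insert
 * whatever remains above it.
 */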
static void
__adjust_overlap_range(struct iova *iova,
        unsigned long *pfn_lo, unsigned long *pfn_hi)
{
        if (*pfn_lo < iova->pfn_lo)
                iova->pfn_lo = *pfn_lo;
        if (*pfn_hi > iova->pfn_hi)
                *pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: iova domain pointer
 * @pfn_lo: lower page frame address
 * @pfn_hi: higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this address is not dished out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
        unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct rb_node *node;
        unsigned long flags;
        struct iova *iova;
        unsigned int overlap = 0;

        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
                if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
                        iova = container_of(node, struct iova, node);
                        __adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
                        if ((pfn_lo >= iova->pfn_lo) &&
                                (pfn_hi <= iova->pfn_hi))
                                goto finish;
                        overlap = 1;

                } else if (overlap)
                        break;
        }

        /* We are here either because this is the first reserved node
         * or need to insert remaining non overlap addr range
         */
        iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        return iova;
}

/**
 * copy_reserved_iova - copies the reserved iovas between domains
 * @from: source domain from where to copy
 * @to: destination domain where to copy
 * This function copies reserved iovas from one domain to
 * the other.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
        unsigned long flags;
        struct rb_node *node;

        spin_lock_irqsave(&from->iova_rbtree_lock, flags);
        for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
                struct iova *iova = container_of(node, struct iova, node);
                struct iova *new_iova;

                new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
                if (!new_iova)
                        printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
                                iova->pfn_lo, iova->pfn_hi);
        }
        spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}

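/*
 * Remove @iova from the tree, splitting off any part of it that lies
 * outside [pfn_lo, pfn_hi] into fresh nodes that stay reserved. On
 * success the trimmed @iova (no longer in the tree) is returned; on
 * allocation failure the tree is left untouched and NULL is returned.
 */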
struct iova *
split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
        unsigned long pfn_lo, unsigned long pfn_hi)
{
        unsigned long flags;
        struct iova *prev = NULL, *next = NULL;

        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        if (iova->pfn_lo < pfn_lo) {
                prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
                if (prev == NULL)
                        goto error;
        }
        if (iova->pfn_hi > pfn_hi) {
                next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
                if (next == NULL)
                        goto error;
        }

        __cached_rbnode_delete_update(iovad, iova);
        rb_erase(&iova->node, &iovad->rbroot);

        if (prev) {
                iova_insert_rbtree(&iovad->rbroot, prev);
                iova->pfn_lo = pfn_lo;
        }
        if (next) {
                iova_insert_rbtree(&iovad->rbroot, next);
                iova->pfn_hi = pfn_hi;
        }
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

        return iova;

error:
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        if (prev)
                free_iova_mem(prev);
        return NULL;
}