/*
 * IOMMU mmap management and range allocation functions.
 * Based almost entirely upon the powerpc iommu allocator.
 */
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/iommu-helper.h>
#include <linux/iommu-common.h>
#include <linux/dma-mapping.h>
#ifndef DMA_ERROR_CODE
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
#endif
#define IOMMU_LARGE_ALLOC	15
/*
 * Initialize iommu_pool entries for the iommu_table. `num_entries'
 * is the number of table entries. If `large_pool' is set to true,
 * the top 1/4 of the table will be set aside for pool allocations
 * of more than IOMMU_LARGE_ALLOC pages.
 */
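/*
 * Worked example (illustrative numbers only, not taken from any caller):
 * with num_entries = 1024, npools = 4 and large_pool = true, the top
 * quarter of the table (entries 768..1023) is set aside for the large
 * pool, and each of the 4 regular pools covers
 * (1024 * 3 / 4) / 4 = 192 entries.
 */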
extern void iommu_tbl_pool_init(struct iommu_table *iommu,
				unsigned long num_entries,
				u32 page_table_shift,
				const struct iommu_tbl_ops *iommu_tbl_ops,
				bool large_pool, u32 npools)
{
	unsigned int start, i;
	struct iommu_pool *p = &(iommu->large_pool);
	if (npools == 0)
		iommu->nr_pools = IOMMU_NR_POOLS;
	else
		iommu->nr_pools = npools;
	BUG_ON(npools > IOMMU_NR_POOLS);
	iommu->page_table_shift = page_table_shift;
	iommu->iommu_tbl_ops = iommu_tbl_ops;
	start = 0;
	if (large_pool)
		iommu->flags |= IOMMU_HAS_LARGE_POOL;

	if (!large_pool)
		iommu->poolsize = num_entries/iommu->nr_pools;
	else
		iommu->poolsize = (num_entries * 3 / 4)/iommu->nr_pools;
	for (i = 0; i < iommu->nr_pools; i++) {
		spin_lock_init(&(iommu->arena_pool[i].lock));
		iommu->arena_pool[i].start = start;
		iommu->arena_pool[i].hint = start;
		start += iommu->poolsize; /* start for next pool */
		iommu->arena_pool[i].end = start - 1;
	}
	if (!large_pool)
		return;

	/* initialize large_pool */
	spin_lock_init(&(p->lock));
	p->start = start;
	p->hint = p->start;
	p->end = num_entries;
}
EXPORT_SYMBOL(iommu_tbl_pool_init);
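/*
 * Usage sketch (hypothetical names and values, not taken from any real
 * driver): a caller fills in a struct iommu_tbl_ops describing its table
 * and then carves the allocation bitmap into pools:
 *
 *	static const struct iommu_tbl_ops my_tbl_ops = {
 *		.cookie_to_index	= NULL,
 *		.demap			= my_demap_fn,
 *		.reset			= NULL,
 *	};
 *
 *	iommu->page_table_map_base = my_dvma_base;
 *	iommu_tbl_pool_init(iommu, num_tsb_entries, MY_IO_PAGE_SHIFT,
 *			    &my_tbl_ops, false, 0);
 */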
unsigned long iommu_tbl_range_alloc(struct device *dev,
				struct iommu_table *iommu,
				unsigned long npages,
				unsigned long *handle,
				unsigned int pool_hash)
{
	unsigned long n, end, start, limit, boundary_size;
	struct iommu_pool *arena;
	int pass = 0;
	unsigned int pool_nr;
	unsigned int npools = iommu->nr_pools;
	unsigned long flags;
	bool large_pool = ((iommu->flags & IOMMU_HAS_LARGE_POOL) != 0);
	bool largealloc = (large_pool && npages > IOMMU_LARGE_ALLOC);
	unsigned long shift;
	/* Sanity check */
	if (unlikely(npages == 0)) {
		printk_ratelimited("npages == 0\n");
		return DMA_ERROR_CODE;
	}
	if (largealloc) {
		arena = &(iommu->large_pool);
		spin_lock_irqsave(&arena->lock, flags);
		pool_nr = 0; /* to keep compiler happy */
	} else {
		/* pick out pool_nr */
		pool_nr = pool_hash & (npools - 1);
		arena = &(iommu->arena_pool[pool_nr]);

		/* find first available unlocked pool */
		while (!spin_trylock_irqsave(&(arena->lock), flags)) {
			pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
			arena = &(iommu->arena_pool[pool_nr]);
		}
	}

 again:
	if (pass == 0 && handle && *handle &&
	    (*handle >= arena->start) && (*handle < arena->end))
		start = *handle;
	else
		start = arena->hint;

	limit = arena->end;
	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the beginning and flush.
	 */
	if (start >= limit) {
		start = arena->start;
		if (iommu->iommu_tbl_ops->reset != NULL)
			iommu->iommu_tbl_ops->reset(iommu);
	}
	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << iommu->page_table_shift);
	else
		boundary_size = ALIGN(1ULL << 32, 1 << iommu->page_table_shift);
	shift = iommu->page_table_map_base >> iommu->page_table_shift;
	boundary_size = boundary_size >> iommu->page_table_shift;
	/*
	 * if the iommu has a non-trivial cookie <-> index mapping, we set
	 * things up so that iommu_is_span_boundary() merely checks if the
	 * (index + npages) < num_tsb_entries
	 */
	if (iommu->iommu_tbl_ops->cookie_to_index != NULL) {
		shift = 0;
		boundary_size = iommu->poolsize * iommu->nr_pools;
	}
	n = iommu_area_alloc(iommu->map, limit, start, npages, shift,
			     boundary_size, 0);
	if (n == -1) {
		if (likely(pass == 0)) {
			/* First failure, rescan from the beginning. */
			arena->hint = arena->start;
			if (iommu->iommu_tbl_ops->reset != NULL)
				iommu->iommu_tbl_ops->reset(iommu);
			pass++;
			goto again;
		} else if (!largealloc && pass <= iommu->nr_pools) {
			spin_unlock(&(arena->lock));
			pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
			arena = &(iommu->arena_pool[pool_nr]);
			while (!spin_trylock(&(arena->lock))) {
				pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
				arena = &(iommu->arena_pool[pool_nr]);
			}
			arena->hint = arena->start;
			pass++;
			goto again;
		} else {
			/* give up */
			spin_unlock_irqrestore(&(arena->lock), flags);
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;
	arena->hint = end;
	/* Update handle for SG allocations */
	if (handle)
		*handle = end;
	spin_unlock_irqrestore(&(arena->lock), flags);

	return n;
}
EXPORT_SYMBOL(iommu_tbl_range_alloc);
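/*
 * Usage sketch (hypothetical caller with the trivial cookie <-> index
 * mapping): the returned value is a table index, which the caller turns
 * into a DMA cookie by reversing the calculation used in
 * iommu_tbl_range_free():
 *
 *	entry = iommu_tbl_range_alloc(dev, iommu, npages, NULL, pool_hash);
 *	if (unlikely(entry == DMA_ERROR_CODE))
 *		return DMA_ERROR_CODE;
 *	dma_addr = iommu->page_table_map_base +
 *		   (entry << iommu->page_table_shift);
 */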
static struct iommu_pool *get_pool(struct iommu_table *tbl,
				   unsigned long entry)
{
	struct iommu_pool *p;
	unsigned long largepool_start = tbl->large_pool.start;
	bool large_pool = ((tbl->flags & IOMMU_HAS_LARGE_POOL) != 0);
	/* The large pool is the last pool at the top of the table */
	if (large_pool && entry >= largepool_start) {
		p = &tbl->large_pool;
	} else {
		unsigned int pool_nr = entry / tbl->poolsize;

		BUG_ON(pool_nr >= tbl->nr_pools);
		p = &tbl->arena_pool[pool_nr];
	}
	return p;
}
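/*
 * Worked example (same illustrative layout as above: 1024 entries,
 * 4 pools of 192, large pool starting at 768): entry 400 maps to
 * arena_pool[400 / 192] == arena_pool[2], while entry 800 falls in
 * the large pool.
 */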
void iommu_tbl_range_free(struct iommu_table *iommu, u64 dma_addr,
			  unsigned long npages, bool do_demap, void *demap_arg)
{
	unsigned long entry;
	struct iommu_pool *pool;
	unsigned long flags;
	unsigned long shift = iommu->page_table_shift;
	if (iommu->iommu_tbl_ops->cookie_to_index != NULL) {
		entry = (*iommu->iommu_tbl_ops->cookie_to_index)(dma_addr,
								 demap_arg);
	} else {
		entry = (dma_addr - iommu->page_table_map_base) >> shift;
	}
	pool = get_pool(iommu, entry);

	spin_lock_irqsave(&(pool->lock), flags);
	if (do_demap && iommu->iommu_tbl_ops->demap != NULL)
		(*iommu->iommu_tbl_ops->demap)(demap_arg, entry, npages);
	bitmap_clear(iommu->map, entry, npages);
	spin_unlock_irqrestore(&(pool->lock), flags);
}
EXPORT_SYMBOL(iommu_tbl_range_free);
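/*
 * Usage sketch (hypothetical caller): releasing the range allocated in
 * the example above and asking the low-level driver to demap it as well:
 *
 *	iommu_tbl_range_free(iommu, dma_addr, npages, true, demap_cookie);
 *
 * demap_cookie is whatever opaque argument the driver's demap op expects.
 */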