sparc: Break up monolithic iommu table/lock into finer granularity pools and locks
[deliverable/linux.git] lib/iommu-common.c
/*
 * IOMMU mmap management and range allocation functions.
 * Based almost entirely upon the powerpc iommu allocator.
 */

#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/iommu-helper.h>
#include <linux/iommu-common.h>
#include <linux/dma-mapping.h>

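/* Allocations of more than this many pages are served from the large pool */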
#define IOMMU_LARGE_ALLOC	15

/*
 * Initialize iommu_pool entries for the iommu_table. `num_entries'
 * is the number of table entries. If `large_pool' is set to true,
 * the top 1/4 of the table will be set aside for pool allocations
 * of more than IOMMU_LARGE_ALLOC pages.
 */
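/*
 * Illustrative sizing, not taken from this file: with num_entries = 1024,
 * nr_pools = 16 and large_pool = true, poolsize = (1024 * 3 / 4) / 16 = 48,
 * so the sixteen small pools (each with its own lock and hint) cover
 * entries 0..767 and the large pool covers entries 768..1023.  The
 * allocator picks a pool via `pool_hash & (nr_pools - 1)', so nr_pools
 * is expected to be a power of two.
 */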
void iommu_tbl_pool_init(struct iommu_table *iommu,
                         unsigned long num_entries,
                         u32 page_table_shift,
                         const struct iommu_tbl_ops *iommu_tbl_ops,
                         bool large_pool, u32 npools)
{
        unsigned int start, i;
        struct iommu_pool *p = &(iommu->large_pool);

        if (npools == 0)
                iommu->nr_pools = IOMMU_NR_POOLS;
        else
                iommu->nr_pools = npools;
        BUG_ON(npools > IOMMU_NR_POOLS);

        iommu->page_table_shift = page_table_shift;
        iommu->iommu_tbl_ops = iommu_tbl_ops;
        start = 0;
        if (large_pool)
                iommu->flags |= IOMMU_HAS_LARGE_POOL;

        if (!large_pool)
                iommu->poolsize = num_entries/iommu->nr_pools;
        else
                iommu->poolsize = (num_entries * 3 / 4)/iommu->nr_pools;
        for (i = 0; i < iommu->nr_pools; i++) {
                spin_lock_init(&(iommu->arena_pool[i].lock));
                iommu->arena_pool[i].start = start;
                iommu->arena_pool[i].hint = start;
                start += iommu->poolsize; /* start for next pool */
                iommu->arena_pool[i].end = start - 1;
        }
        if (!large_pool)
                return;
        /* initialize large_pool */
        spin_lock_init(&(p->lock));
        p->start = start;
        p->hint = p->start;
        p->end = num_entries;
}
EXPORT_SYMBOL(iommu_tbl_pool_init);

unsigned long iommu_tbl_range_alloc(struct device *dev,
                                    struct iommu_table *iommu,
                                    unsigned long npages,
                                    unsigned long *handle,
                                    unsigned int pool_hash)
{
        unsigned long n, end, start, limit, boundary_size;
        struct iommu_pool *arena;
        int pass = 0;
        unsigned int pool_nr;
        unsigned int npools = iommu->nr_pools;
        unsigned long flags;
        bool large_pool = ((iommu->flags & IOMMU_HAS_LARGE_POOL) != 0);
        bool largealloc = (large_pool && npages > IOMMU_LARGE_ALLOC);
        unsigned long shift;

        /* Sanity check */
        if (unlikely(npages == 0)) {
                printk_ratelimited(KERN_ERR "npages == 0\n");
                return DMA_ERROR_CODE;
        }

        if (largealloc) {
                arena = &(iommu->large_pool);
                spin_lock_irqsave(&arena->lock, flags);
                pool_nr = 0; /* to keep compiler happy */
        } else {
                /* pick out pool_nr */
                pool_nr = pool_hash & (npools - 1);
                arena = &(iommu->arena_pool[pool_nr]);

                /* find first available unlocked pool */
                while (!spin_trylock_irqsave(&(arena->lock), flags)) {
                        pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
                        arena = &(iommu->arena_pool[pool_nr]);
                }
        }

again:
        if (pass == 0 && handle && *handle &&
            (*handle >= arena->start) && (*handle < arena->end))
                start = *handle;
        else
                start = arena->hint;

        limit = arena->end;

        /* The case below can happen if we have a small segment appended
         * to a large, or when the previous alloc was at the very end of
         * the available space. If so, go back to the beginning and flush.
         */
        if (start >= limit) {
                start = arena->start;
                if (iommu->iommu_tbl_ops->reset != NULL)
                        iommu->iommu_tbl_ops->reset(iommu);
        }

        if (dev)
                boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                                      1 << iommu->page_table_shift);
        else
                boundary_size = ALIGN(1UL << 32, 1 << iommu->page_table_shift);

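        /* Convert the segment boundary and table base to units of IOMMU pages */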
        shift = iommu->page_table_map_base >> iommu->page_table_shift;
        boundary_size = boundary_size >> iommu->page_table_shift;
        /*
         * if the iommu has a non-trivial cookie <-> index mapping, we set
         * things up so that iommu_is_span_boundary() merely checks if the
         * (index + npages) < num_tsb_entries
         */
        if (iommu->iommu_tbl_ops->cookie_to_index != NULL) {
                shift = 0;
                boundary_size = iommu->poolsize * iommu->nr_pools;
        }
        n = iommu_area_alloc(iommu->map, limit, start, npages, shift,
                             boundary_size, 0);
        if (n == -1) {
                if (likely(pass == 0)) {
                        /* First failure, rescan from the beginning. */
                        arena->hint = arena->start;
                        if (iommu->iommu_tbl_ops->reset != NULL)
                                iommu->iommu_tbl_ops->reset(iommu);
                        pass++;
                        goto again;
                } else if (!largealloc && pass <= iommu->nr_pools) {
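                        /*
                         * Move on to the next pool.  The bare spin_unlock/
                         * spin_trylock (no irqrestore/irqsave) is deliberate:
                         * interrupts stay disabled across the pool switch,
                         * and the flags saved at entry are restored by the
                         * final unlock below.
                         */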
                        spin_unlock(&(arena->lock));
                        pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
                        arena = &(iommu->arena_pool[pool_nr]);
                        while (!spin_trylock(&(arena->lock))) {
                                pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
                                arena = &(iommu->arena_pool[pool_nr]);
                        }
                        arena->hint = arena->start;
                        pass++;
                        goto again;
                } else {
                        /* give up */
                        spin_unlock_irqrestore(&(arena->lock), flags);
                        return DMA_ERROR_CODE;
                }
        }

        end = n + npages;

        arena->hint = end;

        /* Update handle for SG allocations */
        if (handle)
                *handle = end;
        spin_unlock_irqrestore(&(arena->lock), flags);

        return n;
}
EXPORT_SYMBOL(iommu_tbl_range_alloc);

static struct iommu_pool *get_pool(struct iommu_table *tbl,
                                   unsigned long entry)
{
        struct iommu_pool *p;
        unsigned long largepool_start = tbl->large_pool.start;
        bool large_pool = ((tbl->flags & IOMMU_HAS_LARGE_POOL) != 0);

        /* The large pool is the last pool at the top of the table */
        if (large_pool && entry >= largepool_start) {
                p = &tbl->large_pool;
        } else {
                unsigned int pool_nr = entry / tbl->poolsize;

                BUG_ON(pool_nr >= tbl->nr_pools);
                p = &tbl->arena_pool[pool_nr];
        }
        return p;
}

void iommu_tbl_range_free(struct iommu_table *iommu, u64 dma_addr,
                          unsigned long npages, bool do_demap, void *demap_arg)
{
        unsigned long entry;
        struct iommu_pool *pool;
        unsigned long flags;
        unsigned long shift = iommu->page_table_shift;

        if (iommu->iommu_tbl_ops->cookie_to_index != NULL) {
                entry = (*iommu->iommu_tbl_ops->cookie_to_index)(dma_addr,
                                                                 demap_arg);
        } else {
                entry = (dma_addr - iommu->page_table_map_base) >> shift;
        }
        pool = get_pool(iommu, entry);

        spin_lock_irqsave(&(pool->lock), flags);
        if (do_demap && iommu->iommu_tbl_ops->demap != NULL)
                (*iommu->iommu_tbl_ops->demap)(demap_arg, entry, npages);

        bitmap_clear(iommu->map, entry, npages);
        spin_unlock_irqrestore(&(pool->lock), flags);
}
EXPORT_SYMBOL(iommu_tbl_range_free);
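A minimal usage sketch, not part of this file: the driver-side names below (my_iommu_reset, my_tbl_ops, my_setup_iommu) are hypothetical, <linux/slab.h> is assumed for kzalloc()/kfree(), PAGE_SHIFT stands in for the platform's IOMMU page shift, and the caller is assumed to allocate iommu->map and set page_table_map_base itself, since this file only consumes them.

static void my_iommu_reset(struct iommu_table *iommu)
{
        /* hypothetical: flush hardware IOTLB state after the table wraps */
}

static const struct iommu_tbl_ops my_tbl_ops = {
        .reset = my_iommu_reset,
        /* .cookie_to_index and .demap left NULL: freeing then uses the
         * default (dma_addr - page_table_map_base) >> page_table_shift
         * mapping and skips the demap callback.
         */
};

static int my_setup_iommu(struct iommu_table *iommu, struct device *dev,
                          unsigned long num_entries)
{
        unsigned long npages = 1;
        unsigned long entry;

        /* caller-provided state that iommu-common.c expects to find */
        iommu->map = kzalloc(BITS_TO_LONGS(num_entries) * sizeof(long),
                             GFP_KERNEL);
        if (!iommu->map)
                return -ENOMEM;
        iommu->page_table_map_base = 0;         /* hypothetical DVMA base */

        /* npools == 0 selects the default IOMMU_NR_POOLS; also use a large pool */
        iommu_tbl_pool_init(iommu, num_entries, PAGE_SHIFT,
                            &my_tbl_ops, true, 0);

        /* allocate one IOMMU page; any pool_hash works, it is masked down */
        entry = iommu_tbl_range_alloc(dev, iommu, npages, NULL, 0);
        if (entry == DMA_ERROR_CODE) {
                kfree(iommu->map);
                return -ENOMEM;
        }

        /* and release it again via its DMA cookie */
        iommu_tbl_range_free(iommu,
                             iommu->page_table_map_base +
                             (entry << iommu->page_table_shift),
                             npages, false, NULL);
        return 0;
}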