/*
 * IOMMU helpers in MMU context.
 *
 * Copyright (C) 2015 IBM Corp. <aik@ozlabs.ru>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <asm/mmu_context.h>

static DEFINE_MUTEX(mem_list_mutex);

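/*
 * Describes one region of userspace memory preregistered for IOMMU use.
 * @used is a reference count taken by mm_iommu_get() and is protected by
 * mem_list_mutex; @mapped gates whether new TCE mappings may be created
 * and only drops to zero on the final mm_iommu_put().
 */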
struct mm_iommu_table_group_mem_t {
	struct list_head next;
	struct rcu_head rcu;
	unsigned long used;
	atomic64_t mapped;
	u64 ua;			/* userspace address */
	u64 entries;		/* number of entries in hpas[] */
	u64 *hpas;		/* vmalloc'ed */
};

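/*
 * Account @npages worth of pinned memory against the mm's RLIMIT_MEMLOCK
 * budget. An increase fails with -ENOMEM if it would exceed the limit and
 * the task lacks CAP_IPC_LOCK; a decrease always succeeds.
 */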
static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
		unsigned long npages, bool incr)
{
	long ret = 0, locked, lock_limit;

	if (!npages)
		return 0;

	down_write(&mm->mmap_sem);

	if (incr) {
		locked = mm->locked_vm + npages;
		lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			ret = -ENOMEM;
		else
			mm->locked_vm += npages;
	} else {
		if (WARN_ON_ONCE(npages > mm->locked_vm))
			npages = mm->locked_vm;
		mm->locked_vm -= npages;
	}

	pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n",
			current->pid,
			incr ? '+' : '-',
			npages << PAGE_SHIFT,
			mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK));
	up_write(&mm->mmap_sem);

	return ret;
}

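/* Returns true if the current process has preregistered any memory regions. */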
bool mm_iommu_preregistered(void)
{
	if (!current || !current->mm)
		return false;

	return !list_empty(&current->mm->context.iommu_group_mem_list);
}
EXPORT_SYMBOL_GPL(mm_iommu_preregistered);

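/*
 * Preregister @entries pages of userspace memory starting at @ua: account
 * them against RLIMIT_MEMLOCK, pin them with get_user_pages_fast() and
 * record the host physical address of each page in hpas[]. An exact repeat
 * of an existing registration only bumps its use count; a partial overlap
 * fails with -EINVAL.
 *
 * A sketch of a typical caller (hypothetical, not from this file):
 *
 *	struct mm_iommu_table_group_mem_t *mem;
 *
 *	if (!mm_iommu_get(ua, entries, &mem)) {
 *		... map TCEs, bracketed by mm_iommu_mapped_inc()/_dec() ...
 *		mm_iommu_put(mem);
 *	}
 */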
long mm_iommu_get(unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem)
{
	struct mm_iommu_table_group_mem_t *mem;
	long i, j, ret = 0, locked_entries = 0;
	struct page *page = NULL;

	if (!current || !current->mm)
		return -ESRCH; /* process exited */

	mutex_lock(&mem_list_mutex);

	list_for_each_entry_rcu(mem, &current->mm->context.iommu_group_mem_list,
			next) {
		if ((mem->ua == ua) && (mem->entries == entries)) {
			++mem->used;
			*pmem = mem;
			goto unlock_exit;
		}

		/* Overlap? */
		if ((mem->ua < (ua + (entries << PAGE_SHIFT))) &&
				(ua < (mem->ua +
				       (mem->entries << PAGE_SHIFT)))) {
			ret = -EINVAL;
			goto unlock_exit;
		}
	}

	ret = mm_iommu_adjust_locked_vm(current->mm, entries, true);
	if (ret)
		goto unlock_exit;

	locked_entries = entries;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	mem->hpas = vzalloc(entries * sizeof(mem->hpas[0]));
	if (!mem->hpas) {
		kfree(mem);
		ret = -ENOMEM;
		goto unlock_exit;
	}

	for (i = 0; i < entries; ++i) {
		if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
					1/* pages */, 1/* iswrite */, &page)) {
			for (j = 0; j < i; ++j)
				put_page(pfn_to_page(
						mem->hpas[j] >> PAGE_SHIFT));
			vfree(mem->hpas);
			kfree(mem);
			ret = -EFAULT;
			goto unlock_exit;
		}

		mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
	}

	atomic64_set(&mem->mapped, 1);
	mem->used = 1;
	mem->ua = ua;
	mem->entries = entries;
	*pmem = mem;

	list_add_rcu(&mem->next, &current->mm->context.iommu_group_mem_list);

unlock_exit:
	if (locked_entries && ret)
		mm_iommu_adjust_locked_vm(current->mm, locked_entries, false);

	mutex_unlock(&mem_list_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_get);

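/* Release every page pinned by mm_iommu_get() for this region. */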
static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
{
	long i;
	struct page *page = NULL;

	for (i = 0; i < mem->entries; ++i) {
		if (!mem->hpas[i])
			continue;

		page = pfn_to_page(mem->hpas[i] >> PAGE_SHIFT);
		if (!page)
			continue;

		put_page(page);
		mem->hpas[i] = 0;
	}
}

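/* Unpin the region's pages and free the descriptor itself. */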
static void mm_iommu_do_free(struct mm_iommu_table_group_mem_t *mem)
{
	mm_iommu_unpin(mem);
	vfree(mem->hpas);
	kfree(mem);
}

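/* RCU callback: free the region once all lockless list walkers are done. */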
static void mm_iommu_free(struct rcu_head *head)
{
	struct mm_iommu_table_group_mem_t *mem = container_of(head,
			struct mm_iommu_table_group_mem_t, rcu);

	mm_iommu_do_free(mem);
}

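/*
 * Unlink the region and defer freeing to RCU; the locked_vm accounting
 * is returned immediately.
 */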
static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
{
	list_del_rcu(&mem->next);
	mm_iommu_adjust_locked_vm(current->mm, mem->entries, false);
	call_rcu(&mem->rcu, mm_iommu_free);
}

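/*
 * Drop a reference taken by mm_iommu_get(). The final put only succeeds
 * if no mappings remain, i.e. @mapped can be switched from 1 to 0;
 * otherwise the reference is restored and -EBUSY is returned.
 */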
long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem)
{
	long ret = 0;

	if (!current || !current->mm)
		return -ESRCH; /* process exited */

	mutex_lock(&mem_list_mutex);

	if (mem->used == 0) {
		ret = -ENOENT;
		goto unlock_exit;
	}

	--mem->used;
	/* There are still users, exit */
	if (mem->used)
		goto unlock_exit;

	/* Are there still mappings? */
	if (atomic64_cmpxchg(&mem->mapped, 1, 0) != 1) {
		++mem->used;
		ret = -EBUSY;
		goto unlock_exit;
	}

	/* @mapped became 0 so now mappings are disabled, release the region */
	mm_iommu_release(mem);

unlock_exit:
	mutex_unlock(&mem_list_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_put);

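/*
 * Find the preregistered region fully containing [ua, ua + size).
 * The list is walked locklessly, so callers are expected to be in an
 * RCU read-side critical section or to otherwise keep the list stable.
 */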
struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
		unsigned long size)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_rcu(mem,
			&current->mm->context.iommu_group_mem_list,
			next) {
		if ((mem->ua <= ua) &&
				(ua + size <= mem->ua +
				 (mem->entries << PAGE_SHIFT))) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup);

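/* Find a region by exact (ua, entries) match, again walking locklessly. */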
struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua,
		unsigned long entries)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_rcu(mem,
			&current->mm->context.iommu_group_mem_list,
			next) {
		if ((mem->ua == ua) && (mem->entries == entries)) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_find);

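/*
 * Translate a userspace address inside a preregistered region into the
 * host physical address recorded at pin time.
 */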
long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa)
{
	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
	u64 *va;

	if (entry >= mem->entries)
		return -EFAULT;

	va = &mem->hpas[entry];
	*hpa = *va | (ua & ~PAGE_MASK);

	return 0;
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);

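/*
 * Take a mapping reference, unless the region is already being torn down
 * (@mapped has reached zero).
 */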
long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
{
	if (atomic64_inc_not_zero(&mem->mapped))
		return 0;

	/* Last mm_iommu_put() has been called, no more mappings allowed */
	return -ENXIO;
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_inc);

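/*
 * Drop a mapping reference. @mapped never goes below 1 here; the
 * transition to zero is reserved for the final mm_iommu_put().
 */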
void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem)
{
	atomic64_add_unless(&mem->mapped, -1, 1);
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_dec);

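/* Called at context creation to initialise the empty region list. */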
void mm_iommu_init(mm_context_t *ctx)
{
	INIT_LIST_HEAD_RCU(&ctx->iommu_group_mem_list);
}

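/*
 * Called at context teardown: free all remaining regions directly,
 * without waiting for RCU, as no other users can remain at this point.
 */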
void mm_iommu_cleanup(mm_context_t *ctx)
{
	struct mm_iommu_table_group_mem_t *mem, *tmp;

	list_for_each_entry_safe(mem, tmp, &ctx->iommu_group_mem_list, next) {
		list_del_rcu(&mem->next);
		mm_iommu_do_free(mem);
	}
}