/* mm/ashmem.c
**
** Anonymous Shared Memory Subsystem, ashmem
**
** Copyright (C) 2008 Google, Inc.
**
** Robert Love <rlove@google.com>
**
** This software is licensed under the terms of the GNU General Public
** License version 2, as published by the Free Software Foundation, and
** may be copied, distributed, and modified under those terms.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
** GNU General Public License for more details.
*/

#include <linux/module.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include "ashmem.h"

#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)
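
/*
 * Editor's illustrative note (not in the original source): ASHMEM_NAME_LEN
 * comes from ashmem.h. A region that userspace names "foo" is stored below
 * as "dev/ashmem/foo", which is why ASHMEM_FULL_NAME_LEN adds the prefix
 * length on top of ASHMEM_NAME_LEN; that full name is what shows up for
 * the mapping in /proc/<pid>/maps.
 */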

/*
 * ashmem_area - anonymous shared memory area
 * Lifecycle: From our parent file's open() until its release()
 * Locking: Protected by `ashmem_mutex'
 * Big Note: Mappings do NOT pin this structure; it dies on close()
 */
struct ashmem_area {
	char name[ASHMEM_FULL_NAME_LEN]; /* optional name for /proc/pid/maps */
	struct list_head unpinned_list;	/* this area's list of unpinned ranges */
	struct file *file;		/* the shmem-based backing file */
	size_t size;			/* size of the mapping, in bytes */
	unsigned long prot_mask;	/* allowed prot bits, as vm_flags */
};

/*
 * ashmem_range - represents an interval of unpinned (evictable) pages
 * Lifecycle: From unpin to pin
 * Locking: Protected by `ashmem_mutex'
 */
struct ashmem_range {
	struct list_head lru;		/* entry in LRU list */
	struct list_head unpinned;	/* entry in its area's unpinned list */
	struct ashmem_area *asma;	/* associated area */
	size_t pgstart;			/* starting page, inclusive */
	size_t pgend;			/* ending page, inclusive */
	unsigned int purged;		/* ASHMEM_NOT or ASHMEM_WAS_PURGED */
};

/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

/* Count of pages on our LRU list, protected by ashmem_mutex */
static unsigned long lru_count;

/*
 * ashmem_mutex - protects the list of and each individual ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);

static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;

#define range_size(range) \
	((range)->pgend - (range)->pgstart + 1)

#define range_on_lru(range) \
	((range)->purged == ASHMEM_NOT_PURGED)

#define page_range_subsumes_range(range, start, end) \
	(((range)->pgstart >= (start)) && ((range)->pgend <= (end)))

#define page_range_subsumed_by_range(range, start, end) \
	(((range)->pgstart <= (start)) && ((range)->pgend >= (end)))

#define page_in_range(range, page) \
	(((range)->pgstart <= (page)) && ((range)->pgend >= (page)))

#define page_range_in_range(range, start, end) \
	(page_in_range(range, start) || page_in_range(range, end) || \
	 page_range_subsumes_range(range, start, end))

#define range_before_page(range, page) \
	((range)->pgend < (page))

#define PROT_MASK	(PROT_EXEC | PROT_READ | PROT_WRITE)
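
/*
 * Illustrative example, added by the editor (not in the original source):
 * for a range covering pages [2, 5], range_size() is 4. Checked against a
 * request (start=5, end=8), page_range_in_range() is true because
 * page_in_range(range, 5) holds; against (start=6, end=8) it is false,
 * and range_before_page(range, 6) is true, which is what lets the
 * pin/unpin walks below stop early on the sorted unpinned list.
 */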

static inline void lru_add(struct ashmem_range *range)
{
	list_add_tail(&range->lru, &ashmem_lru_list);
	lru_count += range_size(range);
}

static inline void lru_del(struct ashmem_range *range)
{
	list_del(&range->lru);
	lru_count -= range_size(range);
}

/*
 * range_alloc - allocate and initialize a new ashmem_range structure
 *
 * 'asma' - associated ashmem_area
 * 'prev_range' - the previous ashmem_range in the sorted asma->unpinned list
 * 'purged' - initial purge value (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * 'start' - starting page, inclusive
 * 'end' - ending page, inclusive
 *
 * Caller must hold ashmem_mutex.
 */
static int range_alloc(struct ashmem_area *asma,
		       struct ashmem_range *prev_range, unsigned int purged,
		       size_t start, size_t end)
{
	struct ashmem_range *range;

	range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
	if (unlikely(!range))
		return -ENOMEM;

	range->asma = asma;
	range->pgstart = start;
	range->pgend = end;
	range->purged = purged;

	list_add_tail(&range->unpinned, &prev_range->unpinned);

	if (range_on_lru(range))
		lru_add(range);

	return 0;
}

static void range_del(struct ashmem_range *range)
{
	list_del(&range->unpinned);
	if (range_on_lru(range))
		lru_del(range);
	kmem_cache_free(ashmem_range_cachep, range);
}

/*
 * range_shrink - shrinks a range
 *
 * Caller must hold ashmem_mutex.
 */
static inline void range_shrink(struct ashmem_range *range,
				size_t start, size_t end)
{
	size_t pre = range_size(range);

	range->pgstart = start;
	range->pgend = end;

	if (range_on_lru(range))
		lru_count -= pre - range_size(range);
}
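
/*
 * Editor's illustrative note (not in the original source): shrinking a
 * range that spans pages [2, 9] down to [2, 5] reduces range_size() from
 * 8 to 4, so lru_count drops by 4 -- but only while the range is still
 * on the LRU, i.e. it has not already been purged.
 */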

static int ashmem_open(struct inode *inode, struct file *file)
{
	struct ashmem_area *asma;
	int ret;

	ret = generic_file_open(inode, file);
	if (unlikely(ret))
		return ret;

	asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
	if (unlikely(!asma))
		return -ENOMEM;

	INIT_LIST_HEAD(&asma->unpinned_list);
	memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
	asma->prot_mask = PROT_MASK;
	file->private_data = asma;

	return 0;
}

static int ashmem_release(struct inode *ignored, struct file *file)
{
	struct ashmem_area *asma = file->private_data;
	struct ashmem_range *range, *next;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
		range_del(range);
	mutex_unlock(&ashmem_mutex);

	if (asma->file)
		fput(asma->file);
	kmem_cache_free(ashmem_area_cachep, asma);

	return 0;
}

static ssize_t ashmem_read(struct file *file, char __user *buf,
			   size_t len, loff_t *pos)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* If size is not set, or set to 0, always return EOF. */
	if (asma->size == 0)
		goto out;

	if (!asma->file) {
		ret = -EBADF;
		goto out;
	}

	ret = asma->file->f_op->read(asma->file, buf, len, pos);
	if (ret < 0)
		goto out;

	/* Update backing file pos, since f_ops->read() doesn't */
	asma->file->f_pos = *pos;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
	struct ashmem_area *asma = file->private_data;
	int ret;

	mutex_lock(&ashmem_mutex);

	if (asma->size == 0) {
		ret = -EINVAL;
		goto out;
	}

	if (!asma->file) {
		ret = -EBADF;
		goto out;
	}

	ret = asma->file->f_op->llseek(asma->file, offset, origin);
	if (ret < 0)
		goto out;

	/* Copy f_pos from backing file, since f_ops->llseek() sets it */
	file->f_pos = asma->file->f_pos;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static inline unsigned long calc_vm_may_flags(unsigned long prot)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
}
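
/*
 * Illustrative example, added by the editor (not in the original source):
 * suppose asma->prot_mask has been restricted to PROT_READ | PROT_WRITE.
 * An mmap() with PROT_EXEC sets VM_EXEC in vma->vm_flags; masking out
 * calc_vm_prot_bits(asma->prot_mask) leaves VM_EXEC, which intersects
 * calc_vm_prot_bits(PROT_MASK), so the check in ashmem_mmap() below
 * fails with -EPERM. For permitted mappings, the following VM_MAY* line
 * also clears VM_MAYEXEC, so a later mprotect(PROT_EXEC) cannot re-add
 * what the mask forbids.
 */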

static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* user needs to SET_SIZE before mapping */
	if (unlikely(!asma->size)) {
		ret = -EINVAL;
		goto out;
	}

	/* requested protection bits must match our allowed protection mask */
	if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
		     calc_vm_prot_bits(PROT_MASK))) {
		ret = -EPERM;
		goto out;
	}
	vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);

	if (!asma->file) {
		char *name = ASHMEM_NAME_DEF;
		struct file *vmfile;

		if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
			name = asma->name;

		/* ... and allocate the backing shmem file */
		vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
		if (unlikely(IS_ERR(vmfile))) {
			ret = PTR_ERR(vmfile);
			goto out;
		}
		asma->file = vmfile;
	}
	get_file(asma->file);

	/*
	 * XXX - Reworked to use shmem_zero_setup() instead of
	 * shmem_set_file while we're in staging. -jstultz
	 */
	if (vma->vm_flags & VM_SHARED) {
		ret = shmem_zero_setup(vma);
		if (ret) {
			fput(asma->file);
			goto out;
		}
	}

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = asma->file;
	vma->vm_flags |= VM_CAN_NONLINEAR;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab
 *
 * 'sc->nr_to_scan' is the number of objects (pages) to prune, or 0 to query
 * how many objects (pages) we have in total.
 *
 * 'sc->gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of objects (pages) remaining, or -1 if we cannot
 * proceed without risk of deadlock (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct ashmem_range *range, *next;

	/* We might recurse into filesystem code, so bail out if necessary */
	if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
		return -1;
	if (!sc->nr_to_scan)
		return lru_count;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
		struct inode *inode = range->asma->file->f_dentry->d_inode;
		loff_t start = range->pgstart * PAGE_SIZE;
		loff_t end = (range->pgend + 1) * PAGE_SIZE - 1;

		vmtruncate_range(inode, start, end);
		range->purged = ASHMEM_WAS_PURGED;
		lru_del(range);

		sc->nr_to_scan -= range_size(range);
		if (sc->nr_to_scan <= 0)
			break;
	}
	mutex_unlock(&ashmem_mutex);

	return lru_count;
}

static struct shrinker ashmem_shrinker = {
	.shrink = ashmem_shrink,
	.seeks = DEFAULT_SEEKS * 4,
};
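
/*
 * Editor's note (not in the original source): 'seeks' is the shrinker
 * API's estimate of how costly it is to recreate an evicted object, and
 * the scan pressure applied by shrink_slab() scales inversely with it.
 * Four times DEFAULT_SEEKS therefore biases the VM toward purging ashmem
 * less aggressively than ordinary caches, which seems to reflect that
 * purged pages must be regenerated by the client application.
 */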

static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* the user can only remove, not add, protection bits */
	if (unlikely((asma->prot_mask & prot) != prot)) {
		ret = -EINVAL;
		goto out;
	}

	/* does the application expect PROT_READ to imply PROT_EXEC? */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	asma->prot_mask = prot;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static int set_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* cannot change an existing mapping's name */
	if (unlikely(asma->file)) {
		ret = -EINVAL;
		goto out;
	}

	if (unlikely(copy_from_user(asma->name + ASHMEM_NAME_PREFIX_LEN,
				    name, ASHMEM_NAME_LEN)))
		ret = -EFAULT;
	asma->name[ASHMEM_FULL_NAME_LEN - 1] = '\0';

out:
	mutex_unlock(&ashmem_mutex);

	return ret;
}

static int get_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);
	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
		size_t len;

		/*
		 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
		 * prevents us from revealing one user's stack to another.
		 */
		len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
		if (unlikely(copy_to_user(name,
				asma->name + ASHMEM_NAME_PREFIX_LEN, len)))
			ret = -EFAULT;
	} else {
		if (unlikely(copy_to_user(name, ASHMEM_NAME_DEF,
					  sizeof(ASHMEM_NAME_DEF))))
			ret = -EFAULT;
	}
	mutex_unlock(&ashmem_mutex);

	return ret;
}

/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	int ret = ASHMEM_NOT_PURGED;

	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* moved past last applicable page; we can short circuit */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to pin pages that span multiple ranges,
		 * or to pin pages that aren't even unpinned, so this is messy.
		 *
		 * Four cases:
		 * 1. The requested range subsumes an existing range, so we
		 *    just remove the entire matching range.
		 * 2. The requested range overlaps the start of an existing
		 *    range, so we just update that range.
		 * 3. The requested range overlaps the end of an existing
		 *    range, so we just update that range.
		 * 4. The requested range punches a hole in an existing range,
		 *    so we have to update one side of the range and then
		 *    create a new range for the other side.
		 */
		if (page_range_in_range(range, pgstart, pgend)) {
			ret |= range->purged;

			/* Case #1: Easy. Just nuke the whole thing. */
			if (page_range_subsumes_range(range, pgstart, pgend)) {
				range_del(range);
				continue;
			}

			/* Case #2: We overlap from the start, so adjust it */
			if (range->pgstart >= pgstart) {
				range_shrink(range, pgend + 1, range->pgend);
				continue;
			}

			/* Case #3: We overlap from the rear, so adjust it */
			if (range->pgend <= pgend) {
				range_shrink(range, range->pgstart,
					     pgstart - 1);
				continue;
			}

			/*
			 * Case #4: We eat a chunk out of the middle. A bit
			 * more complicated, we allocate a new range for the
			 * second half and adjust the first chunk's endpoint.
			 */
			range_alloc(asma, range, range->purged,
				    pgend + 1, range->pgend);
			range_shrink(range, range->pgstart, pgstart - 1);
			break;
		}
	}

	return ret;
}
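
/*
 * Editor's illustrative example (not in the original source): if the
 * unpinned list holds a single range covering pages [0, 9] and the user
 * pins [4, 5], case #4 applies: a new range [6, 9] is allocated and the
 * existing range is shrunk to [0, 3], leaving a pinned hole in the middle.
 */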

/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	unsigned int purged = ASHMEM_NOT_PURGED;

restart:
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* short circuit: this is our insertion point */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to unpin pages that are already entirely
		 * or partially pinned. We handle those two cases here.
		 */
		if (page_range_subsumed_by_range(range, pgstart, pgend))
			return 0;
		if (page_range_in_range(range, pgstart, pgend)) {
			pgstart = min_t(size_t, range->pgstart, pgstart);
			pgend = max_t(size_t, range->pgend, pgend);
			purged |= range->purged;
			range_del(range);
			goto restart;
		}
	}

	return range_alloc(asma, range, purged, pgstart, pgend);
}

/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
				 size_t pgend)
{
	struct ashmem_range *range;
	int ret = ASHMEM_IS_PINNED;

	list_for_each_entry(range, &asma->unpinned_list, unpinned) {
		if (range_before_page(range, pgstart))
			break;
		if (page_range_in_range(range, pgstart, pgend)) {
			ret = ASHMEM_IS_UNPINNED;
			break;
		}
	}

	return ret;
}

static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
			    void __user *p)
{
	struct ashmem_pin pin;
	size_t pgstart, pgend;
	int ret = -EINVAL;

	if (unlikely(!asma->file))
		return -EINVAL;

	if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
		return -EFAULT;

	/* per custom, you can pass zero for len to mean "everything onward" */
	if (!pin.len)
		pin.len = PAGE_ALIGN(asma->size) - pin.offset;

	if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
		return -EINVAL;

	if (unlikely(((__u32) -1) - pin.offset < pin.len))
		return -EINVAL;

	if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
		return -EINVAL;

	pgstart = pin.offset / PAGE_SIZE;
	pgend = pgstart + (pin.len / PAGE_SIZE) - 1;

	mutex_lock(&ashmem_mutex);

	switch (cmd) {
	case ASHMEM_PIN:
		ret = ashmem_pin(asma, pgstart, pgend);
		break;
	case ASHMEM_UNPIN:
		ret = ashmem_unpin(asma, pgstart, pgend);
		break;
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_get_pin_status(asma, pgstart, pgend);
		break;
	}

	mutex_unlock(&ashmem_mutex);

	return ret;
}
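
/*
 * Minimal userspace sketch, added by the editor (assumes the ASHMEM_*
 * ioctls and struct ashmem_pin from the userspace ashmem.h; error
 * handling elided; not part of the original source):
 *
 *	struct ashmem_pin pin = { .offset = 0, .len = 0 };  // whole region
 *	ioctl(fd, ASHMEM_UNPIN, &pin);       // pages become evictable
 *	...
 *	if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
 *		regenerate_contents();       // hypothetical helper
 */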

static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ashmem_area *asma = file->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case ASHMEM_SET_NAME:
		ret = set_name(asma, (void __user *) arg);
		break;
	case ASHMEM_GET_NAME:
		ret = get_name(asma, (void __user *) arg);
		break;
	case ASHMEM_SET_SIZE:
		ret = -EINVAL;
		if (!asma->file) {
			ret = 0;
			asma->size = (size_t) arg;
		}
		break;
	case ASHMEM_GET_SIZE:
		ret = asma->size;
		break;
	case ASHMEM_SET_PROT_MASK:
		ret = set_prot_mask(asma, arg);
		break;
	case ASHMEM_GET_PROT_MASK:
		ret = asma->prot_mask;
		break;
	case ASHMEM_PIN:
	case ASHMEM_UNPIN:
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_pin_unpin(asma, cmd, (void __user *) arg);
		break;
	case ASHMEM_PURGE_ALL_CACHES:
		ret = -EPERM;
		if (capable(CAP_SYS_ADMIN)) {
			struct shrink_control sc = {
				.gfp_mask = GFP_KERNEL,
				.nr_to_scan = 0,
			};
			ret = ashmem_shrink(&ashmem_shrinker, &sc);
			sc.nr_to_scan = ret;
			ashmem_shrink(&ashmem_shrinker, &sc);
		}
		break;
	}

	return ret;
}

static struct file_operations ashmem_fops = {
	.owner = THIS_MODULE,
	.open = ashmem_open,
	.release = ashmem_release,
	.read = ashmem_read,
	.llseek = ashmem_llseek,
	.mmap = ashmem_mmap,
	.unlocked_ioctl = ashmem_ioctl,
	.compat_ioctl = ashmem_ioctl,
};
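
/*
 * Minimal end-to-end userspace sketch, added by the editor (assumes the
 * userspace ashmem.h definitions; error handling elided; not part of the
 * original source):
 *
 *	int fd = open("/dev/ashmem", O_RDWR);
 *	ioctl(fd, ASHMEM_SET_NAME, "example");  // optional, before mmap
 *	ioctl(fd, ASHMEM_SET_SIZE, 4096);       // required before mmap
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	// share fd with another process (e.g. over a unix socket or
 *	// Android binder) and mmap it there for shared memory
 */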

static struct miscdevice ashmem_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ashmem",
	.fops = &ashmem_fops,
};

static int __init ashmem_init(void)
{
	int ret;

	ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
					       sizeof(struct ashmem_area),
					       0, 0, NULL);
	if (unlikely(!ashmem_area_cachep)) {
		printk(KERN_ERR "ashmem: failed to create slab cache\n");
		return -ENOMEM;
	}

	ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
						sizeof(struct ashmem_range),
						0, 0, NULL);
	if (unlikely(!ashmem_range_cachep)) {
		printk(KERN_ERR "ashmem: failed to create slab cache\n");
		return -ENOMEM;
	}

	ret = misc_register(&ashmem_misc);
	if (unlikely(ret)) {
		printk(KERN_ERR "ashmem: failed to register misc device!\n");
		return ret;
	}

	register_shrinker(&ashmem_shrinker);

	printk(KERN_INFO "ashmem: initialized\n");

	return 0;
}

static void __exit ashmem_exit(void)
{
	int ret;

	unregister_shrinker(&ashmem_shrinker);

	ret = misc_deregister(&ashmem_misc);
	if (unlikely(ret))
		printk(KERN_ERR "ashmem: failed to unregister misc device!\n");

	kmem_cache_destroy(ashmem_range_cachep);
	kmem_cache_destroy(ashmem_area_cachep);

	printk(KERN_INFO "ashmem: unloaded\n");
}

module_init(ashmem_init);
module_exit(ashmem_exit);

MODULE_LICENSE("GPL");