staging/android: Initial partial kernel-doc for ashmem.c
/* mm/ashmem.c
 *
 * Anonymous Shared Memory Subsystem, ashmem
 *
 * Copyright (C) 2008 Google, Inc.
 *
 * Robert Love <rlove@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "ashmem: " fmt

#include <linux/module.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/falloc.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include "ashmem.h"

#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)

/**
 * struct ashmem_area - The anonymous shared memory area
 * @name: The optional name in /proc/pid/maps
 * @unpinned_list: The list of this area's unpinned ranges
 * @file: The shmem-based backing file
 * @size: The size of the mapping, in bytes
 * @prot_mask: The allowed protection bits, as vm_flags
 *
 * The lifecycle of this structure is from our parent file's open() until
 * its release(). It is also protected by 'ashmem_mutex'.
 *
 * Warning: Mappings do NOT pin this structure; it dies on close().
 */
struct ashmem_area {
	char name[ASHMEM_FULL_NAME_LEN];
	struct list_head unpinned_list;
	struct file *file;
	size_t size;
	unsigned long prot_mask;
};

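/*
 * Illustrative userspace lifecycle (a hedged sketch, not part of the
 * driver): a client typically opens the misc device, optionally names and
 * sizes the region, and then maps it. The name and size below are just
 * examples.
 *
 *	int fd = open("/dev/ashmem", O_RDWR);
 *	ioctl(fd, ASHMEM_SET_NAME, "example-region");
 *	ioctl(fd, ASHMEM_SET_SIZE, 4 * 4096);
 *	void *p = mmap(NULL, 4 * 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 * The size must be set before mmap() (see ashmem_mmap() below), and the
 * name can no longer be changed once a backing file exists.
 */
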
/**
 * struct ashmem_range - A range of unpinned/evictable pages
 * @lru: The entry in the LRU list
 * @unpinned: The entry in its area's unpinned list
 * @asma: The associated anonymous shared memory area
 * @pgstart: The starting page (inclusive)
 * @pgend: The ending page (inclusive)
 * @purged: The purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 *
 * The lifecycle of this structure is from unpin to pin.
 * It is protected by 'ashmem_mutex'.
 */
struct ashmem_range {
	struct list_head lru;
	struct list_head unpinned;
	struct ashmem_area *asma;
	size_t pgstart;
	size_t pgend;
	unsigned int purged;
};

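/*
 * Worked example (hypothetical numbers): with 4096-byte pages, unpinning
 * bytes 16384..32767 of a region is recorded as a single ashmem_range
 * with pgstart = 4 and pgend = 7 (both inclusive), i.e. four pages.
 */
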
/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

/**
 * lru_count - The count of pages on our LRU list.
 *
 * This is protected by ashmem_mutex.
 */
static unsigned long lru_count;

/**
 * ashmem_mutex - protects the list of and each individual ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);

static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;

#define range_size(range) \
	((range)->pgend - (range)->pgstart + 1)

#define range_on_lru(range) \
	((range)->purged == ASHMEM_NOT_PURGED)

#define page_range_subsumes_range(range, start, end) \
	(((range)->pgstart >= (start)) && ((range)->pgend <= (end)))

#define page_range_subsumed_by_range(range, start, end) \
	(((range)->pgstart <= (start)) && ((range)->pgend >= (end)))

#define page_in_range(range, page) \
	(((range)->pgstart <= (page)) && ((range)->pgend >= (page)))

#define page_range_in_range(range, start, end) \
	(page_in_range(range, start) || page_in_range(range, end) || \
	 page_range_subsumes_range(range, start, end))

#define range_before_page(range, page) \
	((range)->pgend < (page))

#define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE)

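/*
 * Worked example for the helpers above (hypothetical numbers): for a
 * range with pgstart = 4 and pgend = 9, range_size() is 6,
 * page_in_range(range, 6) is true, page_range_subsumes_range(range, 2, 12)
 * is true (the request covers the whole range), and
 * range_before_page(range, 15) is true because the range ends before
 * page 15.
 */
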
/**
 * lru_add() - Adds a range of memory to the LRU list
 * @range: The memory range being added.
 *
 * The range is first added to the end (tail) of the LRU list.
 * After this, the size of the range is added to @lru_count
 */
static inline void lru_add(struct ashmem_range *range)
{
	list_add_tail(&range->lru, &ashmem_lru_list);
	lru_count += range_size(range);
}

/**
 * lru_del() - Removes a range of memory from the LRU list
 * @range: The memory range being removed
 *
 * The range is first deleted from the LRU list.
 * After this, the size of the range is removed from @lru_count
 */
static inline void lru_del(struct ashmem_range *range)
{
	list_del(&range->lru);
	lru_count -= range_size(range);
}

/**
 * range_alloc() - Allocates and initializes a new ashmem_range structure
 * @asma: The associated ashmem_area
 * @prev_range: The previous ashmem_range in the sorted asma->unpinned list
 * @purged: Initial purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * @start: The starting page (inclusive)
 * @end: The ending page (inclusive)
 *
 * This function is protected by ashmem_mutex.
 *
 * Return: 0 if successful, or -ENOMEM if there is an error
 */
static int range_alloc(struct ashmem_area *asma,
		       struct ashmem_range *prev_range, unsigned int purged,
		       size_t start, size_t end)
{
	struct ashmem_range *range;

	range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
	if (unlikely(!range))
		return -ENOMEM;

	range->asma = asma;
	range->pgstart = start;
	range->pgend = end;
	range->purged = purged;

	list_add_tail(&range->unpinned, &prev_range->unpinned);

	if (range_on_lru(range))
		lru_add(range);

	return 0;
}

/**
 * range_del() - Deletes and deallocates an ashmem_range structure
 * @range: The associated ashmem_range that has previously been allocated
 */
static void range_del(struct ashmem_range *range)
{
	list_del(&range->unpinned);
	if (range_on_lru(range))
		lru_del(range);
	kmem_cache_free(ashmem_range_cachep, range);
}

/*
 * range_shrink - shrinks a range
 *
 * Caller must hold ashmem_mutex.
 */
static inline void range_shrink(struct ashmem_range *range,
				size_t start, size_t end)
{
	size_t pre = range_size(range);

	range->pgstart = start;
	range->pgend = end;

	if (range_on_lru(range))
		lru_count -= pre - range_size(range);
}

static int ashmem_open(struct inode *inode, struct file *file)
{
	struct ashmem_area *asma;
	int ret;

	ret = generic_file_open(inode, file);
	if (unlikely(ret))
		return ret;

	asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
	if (unlikely(!asma))
		return -ENOMEM;

	INIT_LIST_HEAD(&asma->unpinned_list);
	memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
	asma->prot_mask = PROT_MASK;
	file->private_data = asma;

	return 0;
}

static int ashmem_release(struct inode *ignored, struct file *file)
{
	struct ashmem_area *asma = file->private_data;
	struct ashmem_range *range, *next;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
		range_del(range);
	mutex_unlock(&ashmem_mutex);

	if (asma->file)
		fput(asma->file);
	kmem_cache_free(ashmem_area_cachep, asma);

	return 0;
}

static ssize_t ashmem_read(struct file *file, char __user *buf,
			   size_t len, loff_t *pos)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* If size is not set, or set to 0, always return EOF. */
	if (asma->size == 0)
		goto out;

	if (!asma->file) {
		ret = -EBADF;
		goto out;
	}

	ret = asma->file->f_op->read(asma->file, buf, len, pos);
	if (ret < 0)
		goto out;

	/* Update the backing file's f_pos, since f_op->read() doesn't */
	asma->file->f_pos = *pos;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
	struct ashmem_area *asma = file->private_data;
	int ret;

	mutex_lock(&ashmem_mutex);

	if (asma->size == 0) {
		ret = -EINVAL;
		goto out;
	}

	if (!asma->file) {
		ret = -EBADF;
		goto out;
	}

	ret = asma->file->f_op->llseek(asma->file, offset, origin);
	if (ret < 0)
		goto out;

	/* Copy f_pos from the backing file, since f_op->llseek() sets it */
	file->f_pos = asma->file->f_pos;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
{
	return _calc_vm_trans(prot, PROT_READ, VM_MAYREAD) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
	       _calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC);
}

static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* user needs to SET_SIZE before mapping */
	if (unlikely(!asma->size)) {
		ret = -EINVAL;
		goto out;
	}

	/* requested protection bits must match our allowed protection mask */
	if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
		     calc_vm_prot_bits(PROT_MASK))) {
		ret = -EPERM;
		goto out;
	}
	vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);

	if (!asma->file) {
		char *name = ASHMEM_NAME_DEF;
		struct file *vmfile;

		if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
			name = asma->name;

		/* ... and allocate the backing shmem file */
		vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
		if (unlikely(IS_ERR(vmfile))) {
			ret = PTR_ERR(vmfile);
			goto out;
		}
		asma->file = vmfile;
	}
	get_file(asma->file);

	/*
	 * XXX - Reworked to use shmem_zero_setup() instead of
	 * shmem_set_file while we're in staging. -jstultz
	 */
	if (vma->vm_flags & VM_SHARED) {
		ret = shmem_zero_setup(vma);
		if (ret) {
			fput(asma->file);
			goto out;
		}
	}

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = asma->file;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

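/*
 * Behaviour sketch for the checks above (hypothetical userspace values):
 * if ASHMEM_SET_PROT_MASK has already reduced prot_mask to PROT_READ, a
 * later
 *
 *	mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * fails with MAP_FAILED/EPERM, because the requested protection exceeds
 * the allowed mask, while a PROT_READ-only mapping still succeeds.
 */
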
/*
 * ashmem_shrink_scan - our cache shrinker, called from mm/vmscan.c :: shrink_slab
 *
 * 'sc->nr_to_scan' is the number of objects to scan for freeing.
 *
 * 'sc->gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of pages freed, or SHRINK_STOP if we cannot
 * proceed without risk of deadlock (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static unsigned long
ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct ashmem_range *range, *next;
	unsigned long freed = 0;

	/* We might recurse into filesystem code, so bail out if necessary */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
		loff_t start = range->pgstart * PAGE_SIZE;
		loff_t end = (range->pgend + 1) * PAGE_SIZE;

		do_fallocate(range->asma->file,
			     FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			     start, end - start);
		range->purged = ASHMEM_WAS_PURGED;
		lru_del(range);

		freed += range_size(range);
		if (--sc->nr_to_scan <= 0)
			break;
	}
	mutex_unlock(&ashmem_mutex);
	return freed;
}

static unsigned long
ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	/*
	 * Note that lru_count is a count of pages on the LRU, not a count
	 * of objects on the list. This means the scan function needs to
	 * return the number of pages freed, not the number of objects
	 * scanned.
	 */
	return lru_count;
}

static struct shrinker ashmem_shrinker = {
	.count_objects = ashmem_shrink_count,
	.scan_objects = ashmem_shrink_scan,
	/*
	 * XXX (dchinner): I wish people would comment on why they need
	 * significant changes to the default value here
	 */
	.seeks = DEFAULT_SEEKS * 4,
};

static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* the user can only remove, not add, protection bits */
	if (unlikely((asma->prot_mask & prot) != prot)) {
		ret = -EINVAL;
		goto out;
	}

	/* does the application expect PROT_READ to imply PROT_EXEC? */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	asma->prot_mask = prot;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

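/*
 * Usage sketch (hypothetical userspace): a common pattern is to fill the
 * region and then drop write permission for all future mappings; bits can
 * only ever be removed, never added back.
 *
 *	ioctl(fd, ASHMEM_SET_PROT_MASK, PROT_READ);
 */
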
static int set_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;
	char local_name[ASHMEM_NAME_LEN];

	/*
	 * Holding the ashmem_mutex while doing a copy_from_user might cause
	 * a data abort, which would try to access mmap_sem. If another
	 * thread has invoked ashmem_mmap then it will be holding the
	 * semaphore and will be waiting for ashmem_mutex, thereby leading to
	 * a deadlock. We'll release the mutex and take the name to a local
	 * variable that does not need protection and later copy the local
	 * variable to the structure member with the lock held.
	 */
	if (copy_from_user(local_name, name, ASHMEM_NAME_LEN))
		return -EFAULT;

	mutex_lock(&ashmem_mutex);
	/* cannot change an existing mapping's name */
	if (unlikely(asma->file)) {
		ret = -EINVAL;
		goto out;
	}
	memcpy(asma->name + ASHMEM_NAME_PREFIX_LEN,
	       local_name, ASHMEM_NAME_LEN);
	asma->name[ASHMEM_FULL_NAME_LEN - 1] = '\0';
out:
	mutex_unlock(&ashmem_mutex);

	return ret;
}

static int get_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;
	size_t len;
	/*
	 * Have a local variable to which we'll copy the content
	 * from asma with the lock held. Later we can copy this to the user
	 * space safely without holding any locks. So even if we proceed to
	 * wait for mmap_sem, it won't lead to deadlock.
	 */
	char local_name[ASHMEM_NAME_LEN];

	mutex_lock(&ashmem_mutex);
	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
		/*
		 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
		 * prevents us from revealing one user's stack to another.
		 */
		len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
		memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len);
	} else {
		len = sizeof(ASHMEM_NAME_DEF);
		memcpy(local_name, ASHMEM_NAME_DEF, len);
	}
	mutex_unlock(&ashmem_mutex);

	/*
	 * Now we are just copying from the stack variable to userland;
	 * no locks are held.
	 */
	if (unlikely(copy_to_user(name, local_name, len)))
		ret = -EFAULT;
	return ret;
}

/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	int ret = ASHMEM_NOT_PURGED;

	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* moved past last applicable page; we can short circuit */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to pin pages that span multiple ranges,
		 * or to pin pages that aren't even unpinned, so this is messy.
		 *
		 * Four cases:
		 * 1. The requested range subsumes an existing range, so we
		 *    just remove the entire matching range.
		 * 2. The requested range overlaps the start of an existing
		 *    range, so we just update that range.
		 * 3. The requested range overlaps the end of an existing
		 *    range, so we just update that range.
		 * 4. The requested range punches a hole in an existing range,
		 *    so we have to update one side of the range and then
		 *    create a new range for the other side.
		 */
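		/*
		 * Worked example (hypothetical page numbers): with an
		 * existing unpinned range [4, 9], pinning [6, 7] is case
		 * #4: a new range [8, 9] is allocated and the old range is
		 * shrunk to [4, 5]. Pinning [8, 12] instead would be case
		 * #3: the range is shrunk to [4, 7].
		 */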
		if (page_range_in_range(range, pgstart, pgend)) {
			ret |= range->purged;

			/* Case #1: Easy. Just nuke the whole thing. */
			if (page_range_subsumes_range(range, pgstart, pgend)) {
				range_del(range);
				continue;
			}

			/* Case #2: We overlap from the start, so adjust it */
			if (range->pgstart >= pgstart) {
				range_shrink(range, pgend + 1, range->pgend);
				continue;
			}

			/* Case #3: We overlap from the rear, so adjust it */
			if (range->pgend <= pgend) {
				range_shrink(range, range->pgstart, pgstart - 1);
				continue;
			}

			/*
			 * Case #4: We eat a chunk out of the middle. A bit
			 * more complicated, we allocate a new range for the
			 * second half and adjust the first chunk's endpoint.
			 */
			range_alloc(asma, range, range->purged,
				    pgend + 1, range->pgend);
			range_shrink(range, range->pgstart, pgstart - 1);
			break;
		}
	}

	return ret;
}

/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	unsigned int purged = ASHMEM_NOT_PURGED;

restart:
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* short circuit: this is our insertion point */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to unpin pages that are already entirely
		 * or partially pinned. We handle those two cases here.
		 */
		if (page_range_subsumed_by_range(range, pgstart, pgend))
			return 0;
		if (page_range_in_range(range, pgstart, pgend)) {
			pgstart = min_t(size_t, range->pgstart, pgstart);
			pgend = max_t(size_t, range->pgend, pgend);
			purged |= range->purged;
			range_del(range);
			goto restart;
		}
	}

	return range_alloc(asma, range, purged, pgstart, pgend);
}

/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
				 size_t pgend)
{
	struct ashmem_range *range;
	int ret = ASHMEM_IS_PINNED;

	list_for_each_entry(range, &asma->unpinned_list, unpinned) {
		if (range_before_page(range, pgstart))
			break;
		if (page_range_in_range(range, pgstart, pgend)) {
			ret = ASHMEM_IS_UNPINNED;
			break;
		}
	}

	return ret;
}

static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
			    void __user *p)
{
	struct ashmem_pin pin;
	size_t pgstart, pgend;
	int ret = -EINVAL;

	if (unlikely(!asma->file))
		return -EINVAL;

	if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
		return -EFAULT;

	/* per custom, you can pass zero for len to mean "everything onward" */
	if (!pin.len)
		pin.len = PAGE_ALIGN(asma->size) - pin.offset;

	if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
		return -EINVAL;

	if (unlikely(((__u32) -1) - pin.offset < pin.len))
		return -EINVAL;

	if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
		return -EINVAL;

	pgstart = pin.offset / PAGE_SIZE;
	pgend = pgstart + (pin.len / PAGE_SIZE) - 1;
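	/*
	 * Example (hypothetical values): with 4096-byte pages, pin.offset
	 * 8192 and pin.len 12288 give pgstart = 2 and pgend = 4, i.e. the
	 * three pages starting at the third page of the region.
	 */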

	mutex_lock(&ashmem_mutex);

	switch (cmd) {
	case ASHMEM_PIN:
		ret = ashmem_pin(asma, pgstart, pgend);
		break;
	case ASHMEM_UNPIN:
		ret = ashmem_unpin(asma, pgstart, pgend);
		break;
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_get_pin_status(asma, pgstart, pgend);
		break;
	}

	mutex_unlock(&ashmem_mutex);

	return ret;
}

static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ashmem_area *asma = file->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case ASHMEM_SET_NAME:
		ret = set_name(asma, (void __user *) arg);
		break;
	case ASHMEM_GET_NAME:
		ret = get_name(asma, (void __user *) arg);
		break;
	case ASHMEM_SET_SIZE:
		ret = -EINVAL;
		if (!asma->file) {
			ret = 0;
			asma->size = (size_t) arg;
		}
		break;
	case ASHMEM_GET_SIZE:
		ret = asma->size;
		break;
	case ASHMEM_SET_PROT_MASK:
		ret = set_prot_mask(asma, arg);
		break;
	case ASHMEM_GET_PROT_MASK:
		ret = asma->prot_mask;
		break;
	case ASHMEM_PIN:
	case ASHMEM_UNPIN:
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_pin_unpin(asma, cmd, (void __user *) arg);
		break;
	case ASHMEM_PURGE_ALL_CACHES:
		ret = -EPERM;
		if (capable(CAP_SYS_ADMIN)) {
			struct shrink_control sc = {
				.gfp_mask = GFP_KERNEL,
				.nr_to_scan = LONG_MAX,
			};

			nodes_setall(sc.nodes_to_scan);
			ashmem_shrink_scan(&ashmem_shrinker, &sc);
		}
		break;
	}

	return ret;
}

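/*
 * Illustrative pin/unpin usage from userspace (hypothetical sketch):
 * struct ashmem_pin, from the ashmem UAPI header, carries a page-aligned
 * byte offset and length; a length of zero means "to the end of the
 * region".
 *
 *	struct ashmem_pin pin = { .offset = 0, .len = 0 };
 *
 *	ioctl(fd, ASHMEM_UNPIN, &pin);
 *	...
 *	if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
 *		regenerate_contents();
 *
 * ASHMEM_UNPIN makes the whole region purgeable here, and ASHMEM_PIN
 * reports whether any of the pinned pages were purged while unpinned;
 * regenerate_contents() is a hypothetical helper for that case.
 */
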
/* support of 32bit userspace on 64bit platforms */
#ifdef CONFIG_COMPAT
static long compat_ashmem_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	switch (cmd) {
	case COMPAT_ASHMEM_SET_SIZE:
		cmd = ASHMEM_SET_SIZE;
		break;
	case COMPAT_ASHMEM_SET_PROT_MASK:
		cmd = ASHMEM_SET_PROT_MASK;
		break;
	}
	return ashmem_ioctl(file, cmd, arg);
}
#endif

static const struct file_operations ashmem_fops = {
	.owner = THIS_MODULE,
	.open = ashmem_open,
	.release = ashmem_release,
	.read = ashmem_read,
	.llseek = ashmem_llseek,
	.mmap = ashmem_mmap,
	.unlocked_ioctl = ashmem_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_ashmem_ioctl,
#endif
};

static struct miscdevice ashmem_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ashmem",
	.fops = &ashmem_fops,
};

static int __init ashmem_init(void)
{
	int ret;

	ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
					       sizeof(struct ashmem_area),
					       0, 0, NULL);
	if (unlikely(!ashmem_area_cachep)) {
		pr_err("failed to create slab cache\n");
		return -ENOMEM;
	}

	ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
						sizeof(struct ashmem_range),
						0, 0, NULL);
	if (unlikely(!ashmem_range_cachep)) {
		pr_err("failed to create slab cache\n");
		return -ENOMEM;
	}

	ret = misc_register(&ashmem_misc);
	if (unlikely(ret)) {
		pr_err("failed to register misc device!\n");
		return ret;
	}

	register_shrinker(&ashmem_shrinker);

	pr_info("initialized\n");

	return 0;
}

static void __exit ashmem_exit(void)
{
	int ret;

	unregister_shrinker(&ashmem_shrinker);

	ret = misc_deregister(&ashmem_misc);
	if (unlikely(ret))
		pr_err("failed to unregister misc device!\n");

	kmem_cache_destroy(ashmem_range_cachep);
	kmem_cache_destroy(ashmem_area_cachep);

	pr_info("unloaded\n");
}

module_init(ashmem_init);
module_exit(ashmem_exit);

MODULE_LICENSE("GPL");