mm: refactor inactive_file_is_low() to use get_lru_size()
mm/mlock.c [deliverable/linux.git]

/*
 * linux/mm/mlock.c
 *
 * (C) Copyright 1995 Linus Torvalds
 * (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>

#include "internal.h"

int can_do_mlock(void)
{
	if (capable(CAP_IPC_LOCK))
		return 1;
	if (rlimit(RLIMIT_MEMLOCK) != 0)
		return 1;
	return 0;
}
EXPORT_SYMBOL(can_do_mlock);

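/*
 * Illustrative userspace sketch (an assumption for clarity, not part of
 * the original file): an unprivileged task passes can_do_mlock() only if
 * RLIMIT_MEMLOCK is nonzero, so callers typically raise the limit first.
 *
 *	struct rlimit rl = { RLIM_INFINITY, RLIM_INFINITY };
 *	setrlimit(RLIMIT_MEMLOCK, &rl);	// raising the hard limit needs
 *					// CAP_SYS_RESOURCE
 *	mlock(buf, len);		// now permitted by can_do_mlock()
 */
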
/*
 * Mlocked pages are marked with PageMlocked() flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 * The unevictable list is an LRU sibling list to the [in]active lists.
 * PageUnevictable is set to indicate the unevictable state.
 *
 * When lazy mlocking via vmscan, it is important to ensure that the
 * vma's VM_LOCKED status is not concurrently being modified, otherwise we
 * may have mlocked a page that is being munlocked.  So lazy mlock must take
 * the mmap_sem for read, and verify that the vma really is locked
 * (see mm/rmap.c).
 */

/*
 * LRU accounting for clear_page_mlock()
 */
void clear_page_mlock(struct page *page)
{
	if (!TestClearPageMlocked(page))
		return;

	mod_zone_page_state(page_zone(page), NR_MLOCK,
			    -hpage_nr_pages(page));
	count_vm_event(UNEVICTABLE_PGCLEARED);
	if (!isolate_lru_page(page)) {
		putback_lru_page(page);
	} else {
		/*
		 * We lost the race.  The page already moved to the
		 * evictable list.
		 */
		if (PageUnevictable(page))
			count_vm_event(UNEVICTABLE_PGSTRANDED);
	}
}

/*
 * Mark page as mlocked if not already.
 * If page on LRU, isolate and putback to move to unevictable list.
 */
void mlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (!TestSetPageMlocked(page)) {
		mod_zone_page_state(page_zone(page), NR_MLOCK,
				    hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
		if (!isolate_lru_page(page))
			putback_lru_page(page);
	}
}

/**
 * munlock_vma_page - munlock a vma page
 * @page: page to be unlocked
 *
 * called from munlock()/munmap() path with page supposedly on the LRU.
 * When we munlock a page, because the vma where we found the page is being
 * munlock()ed or munmap()ed, we want to check whether other vmas hold the
 * page locked so that we can leave it on the unevictable lru list and not
 * bother vmscan with it.  However, to walk the page's rmap list in
 * try_to_munlock() we must isolate the page from the LRU.  If some other
 * task has removed the page from the LRU, we won't be able to do that.
 * So we clear the PageMlocked as we might not get another chance.  If we
 * can't isolate the page, we leave it for putback_lru_page() and vmscan
 * [page_referenced()/try_to_unmap()] to deal with.
 */
void munlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (TestClearPageMlocked(page)) {
		mod_zone_page_state(page_zone(page), NR_MLOCK,
				    -hpage_nr_pages(page));
		if (!isolate_lru_page(page)) {
			int ret = SWAP_AGAIN;

			/*
			 * Optimization: if the page was mapped just once,
			 * that's our mapping and we don't need to check all
			 * the other vmas.
			 */
			if (page_mapcount(page) > 1)
				ret = try_to_munlock(page);
			/*
			 * did try_to_munlock() succeed or punt?
			 */
			if (ret != SWAP_MLOCK)
				count_vm_event(UNEVICTABLE_PGMUNLOCKED);

			putback_lru_page(page);
		} else {
			/*
			 * Some other task has removed the page from the LRU.
			 * putback_lru_page() will take care of removing the
			 * page from the unevictable list, if necessary.
			 * vmscan [page_referenced()] will move the page back
			 * to the unevictable list if some other vma has it
			 * mlocked.
			 */
			if (PageUnevictable(page))
				count_vm_event(UNEVICTABLE_PGSTRANDED);
			else
				count_vm_event(UNEVICTABLE_PGMUNLOCKED);
		}
	}
}

/**
 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 *
 * This takes care of making the pages present too.
 *
 * return 0 on success, negative error code on error.
 *
 * vma->vm_mm->mmap_sem must be held for at least read.
 */
long __mlock_vma_pages_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr = start;
	int nr_pages = (end - start) / PAGE_SIZE;
	int gup_flags;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(end & ~PAGE_MASK);
	VM_BUG_ON(start < vma->vm_start);
	VM_BUG_ON(end > vma->vm_end);
	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));

	gup_flags = FOLL_TOUCH | FOLL_MLOCK;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want mlock to succeed for regions that have any permissions
	 * other than PROT_NONE.
	 */
	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
		gup_flags |= FOLL_FORCE;

	return __get_user_pages(current, mm, addr, nr_pages, gup_flags,
				NULL, NULL, nonblocking);
}

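/*
 * Flag composition implied by the logic above (a restatement for clarity,
 * not part of the original file):
 *
 *	private writable vma:	FOLL_TOUCH | FOLL_MLOCK | FOLL_WRITE | FOLL_FORCE
 *	shared writable vma:	FOLL_TOUCH | FOLL_MLOCK | FOLL_FORCE
 *	read-only vma:		FOLL_TOUCH | FOLL_MLOCK | FOLL_FORCE
 *	PROT_NONE vma:		FOLL_TOUCH | FOLL_MLOCK (faults will fail)
 */
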
/*
 * convert get_user_pages() return value to posix mlock() error:
 * POSIX wants ENOMEM when addresses in the range are not mapped, and
 * EAGAIN when the pages could not (yet) be locked.
 */
static int __mlock_posix_error_return(long retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;
	else if (retval == -ENOMEM)
		retval = -EAGAIN;
	return retval;
}

/*
 * munlock_vma_pages_range() - munlock all pages in the vma range.
 * @vma - vma containing range to be munlock()ed.
 * @start - start address in @vma of the range
 * @end - end of range in @vma.
 *
 * For mremap(), munmap() and exit().
 *
 * Called with @vma VM_LOCKED.
 *
 * Returns with VM_LOCKED cleared.  Callers must be prepared to
 * deal with this.
 *
 * We don't save and restore VM_LOCKED here because pages are
 * still on lru.  In unmap path, pages might be scanned by reclaim
 * and re-mlocked by try_to_{munlock|unmap} before we unmap and
 * free them.  This will result in freeing mlocked pages.
 */
void munlock_vma_pages_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	unsigned long addr;

	lru_add_drain();
	vma->vm_flags &= ~VM_LOCKED;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *page;
		/*
		 * Although FOLL_DUMP is intended for get_dump_page(),
		 * it just so happens that its special treatment of the
		 * ZERO_PAGE (returning an error instead of doing get_page)
		 * suits munlock very well (and if somehow an abnormal page
		 * has sneaked into the range, we won't oops here: great).
		 */
		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
		if (page && !IS_ERR(page)) {
			lock_page(page);
			munlock_vma_page(page);
			unlock_page(page);
			put_page(page);
		}
		cond_resched();
	}
}

/*
 * mlock_fixup - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the ptes.
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, vm_flags_t newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int nr_pages;
	int ret = 0;
	int lock = !!(newflags & VM_LOCKED);

	if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm))
		goto out;	/* don't set VM_LOCKED, don't count */

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!lock)
		nr_pages = -nr_pages;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
	 */

	if (lock)
		vma->vm_flags = newflags;
	else
		munlock_vma_pages_range(vma, start, end);

out:
	*prev = vma;
	return ret;
}

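/*
 * Illustrative sketch (not part of the original file): mlocking the middle
 * of a vma splits it so that only the locked span carries VM_LOCKED:
 *
 *	[   A   |   B   |   C   ]  --mlock(B)-->  [ A ][ B:VM_LOCKED ][ C ]
 *
 * vma_merge() above re-merges neighbours whose resulting flags match, so
 * repeated mlock/munlock cycles do not permanently fragment the vma list.
 */
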
static int do_mlock(unsigned long start, size_t len, int on)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct *vma, *prev;
	int error;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma(current->mm, start);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		vm_flags_t newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vma->vm_flags & ~VM_LOCKED;
		if (on)
			newflags |= VM_LOCKED | VM_POPULATE;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}

/*
 * __mm_populate - populate and/or mlock pages within a range of address space.
 *
 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
 * flags. VMAs must be already marked with the desired vm_flags, and
 * mmap_sem must not be held.
 */
int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
{
	struct mm_struct *mm = current->mm;
	unsigned long end, nstart, nend;
	struct vm_area_struct *vma = NULL;
	int locked = 0;
	int ret = 0;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;

	for (nstart = start; nstart < end; nstart = nend) {
		/*
		 * We want to fault in pages for [nstart; end) address range.
		 * Find first corresponding VMA.
		 */
		if (!locked) {
			locked = 1;
			down_read(&mm->mmap_sem);
			vma = find_vma(mm, nstart);
		} else if (nstart >= vma->vm_end)
			vma = vma->vm_next;
		if (!vma || vma->vm_start >= end)
			break;
		/*
		 * Set [nstart; nend) to intersection of desired address
		 * range with the first VMA. Also, skip undesirable VMA types.
		 */
		nend = min(end, vma->vm_end);
		if ((vma->vm_flags & (VM_IO | VM_PFNMAP | VM_POPULATE)) !=
				VM_POPULATE)
			continue;
		if (nstart < vma->vm_start)
			nstart = vma->vm_start;
		/*
		 * Now fault in a range of pages. __mlock_vma_pages_range()
		 * double checks the vma flags, so that it won't mlock pages
		 * if the vma was already munlocked.
		 */
		ret = __mlock_vma_pages_range(vma, nstart, nend, &locked);
		if (ret < 0) {
			if (ignore_errors) {
				ret = 0;
				continue;	/* continue at next VMA */
			}
			ret = __mlock_posix_error_return(ret);
			break;
		}
		nend = nstart + ret * PAGE_SIZE;
		ret = 0;
	}
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;	/* 0 or negative error code */
}

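/*
 * Call paths that reach __mm_populate(), summarized from this file and its
 * callers (an orientation aid, not part of the original file):
 *
 *	mlock(start, len)		-> __mm_populate(start, len, 0)
 *	mlockall(MCL_CURRENT)		-> mm_populate(0, TASK_SIZE)
 *	mmap(..., MAP_POPULATE, ...)	-> mm_populate() on the new range
 */
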
SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	if (!can_do_mlock())
		return -EPERM;

	lru_add_drain_all();	/* flush pagevec */

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;

	locked = len >> PAGE_SHIFT;
	locked += current->mm->locked_vm;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = do_mlock(start, len, 1);
	up_write(&current->mm->mmap_sem);
	if (!error)
		error = __mm_populate(start, len, 0);
	return error;
}

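/*
 * Illustrative userspace sketch (an assumption, not part of the original
 * file): the syscall above page-aligns the range, so locking a single byte
 * pins the whole page containing it.
 *
 *	char *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (mlock(buf, 1))		// locks the full page
 *		perror("mlock");	// EPERM, ENOMEM or EAGAIN as above
 */
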
SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;
	ret = do_mlock(start, len, 0);
	up_write(&current->mm->mmap_sem);
	return ret;
}

static int do_mlockall(int flags)
{
	struct vm_area_struct *vma, *prev = NULL;

	if (flags & MCL_FUTURE)
		current->mm->def_flags |= VM_LOCKED | VM_POPULATE;
	else
		current->mm->def_flags &= ~(VM_LOCKED | VM_POPULATE);
	if (flags == MCL_FUTURE)
		goto out;

	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
		vm_flags_t newflags;

		newflags = vma->vm_flags & ~VM_LOCKED;
		if (flags & MCL_CURRENT)
			newflags |= VM_LOCKED | VM_POPULATE;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
	}
out:
	return 0;
}

SYSCALL_DEFINE1(mlockall, int, flags)
{
	unsigned long lock_limit;
	int ret = -EINVAL;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
		goto out;

	ret = -EPERM;
	if (!can_do_mlock())
		goto out;

	if (flags & MCL_CURRENT)
		lru_add_drain_all();	/* flush pagevec */

	down_write(&current->mm->mmap_sem);

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = do_mlockall(flags);
	up_write(&current->mm->mmap_sem);
	if (!ret && (flags & MCL_CURRENT))
		mm_populate(0, TASK_SIZE);
out:
	return ret;
}

SYSCALL_DEFINE0(munlockall)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mlockall(0);
	up_write(&current->mm->mmap_sem);
	return ret;
}

/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = rlimit(RLIMIT_MEMLOCK);
	if (lock_limit == RLIM_INFINITY)
		allowed = 1;
	lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	if (!allowed &&
	    locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}

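/*
 * Illustrative trigger (an assumption, not part of the original file):
 * shmctl(id, SHM_LOCK, NULL) charges the segment's pages to the owning
 * user via user_shm_lock(), rather than to any one mm's locked_vm.
 */
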
void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}