/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999 Linus Torvalds
 * Copyright (C) 2002 Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/hugetlb.h>
#include <linux/sched.h>

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

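/*
 * Editor's note: the expected caller pattern, mirroring sys_madvise()
 * at the bottom of this file, is
 *
 *	if (madvise_need_mmap_write(behavior))
 *		down_write(&current->mm->mmap_sem);
 *	else
 *		down_read(&current->mm->mmap_sem);
 */
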
/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	int new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		new_flags &= ~VM_DONTCOPY;
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
				vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto out;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto out;
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;

out:
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}

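/*
 * Illustrative sketch (editor's addition, not in the source): when
 * [start,end) covers only the middle of a vma and vma_merge() fails,
 * the two split_vma() calls above leave three vmas, and new_flags is
 * applied only to the middle one:
 *
 *	before:	[ vm_start ..................................... vm_end )
 *	after:	[ vm_start .. start ) [ start .. end ) [ end .. vm_end )
 *	                              `-- new_flags --'
 */
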
/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;

	if (!file)
		return -EBADF;

	/*
	 * Page cache readahead assumes page cache pages are order-0 which
	 * is not the case for hugetlbfs. Do not give a bad return value
	 * but ignore the advice.
	 */
	if (vma->vm_flags & VM_HUGETLB)
		return 0;

	if (file->f_mapping->a_ops->get_xip_mem) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	*prev = vma;
	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (end > vma->vm_end)
		end = vma->vm_end;
	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	force_page_cache_readahead(file->f_mapping,
			file, start, max_sane_readahead(end - start));
	return 0;
}

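/*
 * Worked example (editor's addition, hypothetical numbers): with 4K
 * pages (PAGE_SHIFT == 12), vm_pgoff == 16, and start pointing 0x3000
 * bytes past vm_start, the conversion above gives (0x3000 >> 12) + 16
 * = page 19, so readahead begins 19 pages into the backing file.
 */
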
/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	*prev = vma;
	if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
		return -EINVAL;

	if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
		struct zap_details details = {
			.nonlinear_vma = vma,
			.last_index = ULONG_MAX,
		};
		zap_page_range(vma, start, end - start, &details);
	} else
		zap_page_range(vma, start, end - start, NULL);
	return 0;
}

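/*
 * Typical use from user space (editor's example; chunk and chunk_size
 * are hypothetical): memory allocators return freed regions to the
 * kernel while keeping the mapping itself valid, e.g.
 *
 *	madvise(chunk, chunk_size, MADV_DONTNEED);
 *
 * A later touch of those addresses faults in fresh zero-filled pages
 * (or re-reads file-backed pages), rather than restoring old contents.
 */
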
/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 *
 * NOTE: Currently, only shmfs/tmpfs is supported for this operation.
 * Other filesystems return -ENOSYS.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	struct address_space *mapping;
	loff_t offset, endoff;
	int error;

	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */

	if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
		return -EINVAL;

	if (!vma->vm_file || !vma->vm_file->f_mapping
		|| !vma->vm_file->f_mapping->host) {
			return -EINVAL;
	}

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	mapping = vma->vm_file->f_mapping;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	endoff = (loff_t)(end - vma->vm_start - 1)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/* vmtruncate_range needs to take i_mutex and i_alloc_sem */
	up_read(&current->mm->mmap_sem);
	error = vmtruncate_range(mapping->host, offset, endoff);
	down_read(&current->mm->mmap_sem);
	return error;
}

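/*
 * Illustrative use from user space (editor's example; map, hole_off
 * and hole_len are hypothetical). Per the checks above, this only
 * succeeds on a shared, writable, tmpfs/shmfs-backed mapping:
 *
 *	madvise(map + hole_off, hole_len, MADV_REMOVE);
 *
 * which releases both the pages and the file's backing store in that
 * range, like punching a hole in the file.
 */
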
static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	long error;

	switch (behavior) {
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			break;
		}
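		/* fall through: the flag change is handled by madvise_behavior */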
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
		error = madvise_behavior(vma, prev, start, end, behavior);
		break;
	case MADV_REMOVE:
		error = madvise_remove(vma, prev, start, end);
		break;

	case MADV_WILLNEED:
		error = madvise_willneed(vma, prev, start, end);
		break;

	case MADV_DONTNEED:
		error = madvise_dontneed(vma, prev, start, end);
		break;

	default:
		error = -EINVAL;
		break;
	}
	return error;
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the appli-
 *		cation will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;

	write = madvise_need_mmap_write(behavior);
	if (write)
		down_write(&current->mm->mmap_sem);
	else
		down_read(&current->mm->mmap_sem);

	if (start & ~PAGE_MASK)
		goto out;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

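	/*
	 * Worked example (editor's addition, 4K pages assumed): a len_in
	 * of 0x1001 rounds up to 0x2000, while a small negative length
	 * such as (size_t)-1 wraps and rounds to 0, which the check
	 * below rejects.
	 */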
	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		goto out;

	end = start + len;
	if (end < start)
		goto out;

	error = 0;
	if (end == start)
		goto out;

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
out:
	if (write)
		up_write(&current->mm->mmap_sem);
	else
		up_read(&current->mm->mmap_sem);

	return error;
}
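
/*
 * Example call sequence from user space (editor's sketch; buf, length
 * and fd are hypothetical):
 *
 *	void *buf = mmap(NULL, length, PROT_READ, MAP_PRIVATE, fd, 0);
 *	madvise(buf, length, MADV_SEQUENTIAL);	(hint: one streaming pass)
 *	... read through buf once ...
 *	madvise(buf, length, MADV_DONTNEED);	(done: kernel may reclaim)
 *
 * As the comment above sys_madvise explains, both hints may be
 * disregarded by the kernel without affecting correctness.
 */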