mm: catch memory commitment underflow
[deliverable/linux.git] / mm / util.c
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>

#include <asm/uaccess.h>

#include "internal.h"

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strlen(s) + 1;
        buf = kmalloc_track_caller(len, gfp);
        if (buf)
                memcpy(buf, s, len);
        return buf;
}
EXPORT_SYMBOL(kstrdup);
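
/*
 * Illustrative use, as a minimal sketch of a hypothetical caller (the
 * identifiers below are invented for the example): the duplicate is
 * allocated with the given GFP mask and released with kfree() like any
 * other kmalloc'd buffer.
 *
 *      char *label;
 *
 *      label = kstrdup(template_name, GFP_KERNEL);
 *      if (!label)
 *              return -ENOMEM;
 *      ...
 *      kfree(label);
 */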

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strnlen(s, max);
        buf = kmalloc_track_caller(len + 1, gfp);
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
        void *p;

        p = kmalloc_track_caller(len, gfp);
        if (p)
                memcpy(p, src, len);
        return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
        void *p;

        /*
         * Always use GFP_KERNEL, since copy_from_user() can sleep and
         * cause a page fault, which makes it pointless to use GFP_NOFS
         * or GFP_ATOMIC.
         */
        p = kmalloc_track_caller(len, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }

        return p;
}
EXPORT_SYMBOL(memdup_user);
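
/*
 * Illustrative use (hypothetical ioctl-style caller; the identifiers are
 * assumptions for the sketch): note the ERR_PTR() convention, so failure
 * is detected with IS_ERR()/PTR_ERR() rather than a NULL check.
 *
 *      void *kbuf;
 *
 *      kbuf = memdup_user(user_ptr, user_len);
 *      if (IS_ERR(kbuf))
 *              return PTR_ERR(kbuf);
 *      ...
 *      kfree(kbuf);
 */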

/*
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
        char *p;
        long length;

        length = strnlen_user(s, n);

        if (!length)
                return ERR_PTR(-EFAULT);

        if (length > n)
                return ERR_PTR(-EINVAL);

        p = memdup_user(s, length);

        if (IS_ERR(p))
                return p;

        p[length - 1] = '\0';

        return p;
}
EXPORT_SYMBOL(strndup_user);
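
/*
 * Illustrative use (hypothetical caller, names invented for the sketch):
 * like memdup_user(), errors come back as ERR_PTR() values, here either
 * -EFAULT for an inaccessible string or -EINVAL if it exceeds @n bytes.
 *
 *      char *name;
 *
 *      name = strndup_user(uname, PATH_MAX);
 *      if (IS_ERR(name))
 *              return PTR_ERR(name);
 *      ...
 *      kfree(name);
 */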

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev, struct rb_node *rb_parent)
{
        struct vm_area_struct *next;

        vma->vm_prev = prev;
        if (prev) {
                next = prev->vm_next;
                prev->vm_next = vma;
        } else {
                mm->mmap = vma;
                if (rb_parent)
                        next = rb_entry(rb_parent,
                                        struct vm_area_struct, vm_rb);
                else
                        next = NULL;
        }
        vma->vm_next = next;
        if (next)
                next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
static int vm_is_stack_for_task(struct task_struct *t,
                                struct vm_area_struct *vma)
{
        return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Check if the vma is being used as a stack.
 * If in_group is non-zero, check in the entire thread group or else
 * just check in the current task. Returns the pid of the task that
 * the vma is a stack for.
 */
pid_t vm_is_stack(struct task_struct *task,
                  struct vm_area_struct *vma, int in_group)
{
        pid_t ret = 0;

        if (vm_is_stack_for_task(task, vma))
                return task->pid;

        if (in_group) {
                struct task_struct *t;
                rcu_read_lock();
                if (!pid_alive(task))
                        goto done;

                t = task;
                do {
                        if (vm_is_stack_for_task(t, vma)) {
                                ret = t->pid;
                                goto done;
                        }
                } while_each_thread(task, t);
done:
                rcu_read_unlock();
        }

        return ret;
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        mm->mmap_base = TASK_UNMAPPED_BASE;
        mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
                                 int nr_pages, int write, struct page **pages)
{
        return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:    starting user address
 * @nr_pages: number of pages from start to pin
 * @write:    whether pages will be written to
 * @pages:    array that receives pointers to the pages pinned.
 *            Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However,
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However, if the
 * pages have to be faulted in, it may turn out to be slightly slower, so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __weak get_user_pages_fast(unsigned long start,
                                int nr_pages, int write, struct page **pages)
{
        struct mm_struct *mm = current->mm;
        int ret;

        down_read(&mm->mmap_sem);
        ret = get_user_pages(current, mm, start, nr_pages,
                             write, 0, pages, NULL);
        up_read(&mm->mmap_sem);

        return ret;
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
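
/*
 * Illustrative use, assuming a hypothetical caller that pins a small batch
 * of user pages: the return value may be fewer pages than requested (or
 * -errno), and every page actually pinned must be released with put_page().
 *
 *      struct page *pages[8];
 *      int i, pinned;
 *
 *      pinned = get_user_pages_fast(uaddr, 8, 1, pages);
 *      if (pinned < 0)
 *              return pinned;
 *      for (i = 0; i < pinned; i++) {
 *              ... access the page contents ...
 *              put_page(pages[i]);
 *      }
 */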

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long pgoff)
{
        unsigned long ret;
        struct mm_struct *mm = current->mm;
        unsigned long populate;

        ret = security_mmap_file(file, prot, flag);
        if (!ret) {
                down_write(&mm->mmap_sem);
                ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
                                    &populate);
                up_write(&mm->mmap_sem);
                if (populate)
                        mm_populate(ret, populate);
        }
        return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long offset)
{
        if (unlikely(offset + PAGE_ALIGN(len) < offset))
                return -EINVAL;
        if (unlikely(offset & ~PAGE_MASK))
                return -EINVAL;

        return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
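
/*
 * Illustrative use (hypothetical in-kernel caller): @offset must be
 * page-aligned and must not overflow when added to the aligned length, and
 * the result is either a mapped address or a negative errno encoded in the
 * unsigned long, so it is checked with IS_ERR_VALUE().
 *
 *      unsigned long addr;
 *
 *      addr = vm_mmap(filp, 0, size, PROT_READ, MAP_SHARED, 0);
 *      if (IS_ERR_VALUE(addr))
 *              return (long)addr;
 */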

void kvfree(const void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}
EXPORT_SYMBOL(kvfree);
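
/*
 * Illustrative pairing (a sketch, not taken from this file): kvfree() lets
 * a caller fall back from kmalloc() to vmalloc() for large buffers and
 * still free the result with a single call, whichever allocator satisfied
 * the request.
 *
 *      void *buf;
 *
 *      buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
 *      if (!buf)
 *              buf = vmalloc(size);
 *      if (!buf)
 *              return -ENOMEM;
 *      ...
 *      kvfree(buf);
 */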

struct address_space *page_mapping(struct page *page)
{
        struct address_space *mapping = page->mapping;

        /* This happens if someone calls flush_dcache_page on a slab page */
        if (unlikely(PageSlab(page)))
                return NULL;

        if (unlikely(PageSwapCache(page))) {
                swp_entry_t entry;

                entry.val = page_private(page);
                mapping = swap_address_space(entry);
        } else if ((unsigned long)mapping & PAGE_MAPPING_ANON)
                mapping = NULL;
        return mapping;
}

int overcommit_ratio_handler(struct ctl_table *table, int write,
                             void __user *buffer, size_t *lenp,
                             loff_t *ppos)
{
        int ret;

        ret = proc_dointvec(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                sysctl_overcommit_kbytes = 0;
        return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
                              void __user *buffer, size_t *lenp,
                              loff_t *ppos)
{
        int ret;

        ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                sysctl_overcommit_ratio = 0;
        return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
        unsigned long allowed;

        if (sysctl_overcommit_kbytes)
                allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
        else
                allowed = ((totalram_pages - hugetlb_total_pages())
                           * sysctl_overcommit_ratio / 100);
        allowed += total_swap_pages;

        return allowed;
}
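
/*
 * Worked example of the arithmetic above, assuming 4 KiB pages
 * (PAGE_SHIFT == 12, so "kbytes >> (PAGE_SHIFT - 10)" is kbytes / 4):
 *
 *      overcommit_kbytes = 1048576  ->  allowed = 262144 pages (1 GiB)
 *
 * With overcommit_kbytes == 0 the ratio path is used instead; e.g. 1 GiB
 * of RAM (262144 pages), no hugetlb pages, overcommit_ratio = 50 and 2 GiB
 * of swap (524288 pages) gives:
 *
 *      allowed = 262144 * 50 / 100 + 524288 = 655360 pages
 *
 * The two sysctls are mutually exclusive: the handlers above zero one when
 * the other is written.
 */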

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 * Returns the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NULL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
        int res = 0;
        unsigned int len;
        struct mm_struct *mm = get_task_mm(task);
        if (!mm)
                goto out;
        if (!mm->arg_end)
                goto out_mm;    /* Shh! No looking before we're done */

        len = mm->arg_end - mm->arg_start;

        if (len > buflen)
                len = buflen;

        res = access_process_vm(task, mm->arg_start, buffer, len, 0);

        /*
         * If the nul at the end of args has been overwritten, then
         * assume application is using setproctitle(3).
         */
        if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
                len = strnlen(buffer, res);
                if (len < res) {
                        res = len;
                } else {
                        len = mm->env_end - mm->env_start;
                        if (len > buflen - res)
                                len = buflen - res;
                        res += access_process_vm(task, mm->env_start,
                                                 buffer+res, len, 0);
                        res = strnlen(buffer, res);
                }
        }
out_mm:
        mmput(mm);
out:
        return res;
}
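
/*
 * Illustrative use (hypothetical caller, e.g. something along the lines of
 * a /proc handler; the buffer name is invented for the sketch): because the
 * copy is not guaranteed to be NUL-terminated, the caller terminates it
 * itself.
 *
 *      char buf[256];
 *      int n;
 *
 *      n = get_cmdline(task, buf, sizeof(buf) - 1);
 *      buf[n] = '\0';
 */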