diff --git a/mm/nommu.c b/mm/nommu.c
index 541bed64e34870c2e62c8cae68ba6e03920ae437..1a19fb3b04635549f63d90c9e62408cf55ad162c 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -214,6 +214,39 @@ long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 }
 EXPORT_SYMBOL(get_user_pages);
 
+long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
+                          unsigned long start, unsigned long nr_pages,
+                          int write, int force, struct page **pages,
+                          int *locked)
+{
+       return get_user_pages(tsk, mm, start, nr_pages, write, force,
+                             pages, NULL);
+}
+EXPORT_SYMBOL(get_user_pages_locked);
+
+long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
+                              unsigned long start, unsigned long nr_pages,
+                              int write, int force, struct page **pages,
+                              unsigned int gup_flags)
+{
+       long ret;
+       down_read(&mm->mmap_sem);
+       ret = get_user_pages(tsk, mm, start, nr_pages, write, force,
+                            pages, NULL);
+       up_read(&mm->mmap_sem);
+       return ret;
+}
+EXPORT_SYMBOL(__get_user_pages_unlocked);
+
+long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
+                            unsigned long start, unsigned long nr_pages,
+                            int write, int force, struct page **pages)
+{
+       return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write,
+                                        force, pages, 0);
+}
+EXPORT_SYMBOL(get_user_pages_unlocked);
+
 /**
  * follow_pfn - look up PFN at a user virtual address
  * @vma: memory mapping
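
Note: the three wrappers added above are the nommu counterparts of the new locked/unlocked get_user_pages() helpers. On nommu there is no faulting to retry, so get_user_pages_locked() never drops mmap_sem (the *locked argument is effectively ignored) and __get_user_pages_unlocked() simply brackets the plain call with down_read()/up_read(). A minimal caller sketch follows, assuming <linux/mm.h> and <linux/sched.h> are already included; pin_one_page() is a hypothetical helper, not part of this patch:

        static long pin_one_page(unsigned long addr, struct page **page)
        {
                /*
                 * Pin a single, writable page of the current task.  The
                 * unlocked variant takes and releases current->mm->mmap_sem
                 * internally, so the caller must not already hold it.
                 */
                return get_user_pages_unlocked(current, current->mm, addr, 1,
                                               1 /* write */, 0 /* force */,
                                               page);
        }

As with get_user_pages(), the return value is the number of pages pinned (1 on success here) or a negative errno.
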
@@ -1895,7 +1928,7 @@ EXPORT_SYMBOL(unmap_mapping_range);
  */
 int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 {
-       unsigned long free, allowed, reserve;
+       long free, allowed, reserve;
 
        vm_acct_memory(pages);
 
@@ -1959,7 +1992,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
         */
        if (mm) {
                reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
-               allowed -= min(mm->total_vm / 32, reserve);
+               allowed -= min_t(long, mm->total_vm / 32, reserve);
        }
 
        if (percpu_counter_read_positive(&vm_committed_as) < allowed)
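
Note: the type change above guards against unsigned wraparound. With "allowed" declared unsigned long, subtracting a reserve larger than its current value wraps to a huge positive number, so the final percpu_counter_read_positive(&vm_committed_as) < allowed test can effectively never fail; declaring the locals as signed long lets the subtraction go negative instead. min_t(long, ...) is needed because mm->total_vm is an unsigned long field and min() warns when its operands have different types. A standalone illustration of the wraparound (plain userspace C, not kernel code):

        #include <stdio.h>

        int main(void)
        {
                unsigned long allowed_u = 100;  /* old: unsigned arithmetic */
                long allowed_s = 100;           /* new: signed arithmetic */

                allowed_u -= 200;               /* wraps to a huge value */
                allowed_s -= 200;               /* goes negative as intended */

                printf("unsigned: %lu\n", allowed_u);  /* 18446744073709551516 on 64-bit */
                printf("signed:   %ld\n", allowed_s);  /* -100 */
                return 0;
        }
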