mm: define USE_SPLIT_PTLOCKS rather than repeating expression
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 335288bff1b741bd25dce72d3a84773c5ef70aee..4194bf8e4f6c249c5d06740a7f21396f55bf7241 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -73,7 +73,7 @@ extern unsigned int kobjsize(const void *objp);
 #endif
 
 /*
- * vm_flags..
+ * vm_flags in vm_area_struct, see mm_types.h.
  */
 #define VM_READ                0x00000001      /* currently active flags */
 #define VM_WRITE       0x00000002
@@ -834,7 +834,6 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
                          struct vm_area_struct **pprev, unsigned long start,
                          unsigned long end, unsigned long newflags);
 
-#ifdef CONFIG_HAVE_GET_USER_PAGES_FAST
 /*
  * get_user_pages_fast provides equivalent functionality to get_user_pages,
  * operating on current and current->mm (force=0 and doesn't return any vmas).
@@ -848,25 +847,6 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                        struct page **pages);
 
-#else
-/*
- * Should probably be moved to asm-generic, and architectures can include it if
- * they don't implement their own get_user_pages_fast.
- */
-#define get_user_pages_fast(start, nr_pages, write, pages)     \
-({                                                             \
-       struct mm_struct *mm = current->mm;                     \
-       int ret;                                                \
-                                                               \
-       down_read(&mm->mmap_sem);                               \
-       ret = get_user_pages(current, mm, start, nr_pages,      \
-                                       write, 0, pages, NULL); \
-       up_read(&mm->mmap_sem);                                 \
-                                                               \
-       ret;                                                    \
-})
-#endif
-
 /*
  * A callback you can register to apply pressure to ageable caches.
  *
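(The macro fallback deleted in the hunk above open-coded get_user_pages() plus the mmap_sem locking at every call site. A minimal sketch of an equivalent out-of-line fallback, assuming it moves to a generic C file such as mm/util.c and is declared __weak so architectures with a real fast-GUP path override it; the file location and weak linkage are assumptions here, while the body simply mirrors the removed macro:

	#include <linux/mm.h>
	#include <linux/sched.h>

	/*
	 * Generic slow-path fallback: take mmap_sem and fall through to
	 * get_user_pages() on current->mm, as the removed macro did.
	 */
	int __weak get_user_pages_fast(unsigned long start, int nr_pages,
				       int write, struct page **pages)
	{
		struct mm_struct *mm = current->mm;
		int ret;

		down_read(&mm->mmap_sem);
		ret = get_user_pages(current, mm, start, nr_pages,
				     write, 0, pages, NULL);
		up_read(&mm->mmap_sem);

		return ret;
	}

An out-of-line function avoids the macro's statement-expression tricks and gives every architecture the same prototype to override.)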
@@ -939,7 +919,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 }
 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
 
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+#if USE_SPLIT_PTLOCKS
 /*
  * We tuck a spinlock to guard each pagetable page into its struct page,
  * at page->private, with BUILD_BUG_ON to make sure that this will not
@@ -952,14 +932,14 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 } while (0)
 #define pte_lock_deinit(page)  ((page)->mapping = NULL)
 #define pte_lockptr(mm, pmd)   ({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
-#else
+#else  /* !USE_SPLIT_PTLOCKS */
 /*
  * We use mm->page_table_lock to guard all pagetable pages of the mm.
  */
 #define pte_lock_init(page)    do {} while (0)
 #define pte_lock_deinit(page)  do {} while (0)
 #define pte_lockptr(mm, pmd)   ({(void)(pmd); &(mm)->page_table_lock;})
-#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#endif /* USE_SPLIT_PTLOCKS */
 
 static inline void pgtable_page_ctor(struct page *page)
 {
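(USE_SPLIT_PTLOCKS is not defined anywhere in this file, so it is presumably introduced alongside this change in include/linux/mm_types.h, writing the repeated expression exactly once:

	/* Assumed companion definition in include/linux/mm_types.h,
	 * replacing the open-coded comparison at each use site.
	 */
	#define USE_SPLIT_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)

Because the macro expands to a constant 0/1 expression, it works both in preprocessor conditionals, as in the #if above, and in ordinary C code.)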