mm: use macros from compiler.h instead of __attribute__((...))
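
__weak from <linux/compiler.h> expands to __attribute__((weak)), so the
two spellings of the declaration are equivalent; the macro form is the
consistent, grep-able spelling used across the tree. A minimal
before/after sketch, assuming that usual expansion of the macro:

/* Sketch only; the real definition lives in the kernel compiler headers. */
#define __weak __attribute__((weak))

/* Old spelling: raw attribute written out by hand. */
__attribute__((weak)) struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
		pud_t *pud, int write);

/* New spelling: compiler.h macro placed after the return type. */
struct page * __weak
follow_huge_pud(struct mm_struct *mm, unsigned long address,
		pud_t *pud, int write);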
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7c02b9dadfb05b28e2aef363523f021f933bdb6d..c5aa439933649675d1c9d1c33367fbd4ca2cfa4a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -13,6 +13,7 @@
 #include <linux/nodemask.h>
 #include <linux/pagemap.h>
 #include <linux/mempolicy.h>
+#include <linux/compiler.h>
 #include <linux/cpuset.h>
 #include <linux/mutex.h>
 #include <linux/bootmem.h>
@@ -2690,7 +2691,8 @@ retry_avoidcopy:
                                BUG_ON(huge_pte_none(pte));
                                spin_lock(ptl);
                                ptep = huge_pte_offset(mm, address & huge_page_mask(h));
-                               if (likely(pte_same(huge_ptep_get(ptep), pte)))
+                               if (likely(ptep &&
+                                          pte_same(huge_ptep_get(ptep), pte)))
                                        goto retry_avoidcopy;
                                /*
                                 * race occurs while re-acquiring page table
@@ -2734,7 +2736,7 @@ retry_avoidcopy:
         */
        spin_lock(ptl);
        ptep = huge_pte_offset(mm, address & huge_page_mask(h));
-       if (likely(pte_same(huge_ptep_get(ptep), pte))) {
+       if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
                ClearPagePrivate(new_page);
 
                /* Break COW */
@@ -3185,6 +3187,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
        BUG_ON(address >= end);
        flush_cache_range(vma, address, end);
 
+       mmu_notifier_invalidate_range_start(mm, start, end);
        mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
        for (; address < end; address += huge_page_size(h)) {
                spinlock_t *ptl;
@@ -3214,6 +3217,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
         */
        flush_tlb_range(vma, start, end);
        mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+       mmu_notifier_invalidate_range_end(mm, start, end);
 
        return pages << h->order;
 }
@@ -3518,7 +3522,7 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
 #else /* !CONFIG_ARCH_WANT_GENERAL_HUGETLB */
 
 /* Can be overriden by architectures */
-__attribute__((weak)) struct page *
+struct page * __weak
 follow_huge_pud(struct mm_struct *mm, unsigned long address,
               pud_t *pud, int write)
 {