From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 13 Feb 2009 12:09:00 +0000 (+0100)
Subject: Merge branch 'x86/untangle2' of git://git.kernel.org/pub/scm/linux/kernel/git/jeremy...
X-Git-Url: http://drtracing.org/?a=commitdiff_plain;h=b233969eaa98c7b339d955fe25a58bf6bf25739a;p=deliverable%2Flinux.git

Merge branch 'x86/untangle2' of git://git.kernel.org/pub/scm/linux/kernel/git/jeremy/xen into x86/headers

Conflicts:
	arch/x86/include/asm/page.h
	arch/x86/include/asm/pgtable.h
	arch/x86/mach-voyager/voyager_smp.c
	arch/x86/mm/fault.c
---

b233969eaa98c7b339d955fe25a58bf6bf25739a
diff --cc arch/x86/include/asm/page_types.h
index 000000000000,9f0c95963358..2c52ff767584
mode 000000,100644..100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@@ -1,0 -1,64 +1,63 @@@
+ #ifndef _ASM_X86_PAGE_DEFS_H
+ #define _ASM_X86_PAGE_DEFS_H
+ 
+ #include <linux/const.h>
+ 
+ /* PAGE_SHIFT determines the page size */
+ #define PAGE_SHIFT 12
+ #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
+ #define PAGE_MASK (~(PAGE_SIZE-1))
+ 
+ #define __PHYSICAL_MASK ((phys_addr_t)(1ULL << __PHYSICAL_MASK_SHIFT) - 1)
+ #define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
+ 
+ /* Cast PAGE_MASK to a signed type so that it is sign-extended if
+    virtual addresses are 32-bits but physical addresses are larger
+    (ie, 32-bit PAE). */
+ #define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
+ 
+ /* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
+ #define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK)
+ 
+ /* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
+ #define PTE_FLAGS_MASK (~PTE_PFN_MASK)
+ 
+ #define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
+ #define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
+ 
+ #define HPAGE_SHIFT PMD_SHIFT
+ #define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
+ #define HPAGE_MASK (~(HPAGE_SIZE - 1))
+ #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+ 
+ #define HUGE_MAX_HSTATE 2
+ 
+ #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
+ 
+ #define VM_DATA_DEFAULT_FLAGS \
+ 	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
+ 	 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+ 
+ #ifdef CONFIG_X86_64
+ #include <asm/page_64_types.h>
+ #else
+ #include <asm/page_32_types.h>
+ #endif /* CONFIG_X86_64 */
+ 
+ #ifndef __ASSEMBLY__
+ 
+ struct pgprot;
+ 
+ extern int page_is_ram(unsigned long pagenr);
 -extern int pagerange_is_ram(unsigned long start, unsigned long end);
+ extern int devmem_is_allowed(unsigned long pagenr);
+ extern void map_devmem(unsigned long pfn, unsigned long size,
+ 		       struct pgprot vma_prot);
+ extern void unmap_devmem(unsigned long pfn, unsigned long size,
+ 			 struct pgprot vma_prot);
+ 
+ extern unsigned long max_low_pfn_mapped;
+ extern unsigned long max_pfn_mapped;
+ 
+ #endif /* !__ASSEMBLY__ */
+ 
+ #endif /* _ASM_X86_PAGE_DEFS_H */
diff --cc arch/x86/include/asm/pgtable.h
index 8fef0f6bfbb6,b0d1066ab6a5..62024ff897d9
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@@ -316,26 -216,10 +216,24 @@@ static inline pte_t pte_mkspecial(pte_
  	return pte_set_flags(pte, _PAGE_SPECIAL);
  }
  
- extern pteval_t __supported_pte_mask;
- 
 +/*
 + * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 + * can use those bits for other purposes, so leave them be.
 + */
 +static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
 +{
 +	pgprotval_t protval = pgprot_val(pgprot);
 +
 +	if (protval & _PAGE_PRESENT)
 +		protval &= __supported_pte_mask;
 +
 +	return protval;
 +}
 +
  static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
  {
- 	return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) |
- 		      pgprot_val(pgprot)) & __supported_pte_mask);
+ 	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
+ 		     massage_pgprot(pgprot));
  }
  
  static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
diff --cc arch/x86/mm/fault.c
index 94c4e7262197,976b5a72ec30..29644175490f
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@@ -851,7 -848,8 +851,8 @@@ void __kprobes do_page_fault(struct pt_
  		return;
  	}
  
+ 	/* kprobes don't want to hook the spurious faults. */
 -	if (notify_page_fault(regs))
 +	if (unlikely(notify_page_fault(regs)))
  		return;
  
  	/*
  	 * It's safe to allow irq's after cr2 has been saved and the
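Two of the hunks above reward a closer look. PHYSICAL_PAGE_MASK casts PAGE_MASK to a signed type before masking, and the header comment explains why: on 32-bit PAE, PAGE_MASK is only 32 bits wide while physical addresses are wider. The following standalone userspace sketch illustrates the failure mode the cast avoids; the fixed-width types and the 36-bit PAE constant are illustrative stand-ins, not the kernel's definitions.

/*
 * Sketch of the PHYSICAL_PAGE_MASK comment above.  Simulates a 32-bit
 * PAE kernel: 32-bit PAGE_MASK, 64-bit phys_addr_t, 36-bit physical
 * address space.  Illustrative stand-ins, not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1U << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))   /* 0xfffff000, 32 bits wide */

typedef uint64_t phys_addr_t;
#define __PHYSICAL_MASK_SHIFT 36        /* classic PAE */
#define __PHYSICAL_MASK ((phys_addr_t)(1ULL << __PHYSICAL_MASK_SHIFT) - 1)

int main(void)
{
        phys_addr_t phys = 0xabcdef123ULL;      /* physical address above 4GB */

        /* Zero-extended 32-bit mask: address bits 32-35 are lost. */
        phys_addr_t lost = phys & ((phys_addr_t)(uint32_t)PAGE_MASK & __PHYSICAL_MASK);

        /* Signed cast sign-extends the mask to ...fffffffff000 before the
           AND, mirroring the kernel's (signed long) cast: bits survive. */
        phys_addr_t kept = phys & (((int32_t)PAGE_MASK) & __PHYSICAL_MASK);

        printf("unsigned mask: %#llx\n", (unsigned long long)lost); /* 0xbcdef000 */
        printf("signed mask:   %#llx\n", (unsigned long long)kept); /* 0xabcdef000 */
        return 0;
}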
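The other subtlety is massage_pgprot() in the pgtable.h hunk: it filters a pgprot through __supported_pte_mask only when _PAGE_PRESENT is set, because non-present entries reuse the hardware bits (swap entries, for instance) and must pass through untouched. Below is a standalone sketch of that split; the bit positions and the "unsupported" NX setup are illustrative stand-ins for the real x86 definitions.

/*
 * Sketch of massage_pgprot()'s present/non-present split, modeling a
 * CPU without NX support.  Illustrative stand-ins, not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pgprotval_t;

#define _PAGE_PRESENT (1ULL << 0)
#define _PAGE_RW      (1ULL << 1)
#define _PAGE_NX      (1ULL << 63)

/* Everything but NX is supported, as if the CPU lacked the NX feature. */
static const pgprotval_t __supported_pte_mask = ~_PAGE_NX;

static pgprotval_t massage_pgprot(pgprotval_t protval)
{
        /* Present entries may only carry bits the hardware supports. */
        if (protval & _PAGE_PRESENT)
                protval &= __supported_pte_mask;

        /* Non-present entries reuse these bits for software purposes,
           so they pass through untouched. */
        return protval;
}

int main(void)
{
        pgprotval_t present = _PAGE_PRESENT | _PAGE_RW | _PAGE_NX;
        pgprotval_t absent  = _PAGE_RW | _PAGE_NX;      /* e.g. swap entry */

        printf("present: %#llx\n",
               (unsigned long long)massage_pgprot(present)); /* NX stripped */
        printf("absent:  %#llx\n",
               (unsigned long long)massage_pgprot(absent));  /* NX kept */
        return 0;
}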