#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H
#ifdef __KERNEL__

#ifndef __ASSEMBLY__
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

struct mm_struct;

#ifdef CONFIG_DEBUG_VM
extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
#else /* CONFIG_DEBUG_VM */
static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
}
#endif /* !CONFIG_DEBUG_VM */

#endif /* !__ASSEMBLY__ */

#if defined(CONFIG_PPC64)
#  include <asm/pgtable-ppc64.h>
#else
#  include <asm/pgtable-ppc32.h>
#endif

/* Special mapping for AGP */
#define PAGE_AGP	(PAGE_KERNEL_NC)
#define HAVE_PAGE_AGP

#ifndef __ASSEMBLY__

/* Insert a PTE, top-level function is out of line. It uses an inline
 * low level function in the respective pgtable-* files
 */
extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		       pte_t pte);

/* This low level function performs the actual PTE insertion
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
	 * helper pte_update() which does an atomic update. We need to do that
	 * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
	 * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
	 * the hash bits instead (i.e., same as the non-SMP case)
	 */
	if (percpu)
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
	else
		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));

#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
	/* Second case is 32-bit with 64-bit PTE in SMP mode. In this case, we
	 * can just store as long as we do the two halves in the right order
	 * with a barrier in between. This is possible because we take care,
	 * in the hash code, to pre-invalidate if the PTE was already hashed,
	 * which synchronizes us with any concurrent invalidation.
	 * In the percpu case, we also fall back to the simple update preserving
	 * the hash bits
	 */
	if (percpu) {
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
		return;
	}
#if _PAGE_HASHPTE != 0
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);
#endif
	__asm__ __volatile__("\
		stw%U0%X0 %2,%0\n\
		eieio\n\
		stw%U0%X0 %L2,%1"
	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
	: "r" (pte) : "memory");

#elif defined(CONFIG_PPC_STD_MMU_32)
	/* Third case is 32-bit hash table in UP mode, we need to preserve
	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
	 * translation in the hash yet (done in a subsequent flush_tlb_xxx())
	 * and so we need to keep track that this PTE needs invalidating
	 */
	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
		      | (pte_val(pte) & ~_PAGE_HASHPTE));

#else
	/* Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 64-bit PTEs in UP mode
	 */
	*ptep = pte;
#endif
}

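/*
 * For reference, the out-of-line set_pte_at() declared above lives in
 * arch/powerpc/mm/pgtable.c and, apart from some debug checking, boils
 * down to calling the helper with percpu = 0, roughly:
 *
 *	void set_pte_at(struct mm_struct *mm, unsigned long addr,
 *			pte_t *ptep, pte_t pte)
 *	{
 *		__set_pte_at(mm, addr, ptep, pte, 0);
 *	}
 *
 * (simplified sketch, not the verbatim implementation)
 */
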
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);

/*
 * Macros to mark a page protection value as uncacheable, write-combining,
 * cached, or cached write-through.
 */
#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE))

#define pgprot_cached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT))

#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT | _PAGE_WRITETHRU))

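/*
 * Typical use of the pgprot_* helpers above (an illustrative sketch only,
 * not part of this header): a driver mapping device memory to userspace
 * marks the VMA uncacheable before remapping it, e.g.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *		return remap_pfn_range(vma, vma->vm_start, foo_pfn,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 *
 * foo_mmap() and foo_pfn are made-up names for the example.
 */
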
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

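/*
 * Defining __HAVE_PHYS_MEM_ACCESS_PROT tells generic code (for instance the
 * /dev/mem driver) to use the phys_mem_access_prot() hook above when picking
 * page protections for mappings of physical memory.
 */
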
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

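/*
 * swapper_pg_dir is the kernel's own top-level page table, i.e. the page
 * directory used for the init_mm address space.
 */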
extern pgd_t swapper_pg_dir[];

extern void paging_init(void);

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address. Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test. What should we do here?
 */
#define kern_addr_valid(addr)	(1)

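/*
 * I/O mappings need no special handling on powerpc beyond what
 * remap_pfn_range() already does, so io_remap_pfn_range() simply
 * forwards to it.
 */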
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#include <asm-generic/pgtable.h>

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGTABLE_H */