#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H
#ifdef __KERNEL__

#ifndef __ASSEMBLY__
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

struct mm_struct;

#endif /* !__ASSEMBLY__ */

#if defined(CONFIG_PPC64)
# include <asm/pgtable-ppc64.h>
#else
# include <asm/pgtable-ppc32.h>
#endif

#ifndef __ASSEMBLY__

#include <asm/tlbflush.h>

/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_present(pte_t pte)	{ return pte_val(pte) & _PAGE_PRESENT; }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) {
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot)); }
static inline unsigned long pte_pfn(pte_t pte)	{
	return pte_val(pte) >> PTE_RPN_SHIFT; }

/* Keep these as macros to avoid include dependency mess */
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

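/*
 * Illustrative sketch (not part of this header): how a pfn and a
 * protection combine into a PTE, and how the pfn is recovered. It
 * assumes PAGE_KERNEL from the pgtable-ppc32/64 headers included
 * above; the function name is hypothetical.
 */
static inline int example_pfn_round_trip(unsigned long pfn)
{
	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);	/* pfn + protection -> PTE */

	return pte_pfn(pte) == pfn;	/* shift the RPN back out */
}
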
/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) {
	pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
	pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkspecial(pte_t pte) {
	pte_val(pte) |= _PAGE_SPECIAL; return pte; }
static inline pte_t pte_mkhuge(pte_t pte) {
	return pte; }
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}
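
/*
 * Illustrative sketch (not part of this header): the modifiers above
 * take and return a pte_t by value, so they chain naturally. A handler
 * wanting a writable, dirty, young PTE might do (hypothetical name):
 */
static inline pte_t example_mkwrite_dirty(pte_t pte)
{
	return pte_mkyoung(pte_mkdirty(pte_mkwrite(pte)));
}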


/* Insert a PTE: the top-level function is out of line. It uses an
 * inline low-level function in the respective pgtable-* files.
 */
extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		       pte_t pte);

/* This low-level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use
	 * the helper pte_update() which does an atomic update. We need to do
	 * that because a concurrent invalidation can clear _PAGE_HASHPTE. If
	 * it's a per-CPU PTE such as a kmap_atomic, we do a simple update
	 * preserving the hash bits instead (i.e., the same as the non-SMP
	 * case).
	 */
	if (percpu)
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
	else
		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));

#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
	/* Second case is 32-bit with 64-bit PTE. In this case, we
	 * can just store as long as we do the two halves in the right order
	 * with a barrier in between. This is possible because we take care,
	 * in the hash code, to pre-invalidate if the PTE was already hashed,
	 * which synchronizes us with any concurrent invalidation.
	 * In the percpu case, we also fall back to the simple update
	 * preserving the hash bits.
	 */
	if (percpu) {
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
		return;
	}
#if _PAGE_HASHPTE != 0
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);
#endif
	__asm__ __volatile__("\
		stw%U0%X0 %2,%0\n\
		eieio\n\
		stw%U0%X0 %L2,%1"
	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
	: "r" (pte) : "memory");

#elif defined(CONFIG_PPC_STD_MMU_32)
	/* Third case is 32-bit hash table in UP mode. We need to preserve
	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
	 * translation in the hash yet (done in a subsequent flush_tlb_xxx())
	 * and so we need to keep track that this PTE needs invalidating.
	 */
	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
		      | (pte_val(pte) & ~_PAGE_HASHPTE));

#else
	/* Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
	*ptep = pte;
#endif
}
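
/*
 * Sketch (assumed, not defined in this header): the out-of-line
 * set_pte_at() declared above is expected to reduce to a thin wrapper
 * around __set_pte_at() with percpu == 0, roughly:
 *
 *	void set_pte_at(struct mm_struct *mm, unsigned long addr,
 *			pte_t *ptep, pte_t pte)
 *	{
 *		__set_pte_at(mm, addr, ptep, pte, 0);
 *	}
 */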

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);

/*
 * Macros to mark a page protection value as "uncacheable" or to
 * otherwise control its cache attributes.
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot)	(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					  _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot)	(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					  _PAGE_NO_CACHE))

#define pgprot_cached(prot)	(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					  _PAGE_COHERENT))

#define pgprot_cached_wthru(prot)	(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					  _PAGE_COHERENT | _PAGE_WRITETHRU))

#define pgprot_cached_noncoherent(prot) \
		(__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))

#define pgprot_writecombine pgprot_noncached_wc

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address. Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test. What should we do here?
 */
#define kern_addr_valid(addr)	(1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
	remap_pfn_range(vma, vaddr, pfn, size, prot)
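
/*
 * Illustrative sketch (not part of this header): a driver mmap handler
 * typically pairs one of the pgprot_* cache-control macros above with
 * io_remap_pfn_range() to map device memory uncached into userspace.
 * The function and "dev_phys" below are hypothetical:
 *
 *	static int example_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long size = vma->vm_end - vma->vm_start;
 *
 *		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *		return io_remap_pfn_range(vma, vma->vm_start,
 *					  dev_phys >> PAGE_SHIFT, size,
 *					  vma->vm_page_prot);
 *	}
 */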

#include <asm-generic/pgtable.h>

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
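
/*
 * Expected calling pattern (assumed; this lives in generic mm code
 * rather than here): the fault handler installs the PTE, then
 * immediately gives the architecture a chance to preload the hash
 * table:
 *
 *	set_pte_at(vma->vm_mm, address, ptep, entry);
 *	update_mmu_cache(vma, address, ptep);
 */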

extern int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, unsigned long addr,
		      unsigned long end, int write, struct page **pages, int *nr);

#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGTABLE_H */