powerpc/mm: Tweak PTE bit combination definitions
arch/powerpc/include/asm/pgtable.h
#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H
#ifdef __KERNEL__

#ifndef __ASSEMBLY__
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

struct mm_struct;

#ifdef CONFIG_DEBUG_VM
extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
#else /* CONFIG_DEBUG_VM */
static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
}
#endif /* !CONFIG_DEBUG_VM */

#endif /* !__ASSEMBLY__ */

#if defined(CONFIG_PPC64)
# include <asm/pgtable-ppc64.h>
#else
# include <asm/pgtable-ppc32.h>
#endif

/* Special mapping for AGP */
#define PAGE_AGP		(PAGE_KERNEL_NC)
#define HAVE_PAGE_AGP
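/*
 * HAVE_PAGE_AGP tells consumers of PAGE_AGP (the AGP GART and DRM code)
 * that this architecture supplies its own protection for AGP aperture
 * mappings, instead of the plain PAGE_KERNEL they would otherwise fall
 * back to. Here that protection is simply non-cached kernel memory.
 */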

#ifndef __ASSEMBLY__

/* Insert a PTE: the top-level function is out of line. It uses an inline
 * low-level function in the respective pgtable-* files.
 */
extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		       pte_t pte);

/* This low-level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now, but
 * I'm keeping it in one place rather than spread around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
	 * helper pte_update() which does an atomic update. We need to do that
	 * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
	 * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
	 * the hash bits instead (ie, same as the non-SMP case)
	 */
	if (percpu)
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
	else
		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));

#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
	/* Second case is 32-bit with 64-bit PTE in SMP mode. In this case, we
	 * can just store as long as we do the two halves in the right order
	 * with a barrier in between. This is possible because we take care,
	 * in the hash code, to pre-invalidate if the PTE was already hashed,
	 * which synchronizes us with any concurrent invalidation.
	 * In the percpu case, we also fall back to the simple update
	 * preserving the hash bits.
	 */
	if (percpu) {
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
		return;
	}
#if _PAGE_HASHPTE != 0
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);
#endif
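	/*
	 * Two 32-bit stores with an eieio barrier between them: the word at
	 * offset 0 goes out first, then the word at offset 4, which on these
	 * big-endian configurations is the low half carrying the PTE flag
	 * bits. "%L2" names the register holding the low word of the 64-bit
	 * operand; "%U0" and "%X0" let the assembler use update or indexed
	 * store forms when the memory operand calls for them.
	 */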
	__asm__ __volatile__("\
		stw%U0%X0 %2,%0\n\
		eieio\n\
		stw%U0%X0 %L2,%1"
	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
	: "r" (pte) : "memory");

#elif defined(CONFIG_PPC_STD_MMU_32)
	/* Third case is 32-bit hash table in UP mode. We need to preserve
	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
	 * translation in the hash yet (done in a subsequent flush_tlb_xxx())
	 * and so we need to keep track that this PTE needs invalidating.
	 */
	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
		      | (pte_val(pte) & ~_PAGE_HASHPTE));

#else
	/* Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 64-bit PTEs in UP mode.
	 */
	*ptep = pte;
#endif
}
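
/*
 * Illustrative only: per-CPU mappings such as kmap_atomic slots can pass
 * percpu = 1 to take the simple hash-preserving path above, along the
 * lines of (cf. the 32-bit highmem code):
 *
 *	__set_pte_at(&init_mm, vaddr, kmap_pte - idx, mk_pte(page, prot), 1);
 *
 * Everything else goes through the out-of-line set_pte_at().
 */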

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);
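
/*
 * Defining __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS stops <asm-generic/pgtable.h>
 * from supplying its generic ptep_set_access_flags(); the out-of-line
 * powerpc version can then keep the hash table and TLB consistent itself.
 */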

/*
 * Macros to alter the cacheability of a page protection value. Each one
 * first clears all of the cache-control bits and then ORs in the new mode.
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE))

#define pgprot_cached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT))

#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT | _PAGE_WRITETHRU))

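/*
 * Typical use, as in a driver mmap() handler (sketch; pfn and vma come
 * from the caller):
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *				  vma->vm_end - vma->vm_start,
 *				  vma->vm_page_prot);
 */
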

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

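/*
 * With __HAVE_PHYS_MEM_ACCESS_PROT defined, the /dev/mem code in
 * drivers/char/mem.c calls phys_mem_access_prot() so that the
 * architecture, rather than generic heuristics, decides the cacheability
 * of a physical mapping.
 */
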
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address. Most 32-bit archs define it as always true (like this),
 * but most 64-bit archs actually perform a test. What should we do here?
 */
#define kern_addr_valid(addr)	(1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
	remap_pfn_range(vma, vaddr, pfn, size, prot)
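
/*
 * MMIO on powerpc is mapped through the ordinary page tables, so
 * io_remap_pfn_range() needs no special handling; callers typically
 * apply pgprot_noncached() (above) to the protection first.
 */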

#include <asm-generic/pgtable.h>


/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGTABLE_H */