[ARM] Convert set_pte_ext implementations to macros
[deliverable/linux.git] / arch / arm / mm / proc-macros.S
CommitLineData
1da177e4
LT
/*
 * We need constants.h for:
 *  VMA_VM_MM
 *  VMA_VM_FLAGS
 *  VM_EXEC
 */
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
9
/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 *
 * \rd: destination register, receives vma->vm_mm
 * \rn: register holding a struct vm_area_struct pointer
 */
	.macro	vma_vm_mm, rd, rn
	ldr	\rd, [\rn, #VMA_VM_MM]		@ rd = vma->vm_mm
	.endm
16
/*
 * vma_vm_flags - get vma->vm_flags
 *
 * \rd: destination register, receives vma->vm_flags
 * \rn: register holding a struct vm_area_struct pointer
 */
	.macro	vma_vm_flags, rd, rn
	ldr	\rd, [\rn, #VMA_VM_FLAGS]	@ rd = vma->vm_flags
	.endm
23
/*
 * tsk_mm - get the active mm from a thread_info pointer
 *          (thread_info->task->active_mm)
 *
 * \rd: destination register, receives task->active_mm
 * \rn: register holding a struct thread_info pointer
 */
	.macro	tsk_mm, rd, rn
	ldr	\rd, [\rn, #TI_TASK]		@ rd = ti->task
	ldr	\rd, [\rd, #TSK_ACTIVE_MM]	@ rd = task->active_mm
	.endm
28
/*
 * act_mm - get current->active_mm
 *
 * Derives the current thread_info from the stack pointer: thread_info
 * sits at the base of the 8K-aligned kernel stack, so clearing the low
 * 13 bits of sp yields its address.  The 8191 (0x1fff) mask is split
 * into 8128 (0x1fc0) and 63 (0x3f) because each piece must be encodable
 * as an ARM immediate (8-bit value, even rotation).
 *
 * \rd: destination register, receives current->active_mm
 */
	.macro	act_mm, rd
	bic	\rd, sp, #8128			@ clear sp bits [12:6]
	bic	\rd, \rd, #63			@ clear bits [5:0] -> thread_info
	ldr	\rd, [\rd, #TI_TASK]		@ rd = ti->task  (current)
	ldr	\rd, [\rd, #TSK_ACTIVE_MM]	@ rd = current->active_mm
	.endm
38
/*
 * mmid - get context id from mm pointer (mm->context.id)
 *
 * \rd: destination register, receives mm->context.id
 * \rn: register holding a struct mm_struct pointer
 */
	.macro	mmid, rd, rn
	ldr	\rd, [\rn, #MM_CONTEXT_ID]	@ rd = mm->context.id
	.endm
45
/*
 * asid - extract the ASID (low 8 bits) from a context ID
 *
 * \rd: destination register, receives the ASID
 * \rn: register holding a context ID (see mmid above)
 */
	.macro	asid, rd, rn
	and	\rd, \rn, #255			@ ASID is the low byte
	.endm
22b19086
RK
52
/*
 * crval - emit a processor's control register value table: a mask of
 * bits to clear followed by a mask of bits to set.  The \mmuset word
 * is emitted for MMU kernels, the \ucset word for !CONFIG_MMU
 * (uClinux) kernels; the clear mask is common to both.
 */
	.macro	crval, clear, mmuset, ucset
#ifdef CONFIG_MMU
	.word	\clear
	.word	\mmuset
#else
	.word	\clear
	.word	\ucset
#endif
	.endm
bbe88886
CM
62
/*
 * dcache_line_size - get the D-cache line size from the CCSIDR
 * register (available on ARMv7+).  It assumes that the CSSELR
 * register was already configured to select the L1 data cache's
 * CCSIDR.
 *
 * \reg: destination register, receives the line size in bytes
 * \tmp: scratch register
 */
	.macro	dcache_line_size, reg, tmp
	mrc	p15, 1, \tmp, c0, c0, 0		@ read CCSIDR
	and	\tmp, \tmp, #7			@ cache line size encoding
	mov	\reg, #16			@ size offset
	mov	\reg, \reg, lsl \tmp		@ bytes = 16 << encoding
	.endm
da091653
RK
74
75
/*
 * Sanity check the PTE configuration for the code below - which makes
 * certain assumptions about how these bits are laid out.
 *
 * The set_pte_ext macros rely on the Linux PTE bits coinciding with
 * (or at least not colliding with) the corresponding hardware PTE
 * bits; break the build if the headers ever change that layout.
 */
#if L_PTE_SHARED != PTE_EXT_SHARED
#error PTE shared bit mismatch
#endif
#if L_PTE_BUFFERABLE != PTE_BUFFERABLE
#error PTE bufferable bit mismatch
#endif
#if L_PTE_CACHEABLE != PTE_CACHEABLE
#error PTE cacheable bit mismatch
#endif
/* all other Linux PTE bits must sit below L_PTE_SHARED */
#if (L_PTE_EXEC+L_PTE_USER+L_PTE_WRITE+L_PTE_DIRTY+L_PTE_YOUNG+\
     L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED
#error Invalid Linux PTE bit settings
#endif
93
/*
 * The ARMv6 and ARMv7 set_pte_ext translation function.
 *
 * Entry:
 *	r0 = Linux PTE address (the hardware PTE table sits
 *	     2048 bytes below the Linux one)
 *	r1 = Linux PTE value
 *	r2 = extra hardware PTE bits from the caller (presumably
 *	     memory-type bits - confirm against proc-v6/v7 callers)
 * Clobbers: r3
 *
 * Permission translation:
 *  YUWD  APX AP1 AP0	SVC	User
 *  0xxx   0   0   0	no acc	no acc
 *  100x   1   0   1	r/o	no acc
 *  10x0   1   0   1	r/o	no acc
 *  1011   0   0   1	r/w	no acc
 *  110x   0   1   0	r/w	r/o
 *  11x0   0   1   0	r/w	r/o
 *  1111   0   1   1	r/w	r/w
 */
	.macro	armv6_set_pte_ext
	str	r1, [r0], #-2048		@ linux version

	bic	r3, r1, #0x000003f0		@ drop Linux-only bits
	bic	r3, r3, #PTE_TYPE_MASK
	orr	r3, r3, r2			@ merge caller's bits
	orr	r3, r3, #PTE_EXT_AP0 | 2	@ type 2 (small page), AP0

	tst	r1, #L_PTE_WRITE
	tstne	r1, #L_PTE_DIRTY
	orreq	r3, r3, #PTE_EXT_APX		@ !(write && dirty) -> r/o

	tst	r1, #L_PTE_USER
	orrne	r3, r3, #PTE_EXT_AP1		@ user access
	tstne	r3, #PTE_EXT_APX
	bicne	r3, r3, #PTE_EXT_APX | PTE_EXT_AP0	@ user r/o form

	tst	r1, #L_PTE_EXEC
	orreq	r3, r3, #PTE_EXT_XN		@ !exec -> execute never

	tst	r1, #L_PTE_YOUNG
	tstne	r1, #L_PTE_PRESENT
	moveq	r3, #0				@ !present || !young -> fault

	str	r3, [r0]			@ hardware version
	mcr	p15, 0, r0, c7, c10, 1		@ flush_pte
	.endm
134
135
/*
 * The ARMv3, ARMv4 and ARMv5 set_pte_ext translation function,
 * covering most CPUs except Xscale and Xscale 3.
 *
 * Entry:
 *	r0 = Linux PTE address (the hardware PTE table sits
 *	     2048 bytes below the Linux one)
 *	r1 = Linux PTE value
 * Clobbers: r2, r3
 *
 * \wc_disable: when 1 (the default) and the D-cache is configured
 * writethrough, clear the bufferable bit on cacheable pages before
 * writing the hardware PTE.
 *
 * Permission translation:
 *  YUWD   AP	SVC	User
 *  0xxx  0x00	no acc	no acc
 *  100x  0x00	r/o	no acc
 *  10x0  0x00	r/o	no acc
 *  1011  0x55	r/w	no acc
 *  110x  0xaa	r/w	r/o
 *  11x0  0xaa	r/w	r/o
 *  1111  0xff	r/w	r/w
 */
	.macro	armv3_set_pte_ext wc_disable=1
	str	r1, [r0], #-2048		@ linux version

	@ invert present/young/write/dirty so "bit clear" below means
	@ "flag was set in the Linux PTE"
	eor	r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY

	bic	r2, r1, #PTE_SMALL_AP_MASK	@ keep C, B bits
	bic	r2, r2, #PTE_TYPE_MASK
	orr	r2, r2, #PTE_TYPE_SMALL

	tst	r3, #L_PTE_USER			@ user?
	orrne	r2, r2, #PTE_SMALL_AP_URO_SRW

	tst	r3, #L_PTE_WRITE | L_PTE_DIRTY	@ write and dirty?
	orreq	r2, r2, #PTE_SMALL_AP_UNO_SRW

	tst	r3, #L_PTE_PRESENT | L_PTE_YOUNG	@ present and young?
	movne	r2, #0				@ no -> fault entry

	.if	\wc_disable
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	tst	r2, #PTE_CACHEABLE
	bicne	r2, r2, #PTE_BUFFERABLE		@ cacheable -> !bufferable
#endif
	.endif
	str	r2, [r0]			@ hardware version
	.endm
176
177
/*
 * Xscale set_pte_ext translation, split into two halves to cope
 * with work-arounds. r3 must be preserved by code between these
 * two macros.
 *
 * Entry:
 *	r0 = Linux PTE address (the hardware PTE table sits
 *	     2048 bytes below the Linux one)
 *	r1 = Linux PTE value
 * Output: r2 = partial hardware PTE, r3 = inverted flag word
 *	   (both consumed by xscale_set_pte_ext_epilogue below)
 *
 * Permission translation:
 *  YUWD  AP	SVC	User
 *  0xxx  00	no acc	no acc
 *  100x  00	r/o	no acc
 *  10x0  00	r/o	no acc
 *  1011  01	r/w	no acc
 *  110x  10	r/w	r/o
 *  11x0  10	r/w	r/o
 *  1111  11	r/w	r/w
 */
	.macro	xscale_set_pte_ext_prologue
	str	r1, [r0], #-2048		@ linux version

	@ invert present/young/write/dirty so "bit clear" below means
	@ "flag was set in the Linux PTE"
	eor	r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY

	bic	r2, r1, #PTE_SMALL_AP_MASK	@ keep C, B bits
	orr	r2, r2, #PTE_TYPE_EXT		@ extended page

	tst	r3, #L_PTE_USER			@ user?
	orrne	r2, r2, #PTE_EXT_AP_URO_SRW	@ yes -> user r/o, system r/w

	tst	r3, #L_PTE_WRITE | L_PTE_DIRTY	@ write and dirty?
	orreq	r2, r2, #PTE_EXT_AP_UNO_SRW	@ yes -> user n/a, system r/w
						@ combined with user -> user r/w
	.endm
208
/*
 * Second half of the Xscale set_pte_ext translation: expects r0, r2
 * and r3 exactly as left by xscale_set_pte_ext_prologue above, with
 * any CPU-specific work-around code in between.  Writes the hardware
 * PTE and performs the required cache/write-buffer maintenance.
 * Clobbers: ip
 */
	.macro	xscale_set_pte_ext_epilogue
	tst	r3, #L_PTE_PRESENT | L_PTE_YOUNG	@ present and young?
	movne	r2, #0				@ no -> fault

	str	r2, [r0]			@ hardware version
	mov	ip, #0
	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
	mcr	p15, 0, ip, c7, c10, 4		@ data write barrier
	.endm
This page took 0.537307 seconds and 5 git commands to generate.