arch/ppc64/mm/hash_low.S
/*
 * ppc64 MMU hashtable management routines
 *
 * (c) Copyright IBM Corp. 2003
 *
 * Maintained by: Benjamin Herrenschmidt
 *                <benh@kernel.crashing.org>
 *
 * This file is covered by the GNU Public Licence v2 as
 * described in the kernel's COPYING file.
 */

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/ppc_asm.h>
#include <asm/offsets.h>
#include <asm/cputable.h>

        .text

/*
 * Stackframe:
 *
 *         +-> Back chain (SP + 256)
 *         |   General register save area (SP + 112)
 *         |   Parameter save area (SP + 48)
 *         |   TOC save area (SP + 40)
 *         |   link editor doubleword (SP + 32)
 *         |   compiler doubleword (SP + 24)
 *         |   LR save area (SP + 16)
 *         |   CR save area (SP + 8)
 * SP ---> +-- Back chain (SP + 0)
 */
#define STACKFRAMESIZE 256

/* Save parameters offsets */
#define STK_PARM(i)     (STACKFRAMESIZE + 48 + ((i)-3)*8)

/* Save non-volatile offsets */
#define STK_REG(i)      (112 + ((i)-14)*8)

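/*
 * Example expansions (STACKFRAMESIZE skips our own frame, so STK_PARM
 * offsets land in the caller's parameter save area; the first integer
 * argument arrives in r3):
 *
 *   STK_PARM(r6) = 256 + 48 + (6-3)*8 = 328   (4th argument, ptep)
 *   STK_REG(r27) = 112 + (27-14)*8    = 216
 */
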
/*
 * __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
 *             pte_t *ptep, unsigned long trap, int local)
 *
 * Adds a page to the hash table. This is the non-LPAR version for now
 */

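/*
 * Return value, as inferred from the exit paths below:
 *    0  the PTE was busy (just refault if needed) or was hashed in
 *       successfully
 *    1  access violation (the permission check failed)
 *   -1  hpte_insert reported a critical failure; the old PTE is put back
 */
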
_GLOBAL(__hash_page)
        mflr    r0
        std     r0,16(r1)
        stdu    r1,-STACKFRAMESIZE(r1)
        /* Save all params that we need after a function call */
        std     r6,STK_PARM(r6)(r1)
        std     r8,STK_PARM(r8)(r1)

        /* Add _PAGE_PRESENT to access */
        ori     r4,r4,_PAGE_PRESENT

        /* Save non-volatile registers.
         * r31 will hold "old PTE"
         * r30 is "new PTE"
         * r29 is "va"
         * r28 is a hash value
         * r27 is hashtab mask (maybe dynamically patched instead?)
         */
        std     r27,STK_REG(r27)(r1)
        std     r28,STK_REG(r28)(r1)
        std     r29,STK_REG(r29)(r1)
        std     r30,STK_REG(r30)(r1)
        std     r31,STK_REG(r31)(r1)

        /* Step 1:
         *
         * Check permissions, atomically mark the linux PTE busy
         * and hashed.
         */
1:
        ldarx   r31,0,r6
        /* Check access rights (access & ~(pte_val(*ptep))) */
        andc.   r0,r4,r31
        bne-    htab_wrong_access
        /* Check if PTE is busy */
        andi.   r0,r31,_PAGE_BUSY
        /* If so, just bail out and refault if needed. Someone else
         * is changing this PTE anyway and might hash it.
         */
        bne-    bail_ok
        /* Prepare new PTE value (turn access RW into DIRTY, then
         * add BUSY, HASHPTE and ACCESSED)
         */
        rlwinm  r30,r4,32-9+7,31-7,31-7 /* _PAGE_RW -> _PAGE_DIRTY */
        or      r30,r30,r31
        ori     r30,r30,_PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE
        /* Write the linux PTE atomically (setting busy) */
        stdcx.  r30,0,r6
        bne-    1b
        isync

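        /* Rough C-style sketch of the ldarx/stdcx. loop above, for
         * reference only; store_conditional() below just stands for the
         * stdcx. store-conditional and is not a real kernel helper:
         *
         *      do {
         *              old = *ptep;                    // ldarx
         *              if (access & ~old)
         *                      return 1;               // htab_wrong_access
         *              if (old & _PAGE_BUSY)
         *                      return 0;               // bail_ok
         *              new = old | _PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE
         *                        | ((access & _PAGE_RW) ? _PAGE_DIRTY : 0);
         *      } while (!store_conditional(ptep, new)); // stdcx. / bne- 1b
         */
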
        /* Step 2:
         *
         * Insert/Update the HPTE in the hash table. At this point,
         * r4 (access) is reusable; we use it for the new HPTE flags
         */

        /* Calc va and put it in r29 */
        rldicr  r29,r5,28,63-28
        rldicl  r3,r3,0,36
        or      r29,r3,r29

        /* Calculate hash value for primary slot and store it in r28 */
        rldicl  r5,r5,0,25              /* vsid & 0x0000007fffffffff */
        rldicl  r0,r3,64-12,48          /* (ea >> 12) & 0xffff */
        xor     r28,r5,r0

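        /* For reference, the two values computed above are:
         *
         *      va   = (vsid << 28) | (ea & 0x0fffffff)
         *      hash = (vsid & 0x7fffffffff) ^ ((ea >> 12) & 0xffff)
         */
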
        /* Convert linux PTE bits into HW equivalents */
        andi.   r3,r30,0x1fe            /* Get basic set of flags */
        xori    r3,r3,HW_NO_EXEC        /* _PAGE_EXEC -> NOEXEC */
        rlwinm  r0,r30,32-9+1,30,30     /* _PAGE_RW -> _PAGE_USER (r0) */
        rlwinm  r4,r30,32-7+1,30,30     /* _PAGE_DIRTY -> _PAGE_USER (r4) */
        and     r0,r0,r4                /* _PAGE_RW & _PAGE_DIRTY -> r0 bit 30 */
        andc    r0,r30,r0               /* r0 = pte & ~r0 */
        rlwimi  r3,r0,32-1,31,31        /* Insert result into PP lsb */

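        /* Net effect on the PP field (a sketch of the logic above):
         * kernel pages get PP=00 (supervisor rw, no user access), user
         * pages that are both RW and DIRTY get PP=10 (read/write), and
         * any other user page gets PP=11 (read-only), so the first store
         * faults and gives us a chance to set DIRTY.
         */
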
        /* We do the icache sync here if needed (maybe inline that
         * code rather than call a C function...)
         */
BEGIN_FTR_SECTION
BEGIN_FTR_SECTION
        mr      r4,r30
        mr      r5,r7
        bl      .hash_page_do_lazy_icache
END_FTR_SECTION_IFSET(CPU_FTR_NOEXECUTE)
END_FTR_SECTION_IFCLR(CPU_FTR_COHERENT_ICACHE)

        /* At this point, r3 contains new PP bits, save them in
         * place of "access" in the param area (sic)
         */
        std     r3,STK_PARM(r4)(r1)

        /* Get htab_hash_mask */
        ld      r4,htab_hash_mask@got(2)
        ld      r27,0(r4)               /* htab_hash_mask -> r27 */

        /* Check if we may already be in the hash table; if so, go to
         * out-of-line code to try to modify the HPTE
         */
        andi.   r0,r31,_PAGE_HASHPTE
        bne     htab_modify_pte

htab_insert_pte:
        /* Clear HPTE bits in new pte (we also clear BUSY btw) and
         * add _PAGE_HASHPTE
         */
        lis     r0,_PAGE_HPTEFLAGS@h
        ori     r0,r0,_PAGE_HPTEFLAGS@l
        andc    r30,r30,r0
        ori     r30,r30,_PAGE_HASHPTE

        /* page number in r5 */
        rldicl  r5,r31,64-PTE_SHIFT,PTE_SHIFT

        /* Calculate primary group hash */
        and     r0,r28,r27
        rldicr  r3,r0,3,63-3            /* r3 = (hash & mask) << 3 */

        /* Call ppc_md.hpte_insert */
        ld      r7,STK_PARM(r4)(r1)     /* Retrieve new pp bits */
        mr      r4,r29                  /* Retrieve va */
        li      r6,0                    /* no vflags */
_GLOBAL(htab_call_hpte_insert1)
        bl      .                       /* Will be patched by htab_finish_init() */
        cmpdi   0,r3,0
        bge     htab_pte_insert_ok      /* Insertion successful */
        cmpdi   0,r3,-2                 /* Critical failure */
        beq-    htab_pte_insert_failure

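        /* Return convention of the patched hpte_insert call, as used by
         * the tests above and below: r3 >= 0 is the slot number within
         * the group, -1 means the group is full (so we fall through to
         * the secondary group and, failing that, evict an entry), and
         * -2 is a critical failure.
         */
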
        /* Now try secondary slot */

        /* page number in r5 */
        rldicl  r5,r31,64-PTE_SHIFT,PTE_SHIFT

        /* Calculate secondary group hash */
        andc    r0,r27,r28
        rldicr  r3,r0,3,63-3            /* r3 = (~hash & mask) << 3 */

        /* Call ppc_md.hpte_insert */
        ld      r7,STK_PARM(r4)(r1)     /* Retrieve new pp bits */
        mr      r4,r29                  /* Retrieve va */
        li      r6,HPTE_V_SECONDARY@l   /* secondary slot */
_GLOBAL(htab_call_hpte_insert2)
        bl      .                       /* Will be patched by htab_finish_init() */
        cmpdi   0,r3,0
        bge+    htab_pte_insert_ok      /* Insertion successful */
        cmpdi   0,r3,-2                 /* Critical failure */
        beq-    htab_pte_insert_failure

        /* Both are full, we need to evict something */
        mftb    r0
        /* Pick a random group based on TB */
        andi.   r0,r0,1
        mr      r5,r28
        bne     2f
        not     r5,r5
2:      and     r0,r5,r27
        rldicr  r3,r0,3,63-3            /* r3 = (hash & mask) << 3 */
        /* Call ppc_md.hpte_remove */
_GLOBAL(htab_call_hpte_remove)
        bl      .                       /* Will be patched by htab_finish_init() */

        /* Try the whole insertion again */
        b       htab_insert_pte

bail_ok:
        li      r3,0
        b       bail

htab_pte_insert_ok:
        /* Insert slot number & secondary bit in PTE */
        rldimi  r30,r3,12,63-15

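        /* The slot number returned in r3 (with the secondary bit above
         * it, per the comment above) is shifted left by 12, landing in
         * the _PAGE_GROUP_IX / _PAGE_SECONDARY software bits of the
         * linux PTE; htab_modify_pte reads it back from there later.
         */
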
        /* Write out the PTE with a normal write
         * (maybe adding an eieio would still be a good idea?)
         */
htab_write_out_pte:
        ld      r6,STK_PARM(r6)(r1)
        std     r30,0(r6)
        li      r3,0
bail:
        ld      r27,STK_REG(r27)(r1)
        ld      r28,STK_REG(r28)(r1)
        ld      r29,STK_REG(r29)(r1)
        ld      r30,STK_REG(r30)(r1)
        ld      r31,STK_REG(r31)(r1)
        addi    r1,r1,STACKFRAMESIZE
        ld      r0,16(r1)
        mtlr    r0
        blr

htab_modify_pte:
        /* Keep PP bits in r4 and slot idx from the PTE around in r3 */
        mr      r4,r3
        rlwinm  r3,r31,32-12,29,31

        /* Secondary group? If so, get an inverted hash value */
        mr      r5,r28
        andi.   r0,r31,_PAGE_SECONDARY
        beq     1f
        not     r5,r5
1:
        /* Calculate proper slot value for ppc_md.hpte_updatepp */
        and     r0,r5,r27
        rldicr  r0,r0,3,63-3            /* r0 = (hash & mask) << 3 */
        add     r3,r0,r3                /* add slot idx */

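        /* r3 is now the absolute HPTE slot: the group offset
         * ((hash & mask) << 3) plus the 3-bit group index that was
         * recorded in the PTE when the entry was inserted.
         */
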
        /* Call ppc_md.hpte_updatepp */
        mr      r5,r29                  /* va */
        li      r6,0                    /* large is 0 */
        ld      r7,STK_PARM(r8)(r1)     /* get "local" param */
_GLOBAL(htab_call_hpte_updatepp)
        bl      .                       /* Will be patched by htab_finish_init() */

        /* If we failed (typically because the HPTE wasn't really
         * there), we try an insertion.
         */
        cmpdi   0,r3,-1
        beq-    htab_insert_pte

        /* Clear the BUSY bit and write out the PTE */
        li      r0,_PAGE_BUSY
        andc    r30,r30,r0
        b       htab_write_out_pte

htab_wrong_access:
        /* Bail out clearing reservation */
        stdcx.  r31,0,r6
        li      r3,1
        b       bail

htab_pte_insert_failure:
        /* Bail out restoring old PTE */
        ld      r6,STK_PARM(r6)(r1)
        std     r31,0(r6)
        li      r3,-1
        b       bail