/*
 * Low-level SLB routines
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 *
 * Based on earlier C version:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 * Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/firmware.h>

/* void slb_allocate_realmode(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 * r3 = faulting address, r13 = PACA
 * r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 */
_GLOBAL(slb_allocate_realmode)
	/*
	 * check for bad kernel/user address
	 * (ea & ~REGION_MASK) >= PGTABLE_RANGE
	 */
	rldicr.	r9,r3,4,(63 - 46 - 4)
	bne-	8f
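	/*
	 * Roughly, in C: if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) goto 8f;
	 * the rldicr keeps only the EA bits between the region id (top four
	 * bits) and the 46-bit page-table range, so any of them being set
	 * marks the address as bad.
	 */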

	srdi	r9,r3,60		/* get region */
	srdi	r10,r3,SID_SHIFT	/* get esid */
	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */

	/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
	blt	cr7,0f			/* user or kernel? */

	/* kernel address: proto-VSID = ESID */
	/* WARNING - MAGIC: we don't use the VSID 0xfffffffff, but
	 * this code will generate the protoVSID 0xfffffffff for the
	 * top segment. That's ok, the scramble below will translate
	 * it to VSID 0, which is reserved as a bad VSID - one which
	 * will never have any pages in it. */

	/* Check if hitting the linear mapping or some other kernel space
	 */
	bne	cr7,1f

	/* Linear mapping encoding bits, the "li" instruction below will
	 * be patched by the kernel at boot
	 */
_GLOBAL(slb_miss_kernel_load_linear)
	li	r11,0
	/*
	 * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
	 * r9 = region id.
	 */
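	/*
	 * Worked example (from the formula above): the linear mapping at
	 * 0xc... gets context MAX_USER_CONTEXT + 1, the vmalloc region at
	 * 0xd... gets MAX_USER_CONTEXT + 2, and so on, giving each kernel
	 * region its own context id.
	 */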
	addis	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
	addi	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l


BEGIN_FTR_SECTION
	b	slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
	b	slb_finish_load_1T

1:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* Check virtual memmap region. To be patched at kernel boot */
	cmpldi	cr0,r9,0xf
	bne	1f
_GLOBAL(slb_miss_kernel_load_vmemmap)
	li	r11,0
	b	6f
1:
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

	/* vmalloc mapping gets the encoding from the PACA as the mapping
	 * can be demoted from 64K -> 4K dynamically on some machines
	 */
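	/*
	 * Sketch: addresses within the first VMALLOC_SIZE of this region use
	 * the sllp value cached in the PACA (since the vmalloc encoding can
	 * be demoted from 64K to 4K at runtime); anything beyond that is
	 * treated as the IO mapping and takes the fixed encoding patched
	 * into slb_miss_kernel_load_io below.
	 */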
	clrldi	r11,r10,48
	cmpldi	r11,(VMALLOC_SIZE >> 28) - 1
	bgt	5f
	lhz	r11,PACAVMALLOCSLLP(r13)
	b	6f
5:
	/* IO mapping */
_GLOBAL(slb_miss_kernel_load_io)
	li	r11,0
6:
	/*
	 * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
	 * r9 = region id.
	 */
	addis	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
	addi	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l

BEGIN_FTR_SECTION
	b	slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
	b	slb_finish_load_1T

0:
	/* when using slices, we extract the psize off the slice bitmaps
	 * and then we need to get the sllp encoding off the mmu_psize_defs
	 * array.
	 *
	 * XXX This is a bit inefficient especially for the normal case,
	 * so we should try to implement a fast path for the standard page
	 * size using the old sllp value so we avoid the array. We cannot
	 * really do dynamic patching unfortunately as processes might flip
	 * between 4k and 64k standard page size
	 */
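	/*
	 * Rough C sketch of the lookup done below (illustrative only; the
	 * real offsets and field layouts come from asm-offsets.h):
	 *
	 *	if (esid < 16) {		(below SLICE_LOW_TOP)
	 *		psizes = get_paca()->context.low_slices_psize;
	 *		index  = esid;
	 *	} else {
	 *		psizes = get_paca()->context.high_slices_psize[
	 *			esid >> (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT + 1)];
	 *		index  = (esid >> (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)) & 1;
	 *	}
	 *	psize = (psizes >> (index * 4)) & 0xf;
	 *	r11   = mmu_psize_defs[psize].sllp | SLB_VSID_USER;
	 */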
#ifdef CONFIG_PPC_MM_SLICES
	/* r10 holds the esid */
	cmpldi	r10,16
	/* below SLICE_LOW_TOP */
	blt	5f
	/*
	 * Handle hpsizes,
	 * r9 is get_paca()->context.high_slices_psize[index], r11 is mask_index
	 */
	srdi	r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT + 1)	/* index */
	addi	r9,r11,PACAHIGHSLICEPSIZE
	lbzx	r9,r13,r9		/* r9 is hpsizes[r11] */
	/* r11 = (r10 >> (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)) & 0x1 */
	rldicl	r11,r10,(64 - (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)),63
	b	6f

5:
	/*
	 * Handle lpsizes
	 * r9 is get_paca()->context.low_slices_psize, r11 is index
	 */
	ld	r9,PACALOWSLICESPSIZE(r13)
	mr	r11,r10
6:
	sldi	r11,r11,2		/* index * 4 */
	/* Extract the psize and multiply to get an array offset */
	srd	r9,r9,r11
	andi.	r9,r9,0xf
	mulli	r9,r9,MMUPSIZEDEFSIZE

	/* Now get to the array and obtain the sllp
	 */
	ld	r11,PACATOC(r13)
	ld	r11,mmu_psize_defs@got(r11)
	add	r11,r11,r9
	ld	r11,MMUPSIZESLLP(r11)
	ori	r11,r11,SLB_VSID_USER
#else
	/* paca context sllp already contains the SLB_VSID_USER bits */
	lhz	r11,PACACONTEXTSLLP(r13)
#endif /* CONFIG_PPC_MM_SLICES */

	ld	r9,PACACONTEXTID(r13)
BEGIN_FTR_SECTION
	cmpldi	r10,0x1000
	bge	slb_finish_load_1T
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
	b	slb_finish_load

8:	/* invalid EA */
	li	r10,0			/* BAD_VSID */
	li	r9,0			/* BAD_VSID */
	li	r11,SLB_VSID_USER	/* flags don't much matter */
	b	slb_finish_load

#ifdef __DISABLED__

/* void slb_allocate_user(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 * r3 = faulting address, r13 = PACA
 * r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 *
 * It is called with translation enabled in order to be able to walk the
 * page tables. This is not currently used.
 */
_GLOBAL(slb_allocate_user)
	/* r3 = faulting address */
	srdi	r10,r3,28		/* get esid */

	crset	4*cr7+lt		/* set "user" flag for later */

	/* check if we fit in the range covered by the pagetables */
	srdi.	r9,r3,PGTABLE_EADDR_SIZE
	crnot	4*cr0+eq,4*cr0+eq
	beqlr

	/* now we need to get to the page tables in order to get the page
	 * size encoding from the PMD. In the future, we'll be able to deal
	 * with 1T segments too by getting the encoding from the PGD instead
	 */
	ld	r9,PACAPGDIR(r13)
	cmpldi	cr0,r9,0
	beqlr
	rlwinm	r11,r10,8,25,28
	ldx	r9,r9,r11		/* get pgd_t */
	cmpldi	cr0,r9,0
	beqlr
	rlwinm	r11,r10,3,17,28
	ldx	r9,r9,r11		/* get pmd_t */
	cmpldi	cr0,r9,0
	beqlr

	/* build vsid flags */
	andi.	r11,r9,SLB_VSID_LLP
	ori	r11,r11,SLB_VSID_USER

	/* get context to calculate proto-VSID */
	ld	r9,PACACONTEXTID(r13)
	/* fall through slb_finish_load */

#endif /* __DISABLED__ */


/*
 * Finish loading of an SLB entry and return
 *
 * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
 */
slb_finish_load:
	rldimi	r10,r9,ESID_BITS,0
	ASM_VSID_SCRAMBLE(r10,r9,256M)
	/*
	 * bits above VSID_BITS_256M need to be ignored from r10
	 * also combine VSID and flags
	 */
	rldimi	r11,r10,SLB_VSID_SHIFT,(64 - (SLB_VSID_SHIFT + VSID_BITS_256M))
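	/*
	 * Roughly: r11 = (vsid << SLB_VSID_SHIFT) | flags, where vsid is the
	 * scrambled proto-VSID truncated to VSID_BITS_256M bits.
	 */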

	/* r3 = EA, r11 = VSID data */
	/*
	 * Find a slot, round robin. Previously we tried to find a
	 * free slot first but that took too long. Unfortunately we
	 * don't have any LRU information to help us choose a slot.
	 */
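	/*
	 * Illustrative C sketch of the slot choice below (the cmpldi under
	 * slb_compare_rr_to_size is patched at boot with the real SLB size):
	 *
	 *	slot = paca->stab_rr + 1;
	 *	if (slot >= slb_size)
	 *		slot = SLB_NUM_BOLTED;	(wrap, skipping the bolted entries)
	 *	paca->stab_rr = slot;
	 */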

7:	ld	r10,PACASTABRR(r13)
	addi	r10,r10,1
	/* This gets soft patched on boot. */
_GLOBAL(slb_compare_rr_to_size)
	cmpldi	r10,0

	blt+	4f
	li	r10,SLB_NUM_BOLTED

4:
	std	r10,PACASTABRR(r13)

3:
	rldimi	r3,r10,0,36		/* r3 = EA[0:35] | entry */
	oris	r10,r3,SLB_ESID_V@h	/* r10 = r3 | SLB_ESID_V */

	/* r3 = ESID data, r11 = VSID data */

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 */
	slbmte	r11,r10

	/* we're done for kernel addresses */
	crclr	4*cr0+eq		/* set result to "success" */
	bgelr	cr7

	/* Update the slb cache */
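	/*
	 * Rough C equivalent of the cache update below (sketch only):
	 *
	 *	if (paca->slb_cache_ptr < SLB_CACHE_ENTRIES)
	 *		paca->slb_cache[paca->slb_cache_ptr++] = esid;
	 *	else
	 *		paca->slb_cache_ptr = SLB_CACHE_ENTRIES + 1;
	 *
	 * where SLB_CACHE_ENTRIES + 1 marks an overflowed cache.
	 */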
	lhz	r3,PACASLBCACHEPTR(r13)	/* offset = paca->slb_cache_ptr */
	cmpldi	r3,SLB_CACHE_ENTRIES
	bge	1f

	/* still room in the slb cache */
	sldi	r11,r3,2		/* r11 = offset * sizeof(u32) */
	srdi	r10,r10,28		/* get the 36 bits of the ESID */
	add	r11,r11,r13		/* r11 = (u32 *)paca + offset */
	stw	r10,PACASLBCACHE(r11)	/* paca->slb_cache[offset] = esid */
	addi	r3,r3,1			/* offset++ */
	b	2f
1:					/* offset >= SLB_CACHE_ENTRIES */
	li	r3,SLB_CACHE_ENTRIES+1
2:
	sth	r3,PACASLBCACHEPTR(r13)	/* paca->slb_cache_ptr = offset */
	crclr	4*cr0+eq		/* set result to "success" */
	blr

/*
 * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
 *
 * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9
 */
slb_finish_load_1T:
	srdi	r10,r10,(SID_SHIFT_1T - SID_SHIFT)	/* get 1T ESID */
	rldimi	r10,r9,ESID_BITS_1T,0
	ASM_VSID_SCRAMBLE(r10,r9,1T)
	/*
	 * bits above VSID_BITS_1T need to be ignored from r10
	 * also combine VSID and flags
	 */
	rldimi	r11,r10,SLB_VSID_SHIFT_1T,(64 - (SLB_VSID_SHIFT_1T + VSID_BITS_1T))
	li	r10,MMU_SEGSIZE_1T
	rldimi	r11,r10,SLB_VSID_SSIZE_SHIFT,0	/* insert segment size */

	/* r3 = EA, r11 = VSID data */
	clrrdi	r3,r3,SID_SHIFT_1T	/* clear out non-ESID bits */
	b	7b
