/*
 * Low level TLB miss handlers for Book3E
 *
 * Copyright (C) 2008-2009
 *     Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/pgtable.h>
#include <asm/exception-64e.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_booke_hv_asm.h>

#ifdef CONFIG_PPC_64K_PAGES
#define VPTE_PMD_SHIFT	(PTE_INDEX_SIZE+1)
#else
#define VPTE_PMD_SHIFT	(PTE_INDEX_SIZE)
#endif
#define VPTE_PUD_SHIFT	(VPTE_PMD_SHIFT + PMD_INDEX_SIZE)
#define VPTE_PGD_SHIFT	(VPTE_PUD_SHIFT + PUD_INDEX_SIZE)
#define VPTE_INDEX_SIZE (VPTE_PGD_SHIFT + PGD_INDEX_SIZE)
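/*
 * For reference (assuming the usual 4K page-table geometry: PTE_INDEX_SIZE=9,
 * PMD_INDEX_SIZE=7, PUD_INDEX_SIZE=9, PGD_INDEX_SIZE=9), these work out to
 * VPTE_PMD_SHIFT=9, VPTE_PUD_SHIFT=16, VPTE_PGD_SHIFT=25 and
 * VPTE_INDEX_SIZE=34.
 */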

/**********************************************************************
 *                                                                    *
 * TLB miss handling for Book3E with a bolted linear mapping          *
 * No virtual page table, no nested TLB misses                        *
 *                                                                    *
 **********************************************************************/

.macro tlb_prolog_bolted intnum addr
	mtspr	SPRN_SPRG_GEN_SCRATCH,r13
	mfspr	r13,SPRN_SPRG_PACA
	std	r10,PACA_EXTLB+EX_TLB_R10(r13)
	mfcr	r10
	std	r11,PACA_EXTLB+EX_TLB_R11(r13)
#ifdef CONFIG_KVM_BOOKE_HV
BEGIN_FTR_SECTION
	mfspr	r11, SPRN_SRR1
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
#endif
	DO_KVM	\intnum, SPRN_SRR1
	std	r16,PACA_EXTLB+EX_TLB_R16(r13)
	mfspr	r16,\addr		/* get faulting address */
	std	r14,PACA_EXTLB+EX_TLB_R14(r13)
	ld	r14,PACAPGD(r13)
	std	r15,PACA_EXTLB+EX_TLB_R15(r13)
	std	r10,PACA_EXTLB+EX_TLB_CR(r13)
	TLB_MISS_PROLOG_STATS_BOLTED
.endm

.macro tlb_epilog_bolted
	ld	r14,PACA_EXTLB+EX_TLB_CR(r13)
	ld	r10,PACA_EXTLB+EX_TLB_R10(r13)
	ld	r11,PACA_EXTLB+EX_TLB_R11(r13)
	mtcr	r14
	ld	r14,PACA_EXTLB+EX_TLB_R14(r13)
	ld	r15,PACA_EXTLB+EX_TLB_R15(r13)
	TLB_MISS_RESTORE_STATS_BOLTED
	ld	r16,PACA_EXTLB+EX_TLB_R16(r13)
	mfspr	r13,SPRN_SPRG_GEN_SCRATCH
.endm

/* Data TLB miss */
	START_EXCEPTION(data_tlb_miss_bolted)
	tlb_prolog_bolted BOOKE_INTERRUPT_DTLB_MISS SPRN_DEAR

	/* We need _PAGE_PRESENT and _PAGE_ACCESSED set */

	/* We do the user/kernel test for the PID here along with the RW test
	 */
	/* We pre-test some combination of permissions to avoid double
	 * faults:
	 *
	 * We move the ESR:ST bit into the position of _PAGE_BAP_SW in the PTE
	 * ESR_ST is 0x00800000
	 * _PAGE_BAP_SW is 0x00000010
	 * So the shift is >> 19. This tests for supervisor writeability.
	 * If the page happens to be supervisor writeable and not user
	 * writeable, we will take a new fault later, but that should be
	 * a rare enough case.
	 *
	 * We also move ESR_ST in _PAGE_DIRTY position
	 * _PAGE_DIRTY is 0x00001000 so the shift is >> 11
	 *
	 * MAS1 is preset for all we need except for TID that needs to
	 * be cleared for kernel translations
	 */

	mfspr	r11,SPRN_ESR

	srdi	r15,r16,60		/* get region */
	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
	bne-	dtlb_miss_fault_bolted	/* Bail if fault addr is invalid */

	rlwinm	r10,r11,32-19,27,27
	rlwimi	r10,r11,32-16,19,19
	cmpwi	r15,0			/* user vs kernel check */
	ori	r10,r10,_PAGE_PRESENT
	oris	r11,r10,_PAGE_ACCESSED@h

	TLB_MISS_STATS_SAVE_INFO_BOLTED
	bne	tlb_miss_kernel_bolted

tlb_miss_common_bolted:
/*
 * This is the guts of the TLB miss handler for bolted-linear.
 * We are entered with:
 *
 * r16 = faulting address
 * r15 = crap (free to use)
 * r14 = page table base
 * r13 = PACA
 * r11 = PTE permission mask
 * r10 = crap (free to use)
 */
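	/* The rldicl below computes (EA >> PGDIR_SHIFT) << 3, i.e. the byte
	 * offset of the PGD entry; the stray EA bits the rotate drags into
	 * the low 3 positions are cleared by the clrrdi that follows.
	 */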
	rldicl	r15,r16,64-PGDIR_SHIFT+3,64-PGD_INDEX_SIZE-3
	cmpldi	cr0,r14,0
	clrrdi	r15,r15,3
	beq	tlb_miss_fault_bolted	/* No PGDIR, bail */

BEGIN_MMU_FTR_SECTION
	/* Set the TLB reservation and search for existing entry. Then load
	 * the entry.
	 */
	PPC_TLBSRX_DOT(0,R16)
	ldx	r14,r14,r15		/* grab pgd entry */
	beq	tlb_miss_done_bolted	/* tlb exists already, bail */
MMU_FTR_SECTION_ELSE
	ldx	r14,r14,r15		/* grab pgd entry */
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)

#ifndef CONFIG_PPC_64K_PAGES
	rldicl	r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
	clrrdi	r15,r15,3
	cmpdi	cr0,r14,0
	bge	tlb_miss_fault_bolted	/* Bad pgd entry or hugepage; bail */
	ldx	r14,r14,r15		/* grab pud entry */
#endif /* CONFIG_PPC_64K_PAGES */

	rldicl	r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3
	clrrdi	r15,r15,3
	cmpdi	cr0,r14,0
	bge	tlb_miss_fault_bolted
	ldx	r14,r14,r15		/* Grab pmd entry */

	rldicl	r15,r16,64-PAGE_SHIFT+3,64-PTE_INDEX_SIZE-3
	clrrdi	r15,r15,3
	cmpdi	cr0,r14,0
	bge	tlb_miss_fault_bolted
	ldx	r14,r14,r15		/* Grab PTE, normal (!huge) page */

	/* Check if required permissions are met */
	andc.	r15,r11,r14
	rldicr	r15,r14,64-(PTE_RPN_SHIFT-PAGE_SHIFT),63-PAGE_SHIFT
	bne-	tlb_miss_fault_bolted

	/* Now we build the MAS:
	 *
	 * MAS 0   :	Fully setup with defaults in MAS4 and TLBnCFG
	 * MAS 1   :	Almost fully setup
	 *		 - PID already updated by caller if necessary
	 *		 - TSIZE need change if !base page size, not
	 *		   yet implemented for now
	 * MAS 2   :	Defaults not useful, need to be redone
	 * MAS 3+7 :	Needs to be done
	 */
	clrrdi	r11,r16,12		/* Clear low crap in EA */
	clrldi	r15,r15,12		/* Clear crap at the top */
	rlwimi	r11,r14,32-19,27,31	/* Insert WIMGE */
	rlwimi	r15,r14,32-8,22,25	/* Move in U bits */
	mtspr	SPRN_MAS2,r11
	andi.	r11,r14,_PAGE_DIRTY
	rlwimi	r15,r14,32-2,26,31	/* Move in BAP bits */

	/* Mask out SW and UW if !DIRTY (XXX optimize this !) */
	bne	1f
	li	r11,MAS3_SW|MAS3_UW
	andc	r15,r15,r11
1:
	mtspr	SPRN_MAS7_MAS3,r15
	tlbwe

tlb_miss_done_bolted:
	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
	tlb_epilog_bolted
	rfi

itlb_miss_kernel_bolted:
	li	r11,_PAGE_PRESENT|_PAGE_BAP_SX	/* Base perm */
	oris	r11,r11,_PAGE_ACCESSED@h
tlb_miss_kernel_bolted:
	mfspr	r10,SPRN_MAS1
	ld	r14,PACA_KERNELPGD(r13)
	cmpldi	cr0,r15,8		/* Check for vmalloc region */
	rlwinm	r10,r10,0,16,1		/* Clear TID */
	mtspr	SPRN_MAS1,r10
	beq+	tlb_miss_common_bolted

tlb_miss_fault_bolted:
	/* We need to check if it was an instruction miss */
	andi.	r10,r11,_PAGE_EXEC|_PAGE_BAP_SX
	bne	itlb_miss_fault_bolted
dtlb_miss_fault_bolted:
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
	tlb_epilog_bolted
	b	exc_data_storage_book3e
itlb_miss_fault_bolted:
	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
	tlb_epilog_bolted
	b	exc_instruction_storage_book3e

/* Instruction TLB miss */
	START_EXCEPTION(instruction_tlb_miss_bolted)
	tlb_prolog_bolted BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR0

	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
	srdi	r15,r16,60		/* get region */
	TLB_MISS_STATS_SAVE_INFO_BOLTED
	bne-	itlb_miss_fault_bolted

	li	r11,_PAGE_PRESENT|_PAGE_EXEC	/* Base perm */

	/* We do the user/kernel test for the PID here along with the RW test
	 */

	cmpldi	cr0,r15,0		/* Check for user region */
	oris	r11,r11,_PAGE_ACCESSED@h
	beq	tlb_miss_common_bolted
	b	itlb_miss_kernel_bolted

/*
 * TLB miss handling for e6500 and derivatives, using hardware tablewalk.
 *
 * Linear mapping is bolted: no virtual page table or nested TLB misses
 * Indirect entries in TLB1, hardware loads resulting direct entries
 *    into TLB0
 * No HES or NV hint on TLB1, so we need to do software round-robin
 * No tlbsrx. so we need a spinlock, and we have to deal
 *    with MAS-damage caused by tlbsx
 * 4K pages only
 */

	START_EXCEPTION(instruction_tlb_miss_e6500)
	tlb_prolog_bolted BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR0

	ld	r11,PACA_TCD_PTR(r13)
	srdi.	r15,r16,60		/* get region */
	ori	r16,r16,1

	TLB_MISS_STATS_SAVE_INFO_BOLTED
	bne	tlb_miss_kernel_e6500	/* user/kernel test */

	b	tlb_miss_common_e6500

	START_EXCEPTION(data_tlb_miss_e6500)
	tlb_prolog_bolted BOOKE_INTERRUPT_DTLB_MISS SPRN_DEAR

	ld	r11,PACA_TCD_PTR(r13)
	srdi.	r15,r16,60		/* get region */
	rldicr	r16,r16,0,62

	TLB_MISS_STATS_SAVE_INFO_BOLTED
	bne	tlb_miss_kernel_e6500	/* user vs kernel check */

/*
 * This is the guts of the TLB miss handler for e6500 and derivatives.
 * We are entered with:
 *
 * r16 = page of faulting address (low bit 0 if data, 1 if instruction)
 * r15 = crap (free to use)
 * r14 = page table base
 * r13 = PACA
 * r11 = tlb_per_core ptr
 * r10 = crap (free to use)
 */
tlb_miss_common_e6500:
	/*
	 * Search if we already have an indirect entry for that virtual
	 * address, and if we do, bail out.
	 *
	 * MAS6:IND should be already set based on MAS4
	 */
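	/* Take the per-core TLB lock: atomically claim the TCD_LOCK byte
	 * with lbarx/stbcx., spinning out of line (.subsection 1) while
	 * another thread holds it.
	 */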
	addi	r10,r11,TCD_LOCK
1:	lbarx	r15,0,r10
	cmpdi	r15,0
	bne	2f
	li	r15,1
	stbcx.	r15,0,r10
	bne	1b
	.subsection 1
2:	lbz	r15,0(r10)
	cmpdi	r15,0
	bne	2b
	b	1b
	.previous

	mfspr	r15,SPRN_MAS2

	tlbsx	0,r16
	mfspr	r10,SPRN_MAS1
	andis.	r10,r10,MAS1_VALID@h
	bne	tlb_miss_done_e6500

	/* Undo MAS-damage from the tlbsx */
	mfspr	r10,SPRN_MAS1
	oris	r10,r10,MAS1_VALID@h
	mtspr	SPRN_MAS1,r10
	mtspr	SPRN_MAS2,r15

	/* Now, we need to walk the page tables. First check if we are in
	 * range.
	 */
	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
	bne-	tlb_miss_fault_e6500

	rldicl	r15,r16,64-PGDIR_SHIFT+3,64-PGD_INDEX_SIZE-3
	cmpldi	cr0,r14,0
	clrrdi	r15,r15,3
	beq-	tlb_miss_fault_e6500	/* No PGDIR, bail */
	ldx	r14,r14,r15		/* grab pgd entry */

	rldicl	r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
	clrrdi	r15,r15,3
	cmpdi	cr0,r14,0
	bge	tlb_miss_fault_e6500	/* Bad pgd entry or hugepage; bail */
	ldx	r14,r14,r15		/* grab pud entry */

	rldicl	r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3
	clrrdi	r15,r15,3
	cmpdi	cr0,r14,0
	bge	tlb_miss_fault_e6500
	ldx	r14,r14,r15		/* Grab pmd entry */

	mfspr	r10,SPRN_MAS0
	cmpdi	cr0,r14,0
	bge	tlb_miss_fault_e6500

	/* Now we build the MAS for a 2M indirect page:
	 *
	 * MAS 0   :	ESEL needs to be filled by software round-robin
	 * MAS 1   :	Fully set up
	 *		 - PID already updated by caller if necessary
	 *		 - TSIZE for now is base ind page size always
	 *		 - TID already cleared if necessary
	 * MAS 2   :	Default not 2M-aligned, need to be redone
	 * MAS 3+7 :	Needs to be done
	 */

	ori	r14,r14,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT)
	mtspr	SPRN_MAS7_MAS3,r14

	clrrdi	r15,r16,21		/* make EA 2M-aligned */
	mtspr	SPRN_MAS2,r15

	lbz	r15,TCD_ESEL_NEXT(r11)
	lbz	r16,TCD_ESEL_MAX(r11)
	lbz	r14,TCD_ESEL_FIRST(r11)
	rlwimi	r10,r15,16,0x00ff0000	/* insert esel_next into MAS0 */
	addi	r15,r15,1		/* increment esel_next */
	mtspr	SPRN_MAS0,r10
	cmpw	r15,r16
	iseleq	r15,r14,r15		/* if next == last use first */
	stb	r15,TCD_ESEL_NEXT(r11)

	tlbwe

tlb_miss_done_e6500:
	.macro	tlb_unlock_e6500
	li	r15,0
	isync
	stb	r15,TCD_LOCK(r11)
	.endm

	tlb_unlock_e6500
	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
	tlb_epilog_bolted
	rfi

tlb_miss_kernel_e6500:
	mfspr	r10,SPRN_MAS1
	ld	r14,PACA_KERNELPGD(r13)
	cmpldi	cr0,r15,8		/* Check for vmalloc region */
	rlwinm	r10,r10,0,16,1		/* Clear TID */
	mtspr	SPRN_MAS1,r10
	beq+	tlb_miss_common_e6500

tlb_miss_fault_e6500:
	tlb_unlock_e6500
	/* We need to check if it was an instruction miss */
	andi.	r16,r16,1
	bne	itlb_miss_fault_e6500
dtlb_miss_fault_e6500:
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
	tlb_epilog_bolted
	b	exc_data_storage_book3e
itlb_miss_fault_e6500:
	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
	tlb_epilog_bolted
	b	exc_instruction_storage_book3e


/**********************************************************************
 *                                                                    *
 * TLB miss handling for Book3E with TLB reservation and HES support  *
 *                                                                    *
 **********************************************************************/


/* Data TLB miss */
	START_EXCEPTION(data_tlb_miss)
	TLB_MISS_PROLOG

	/* Now we handle the fault proper. We only save DEAR in the normal
	 * fault case since that's the only interesting value here.
	 * We could probably also optimize by not saving SRR0/1 in the
	 * linear mapping case but I'll leave that for later
	 */
	mfspr	r14,SPRN_ESR
	mfspr	r16,SPRN_DEAR		/* get faulting address */
	srdi	r15,r16,60		/* get region */
	cmpldi	cr0,r15,0xc		/* linear mapping ? */
	TLB_MISS_STATS_SAVE_INFO
	beq	tlb_load_linear		/* yes -> go to linear map load */

	/* The page tables are mapped virtually linear. At this point, though,
	 * we don't know whether we are trying to fault in a first level
	 * virtual address or a virtual page table address. We can get that
	 * from bit 0x1 of the region ID which we have set for a page table
	 */
	andi.	r10,r15,0x1
	bne-	virt_page_table_tlb_miss

	std	r14,EX_TLB_ESR(r12);	/* save ESR */
	std	r16,EX_TLB_DEAR(r12);	/* save DEAR */

	/* We need _PAGE_PRESENT and _PAGE_ACCESSED set */
	li	r11,_PAGE_PRESENT
	oris	r11,r11,_PAGE_ACCESSED@h

	/* We do the user/kernel test for the PID here along with the RW test
	 */
	cmpldi	cr0,r15,0		/* Check for user region */

	/* We pre-test some combination of permissions to avoid double
	 * faults:
	 *
	 * We move the ESR:ST bit into the position of _PAGE_BAP_SW in the PTE
	 * ESR_ST is 0x00800000
	 * _PAGE_BAP_SW is 0x00000010
	 * So the shift is >> 19. This tests for supervisor writeability.
	 * If the page happens to be supervisor writeable and not user
	 * writeable, we will take a new fault later, but that should be
	 * a rare enough case.
	 *
	 * We also move ESR_ST in _PAGE_DIRTY position
	 * _PAGE_DIRTY is 0x00001000 so the shift is >> 11
	 *
	 * MAS1 is preset for all we need except for TID that needs to
	 * be cleared for kernel translations
	 */
	rlwimi	r11,r14,32-19,27,27
	rlwimi	r11,r14,32-16,19,19
	beq	normal_tlb_miss
	/* XXX replace the RMW cycles with immediate loads + writes */
1:	mfspr	r10,SPRN_MAS1
	cmpldi	cr0,r15,8		/* Check for vmalloc region */
	rlwinm	r10,r10,0,16,1		/* Clear TID */
	mtspr	SPRN_MAS1,r10
	beq+	normal_tlb_miss

	/* We got a crappy address, just fault with whatever DEAR and ESR
	 * are here
	 */
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e

/* Instruction TLB miss */
	START_EXCEPTION(instruction_tlb_miss)
	TLB_MISS_PROLOG

	/* If we take a recursive fault, the second level handler may need
	 * to know whether we are handling a data or instruction fault in
	 * order to get to the right store fault handler. We provide that
	 * info by writing a crazy value in ESR in our exception frame
	 */
	li	r14,-1	/* store to exception frame is done later */

	/* Now we handle the fault proper. We only save DEAR in the non
	 * linear mapping case since we know the linear mapping case will
	 * not re-enter. We could indeed optimize and also not save SRR0/1
	 * in the linear mapping case but I'll leave that for later
	 *
	 * Faulting address is SRR0 which is already in r16
	 */
	srdi	r15,r16,60		/* get region */
	cmpldi	cr0,r15,0xc		/* linear mapping ? */
	TLB_MISS_STATS_SAVE_INFO
	beq	tlb_load_linear		/* yes -> go to linear map load */

	/* We do the user/kernel test for the PID here along with the RW test
	 */
	li	r11,_PAGE_PRESENT|_PAGE_EXEC	/* Base perm */
	oris	r11,r11,_PAGE_ACCESSED@h

	cmpldi	cr0,r15,0		/* Check for user region */
	std	r14,EX_TLB_ESR(r12)	/* write crazy -1 to frame */
	beq	normal_tlb_miss

	li	r11,_PAGE_PRESENT|_PAGE_BAP_SX	/* Base perm */
	oris	r11,r11,_PAGE_ACCESSED@h
	/* XXX replace the RMW cycles with immediate loads + writes */
	mfspr	r10,SPRN_MAS1
	cmpldi	cr0,r15,8		/* Check for vmalloc region */
	rlwinm	r10,r10,0,16,1		/* Clear TID */
	mtspr	SPRN_MAS1,r10
	beq+	normal_tlb_miss

	/* We got a crappy address, just fault */
	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_instruction_storage_book3e

/*
 * This is the guts of the first-level TLB miss handler for direct
 * misses. We are entered with:
 *
 * r16 = faulting address
 * r15 = region ID
 * r14 = crap (free to use)
 * r13 = PACA
 * r12 = TLB exception frame in PACA
 * r11 = PTE permission mask
 * r10 = crap (free to use)
 */
normal_tlb_miss:
	/* So we first construct the page table address. We do that by
	 * shifting the bottom of the address (not the region ID) by
	 * PAGE_SHIFT-3, clearing the bottom 3 bits (get a PTE ptr) and
	 * or'ing the fourth high bit.
	 *
	 * NOTE: For 64K pages, we do things slightly differently in
	 * order to handle the weird page table format used by linux
	 */
	ori	r10,r15,0x1
#ifdef CONFIG_PPC_64K_PAGES
	/* For the top bits, 16 bytes per PTE */
	rldicl	r14,r16,64-(PAGE_SHIFT-4),PAGE_SHIFT-4+4
	/* Now create the bottom bits as 0 in position 0x8000 and
	 * the rest calculated for 8 bytes per PTE
	 */
	rldicl	r15,r16,64-(PAGE_SHIFT-3),64-15
	/* Insert the bottom bits in */
	rlwimi	r14,r15,0,16,31
#else
	rldicl	r14,r16,64-(PAGE_SHIFT-3),PAGE_SHIFT-3+4
#endif
	sldi	r15,r10,60
	clrrdi	r14,r14,3
	or	r10,r15,r14

BEGIN_MMU_FTR_SECTION
	/* Set the TLB reservation and search for existing entry. Then load
	 * the entry.
	 */
	PPC_TLBSRX_DOT(0,R16)
	ld	r14,0(r10)
	beq	normal_tlb_miss_done
MMU_FTR_SECTION_ELSE
	ld	r14,0(r10)
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)

finish_normal_tlb_miss:
	/* Check if required permissions are met */
	andc.	r15,r11,r14
	bne-	normal_tlb_miss_access_fault

	/* Now we build the MAS:
	 *
	 * MAS 0   :	Fully setup with defaults in MAS4 and TLBnCFG
	 * MAS 1   :	Almost fully setup
	 *		 - PID already updated by caller if necessary
	 *		 - TSIZE need change if !base page size, not
	 *		   yet implemented for now
	 * MAS 2   :	Defaults not useful, need to be redone
	 * MAS 3+7 :	Needs to be done
	 *
	 * TODO: mix up code below for better scheduling
	 */
	clrrdi	r11,r16,12		/* Clear low crap in EA */
	rlwimi	r11,r14,32-19,27,31	/* Insert WIMGE */
	mtspr	SPRN_MAS2,r11

	/* Check page size, if not standard, update MAS1 */
	rldicl	r11,r14,64-8,64-8
#ifdef CONFIG_PPC_64K_PAGES
	cmpldi	cr0,r11,BOOK3E_PAGESZ_64K
#else
	cmpldi	cr0,r11,BOOK3E_PAGESZ_4K
#endif
	beq-	1f
	mfspr	r11,SPRN_MAS1
	rlwimi	r11,r14,31,21,24
	rlwinm	r11,r11,0,21,19
	mtspr	SPRN_MAS1,r11
1:
	/* Move RPN in position */
	rldicr	r11,r14,64-(PTE_RPN_SHIFT-PAGE_SHIFT),63-PAGE_SHIFT
	clrldi	r15,r11,12		/* Clear crap at the top */
	rlwimi	r15,r14,32-8,22,25	/* Move in U bits */
	rlwimi	r15,r14,32-2,26,31	/* Move in BAP bits */

	/* Mask out SW and UW if !DIRTY (XXX optimize this !) */
	andi.	r11,r14,_PAGE_DIRTY
	bne	1f
	li	r11,MAS3_SW|MAS3_UW
	andc	r15,r15,r11
1:
BEGIN_MMU_FTR_SECTION
	srdi	r16,r15,32
	mtspr	SPRN_MAS3,r15
	mtspr	SPRN_MAS7,r16
MMU_FTR_SECTION_ELSE
	mtspr	SPRN_MAS7_MAS3,r15
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)

	tlbwe

normal_tlb_miss_done:
	/* We don't bother with restoring DEAR or ESR since we know we are
	 * level 0 and just going back to userland. They are only needed
	 * if you are going to take an access fault
	 */
	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
	TLB_MISS_EPILOG_SUCCESS
	rfi

normal_tlb_miss_access_fault:
	/* We need to check if it was an instruction miss */
	andi.	r10,r11,_PAGE_EXEC
	bne	1f
	ld	r14,EX_TLB_DEAR(r12)
	ld	r15,EX_TLB_ESR(r12)
	mtspr	SPRN_DEAR,r14
	mtspr	SPRN_ESR,r15
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e
1:	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_instruction_storage_book3e


/*
 * This is the guts of the second-level TLB miss handler for direct
 * misses. We are entered with:
 *
 * r16 = virtual page table faulting address
 * r15 = region (top 4 bits of address)
 * r14 = crap (free to use)
 * r13 = PACA
 * r12 = TLB exception frame in PACA
 * r11 = crap (free to use)
 * r10 = crap (free to use)
 *
 * Note that this should only ever be called as a second level handler
 * with the current scheme when using SW load.
 * That means we can always get the original fault DEAR at
 * EX_TLB_DEAR-EX_TLB_SIZE(r12)
 *
 * It can be re-entered by the linear mapping miss handler. However, to
 * avoid too much complication, it will restart the whole fault at level
 * 0 so we don't care too much about clobbers
 *
 * XXX That code was written back when we couldn't clobber r14. We can now,
 * so we could probably optimize things a bit
 */
virt_page_table_tlb_miss:
	/* Are we hitting a kernel page table ? */
	andi.	r10,r15,0x8

	/* The cool thing now is that r10 contains 0 for user and 8 for kernel,
	 * and we happen to have the swapper_pg_dir at offset 8 from the user
	 * pgdir in the PACA :-).
	 */
	add	r11,r10,r13

	/* If kernel, we need to clear MAS1 TID */
	beq	1f
	/* XXX replace the RMW cycles with immediate loads + writes */
	mfspr	r10,SPRN_MAS1
	rlwinm	r10,r10,0,16,1		/* Clear TID */
	mtspr	SPRN_MAS1,r10
1:
BEGIN_MMU_FTR_SECTION
	/* Search if we already have a TLB entry for that virtual address, and
	 * if we do, bail out.
	 */
	PPC_TLBSRX_DOT(0,R16)
	beq	virt_page_table_tlb_miss_done
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)

	/* Now, we need to walk the page tables. First check if we are in
	 * range.
	 */
	rldicl.	r10,r16,64-(VPTE_INDEX_SIZE+3),VPTE_INDEX_SIZE+3+4
	bne-	virt_page_table_tlb_miss_fault

	/* Get the PGD pointer */
	ld	r15,PACAPGD(r11)
	cmpldi	cr0,r15,0
	beq-	virt_page_table_tlb_miss_fault

	/* Get to PGD entry */
	rldicl	r11,r16,64-VPTE_PGD_SHIFT,64-PGD_INDEX_SIZE-3
	clrrdi	r10,r11,3
	ldx	r15,r10,r15
	cmpdi	cr0,r15,0
	bge	virt_page_table_tlb_miss_fault

#ifndef CONFIG_PPC_64K_PAGES
	/* Get to PUD entry */
	rldicl	r11,r16,64-VPTE_PUD_SHIFT,64-PUD_INDEX_SIZE-3
	clrrdi	r10,r11,3
	ldx	r15,r10,r15
	cmpdi	cr0,r15,0
	bge	virt_page_table_tlb_miss_fault
#endif /* CONFIG_PPC_64K_PAGES */

	/* Get to PMD entry */
	rldicl	r11,r16,64-VPTE_PMD_SHIFT,64-PMD_INDEX_SIZE-3
	clrrdi	r10,r11,3
	ldx	r15,r10,r15
	cmpdi	cr0,r15,0
	bge	virt_page_table_tlb_miss_fault

	/* Ok, we're all right, we can now create a kernel translation for
	 * a 4K or 64K page from r16 -> r15.
	 */
	/* Now we build the MAS:
	 *
	 * MAS 0   :	Fully setup with defaults in MAS4 and TLBnCFG
	 * MAS 1   :	Almost fully setup
	 *		 - PID already updated by caller if necessary
	 *		 - TSIZE for now is base page size always
	 * MAS 2   :	Use defaults
	 * MAS 3+7 :	Needs to be done
	 *
	 * So we only do MAS 2 and 3 for now...
	 */
	clrldi	r11,r15,4		/* remove region ID from RPN */
	ori	r10,r11,1		/* Or-in SR */

BEGIN_MMU_FTR_SECTION
	srdi	r16,r10,32
	mtspr	SPRN_MAS3,r10
	mtspr	SPRN_MAS7,r16
MMU_FTR_SECTION_ELSE
	mtspr	SPRN_MAS7_MAS3,r10
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)

	tlbwe

BEGIN_MMU_FTR_SECTION
virt_page_table_tlb_miss_done:

	/* We have overridden MAS2:EPN but currently our primary TLB miss
	 * handler will always restore it so that should not be an issue.
	 * If we ever optimize the primary handler to not write MAS2 in
	 * some cases, we'll have to restore MAS2:EPN here based on the
	 * original fault's DEAR. If we do that we have to modify the
	 * ITLB miss handler to also store SRR0 in the exception frame
	 * as DEAR.
	 *
	 * However, one nasty thing we did is we cleared the reservation
	 * (well, potentially we did). So as a trick, if we are not a
	 * level 0 exception (we interrupted the TLB miss) we offset the
	 * return address by -4 in order to replay the tlbsrx instruction
	 * there.
	 */
	subf	r10,r13,r12
	cmpldi	cr0,r10,PACA_EXTLB+EX_TLB_SIZE
	bne-	1f
	ld	r11,PACA_EXTLB+EX_TLB_SIZE+EX_TLB_SRR0(r13)
	addi	r10,r11,-4
	std	r10,PACA_EXTLB+EX_TLB_SIZE+EX_TLB_SRR0(r13)
1:
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)
	/* Return to caller, normal case */
	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_PT_OK);
	TLB_MISS_EPILOG_SUCCESS
	rfi

virt_page_table_tlb_miss_fault:
	/* If we fault here, things are a little bit tricky. We need to call
	 * either the data or the instruction store fault handler, and we
	 * need to retrieve the original fault address and ESR (for data).
	 *
	 * The thing is, we know that in normal circumstances, this is
	 * always called as a second level tlb miss for SW load or as a first
	 * level TLB miss for HW load, so we should be able to peek at the
	 * relevant information in the first exception frame in the PACA.
	 *
	 * However, we do need to double check that, because we may just hit
	 * a stray kernel pointer or a userland attack trying to hit those
	 * areas. If that is the case, we do a data fault. (We can't get here
	 * from an instruction tlb miss anyway).
	 *
	 * Note also that when going to a fault, we must unwind the previous
	 * level as well. Since we are doing that, we don't need to clear or
	 * restore the TLB reservation either.
	 */
	subf	r10,r13,r12
	cmpldi	cr0,r10,PACA_EXTLB+EX_TLB_SIZE
	bne-	virt_page_table_tlb_miss_whacko_fault

	/* We dig the original DEAR and ESR from slot 0 */
	ld	r15,EX_TLB_DEAR+PACA_EXTLB(r13)
	ld	r16,EX_TLB_ESR+PACA_EXTLB(r13)

	/* We check for the "special" ESR value for instruction faults */
	cmpdi	cr0,r16,-1
	beq	1f
	mtspr	SPRN_DEAR,r15
	mtspr	SPRN_ESR,r16
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_PT_FAULT);
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e
1:	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_PT_FAULT);
	TLB_MISS_EPILOG_ERROR
	b	exc_instruction_storage_book3e

virt_page_table_tlb_miss_whacko_fault:
	/* The linear fault will restart everything so ESR and DEAR will
	 * not have been clobbered, let's just fault with what we have
	 */
	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_PT_FAULT);
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e


/**************************************************************
 *                                                            *
 * TLB miss handling for Book3E with hw page table support    *
 *                                                            *
 **************************************************************/


/* Data TLB miss */
	START_EXCEPTION(data_tlb_miss_htw)
	TLB_MISS_PROLOG

	/* Now we handle the fault proper. We only save DEAR in the normal
	 * fault case since that's the only interesting value here.
	 * We could probably also optimize by not saving SRR0/1 in the
	 * linear mapping case but I'll leave that for later
	 */
	mfspr	r14,SPRN_ESR
	mfspr	r16,SPRN_DEAR		/* get faulting address */
	srdi	r11,r16,60		/* get region */
	cmpldi	cr0,r11,0xc		/* linear mapping ? */
	TLB_MISS_STATS_SAVE_INFO
	beq	tlb_load_linear		/* yes -> go to linear map load */

	/* We do the user/kernel test for the PID here along with the RW test
	 */
	cmpldi	cr0,r11,0		/* Check for user region */
	ld	r15,PACAPGD(r13)	/* Load user pgdir */
	beq	htw_tlb_miss

	/* XXX replace the RMW cycles with immediate loads + writes */
1:	mfspr	r10,SPRN_MAS1
	cmpldi	cr0,r11,8		/* Check for vmalloc region */
	rlwinm	r10,r10,0,16,1		/* Clear TID */
	mtspr	SPRN_MAS1,r10
	ld	r15,PACA_KERNELPGD(r13)	/* Load kernel pgdir */
	beq+	htw_tlb_miss

	/* We got a crappy address, just fault with whatever DEAR and ESR
	 * are here
	 */
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e

/* Instruction TLB miss */
	START_EXCEPTION(instruction_tlb_miss_htw)
	TLB_MISS_PROLOG

	/* If we take a recursive fault, the second level handler may need
	 * to know whether we are handling a data or instruction fault in
	 * order to get to the right store fault handler. We provide that
	 * info by keeping a crazy value for ESR in r14
	 */
	li	r14,-1	/* store to exception frame is done later */

	/* Now we handle the fault proper. We only save DEAR in the non
	 * linear mapping case since we know the linear mapping case will
	 * not re-enter. We could indeed optimize and also not save SRR0/1
	 * in the linear mapping case but I'll leave that for later
	 *
	 * Faulting address is SRR0 which is already in r16
	 */
	srdi	r11,r16,60		/* get region */
	cmpldi	cr0,r11,0xc		/* linear mapping ? */
	TLB_MISS_STATS_SAVE_INFO
	beq	tlb_load_linear		/* yes -> go to linear map load */

	/* We do the user/kernel test for the PID here along with the RW test
	 */
	cmpldi	cr0,r11,0		/* Check for user region */
	ld	r15,PACAPGD(r13)	/* Load user pgdir */
	beq	htw_tlb_miss

	/* XXX replace the RMW cycles with immediate loads + writes */
1:	mfspr	r10,SPRN_MAS1
	cmpldi	cr0,r11,8		/* Check for vmalloc region */
	rlwinm	r10,r10,0,16,1		/* Clear TID */
	mtspr	SPRN_MAS1,r10
	ld	r15,PACA_KERNELPGD(r13)	/* Load kernel pgdir */
	beq+	htw_tlb_miss

	/* We got a crappy address, just fault */
	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_instruction_storage_book3e


/*
 * This is the guts of the second-level TLB miss handler for direct
 * misses. We are entered with:
 *
 * r16 = virtual page table faulting address
 * r15 = PGD pointer
 * r14 = ESR
 * r13 = PACA
 * r12 = TLB exception frame in PACA
 * r11 = crap (free to use)
 * r10 = crap (free to use)
 *
 * It can be re-entered by the linear mapping miss handler. However, to
 * avoid too much complication, it will save/restore things for us
 */
htw_tlb_miss:
	/* Search if we already have a TLB entry for that virtual address, and
	 * if we do, bail out.
	 *
	 * MAS1:IND should be already set based on MAS4
	 */
	PPC_TLBSRX_DOT(0,R16)
	beq	htw_tlb_miss_done

	/* Now, we need to walk the page tables. First check if we are in
	 * range.
	 */
	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
	bne-	htw_tlb_miss_fault

	/* Get the PGD pointer */
	cmpldi	cr0,r15,0
	beq-	htw_tlb_miss_fault

	/* Get to PGD entry */
	rldicl	r11,r16,64-(PGDIR_SHIFT-3),64-PGD_INDEX_SIZE-3
	clrrdi	r10,r11,3
	ldx	r15,r10,r15
	cmpdi	cr0,r15,0
	bge	htw_tlb_miss_fault

#ifndef CONFIG_PPC_64K_PAGES
	/* Get to PUD entry */
	rldicl	r11,r16,64-(PUD_SHIFT-3),64-PUD_INDEX_SIZE-3
	clrrdi	r10,r11,3
	ldx	r15,r10,r15
	cmpdi	cr0,r15,0
	bge	htw_tlb_miss_fault
#endif /* CONFIG_PPC_64K_PAGES */

	/* Get to PMD entry */
	rldicl	r11,r16,64-(PMD_SHIFT-3),64-PMD_INDEX_SIZE-3
	clrrdi	r10,r11,3
	ldx	r15,r10,r15
	cmpdi	cr0,r15,0
	bge	htw_tlb_miss_fault

	/* Ok, we're all right, we can now create an indirect entry for
	 * a 1M or 256M page.
	 *
	 * The last trick is now that because we use "half" pages for
	 * the HTW (1M IND is 2K and 256M IND is 32K) we need to account
	 * for an added LSB bit to the RPN. For 64K pages, there is no
	 * problem as we already use 32K arrays (half PTE pages), but for
	 * 4K page we need to extract a bit from the virtual address and
	 * insert it into the "PA52" bit of the RPN.
	 */
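	/* (With 4K pages a 1M indirect entry covers 256 PTEs, i.e. a 2K
	 * half of the 4K PTE page; EA bit 20 selects which half and is
	 * inserted below as the extra low RPN bit.)
	 */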
#ifndef CONFIG_PPC_64K_PAGES
	rlwimi	r15,r16,32-9,20,20
#endif
	/* Now we build the MAS:
	 *
	 * MAS 0   :	Fully setup with defaults in MAS4 and TLBnCFG
	 * MAS 1   :	Almost fully setup
	 *		 - PID already updated by caller if necessary
	 *		 - TSIZE for now is base ind page size always
	 * MAS 2   :	Use defaults
	 * MAS 3+7 :	Needs to be done
	 */
#ifdef CONFIG_PPC_64K_PAGES
	ori	r10,r15,(BOOK3E_PAGESZ_64K << MAS3_SPSIZE_SHIFT)
#else
	ori	r10,r15,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT)
#endif

BEGIN_MMU_FTR_SECTION
	srdi	r16,r10,32
	mtspr	SPRN_MAS3,r10
	mtspr	SPRN_MAS7,r16
MMU_FTR_SECTION_ELSE
	mtspr	SPRN_MAS7_MAS3,r10
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)

	tlbwe

htw_tlb_miss_done:
	/* We don't bother with restoring DEAR or ESR since we know we are
	 * level 0 and just going back to userland. They are only needed
	 * if you are going to take an access fault
	 */
	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_PT_OK)
	TLB_MISS_EPILOG_SUCCESS
	rfi

htw_tlb_miss_fault:
	/* We need to check if it was an instruction miss. We know this
	 * though because r14 would contain -1
	 */
	cmpdi	cr0,r14,-1
	beq	1f
	mtspr	SPRN_DEAR,r16
	mtspr	SPRN_ESR,r14
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_PT_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e
1:	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_PT_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_instruction_storage_book3e

/*
 * This is the guts of "any" level TLB miss handler for kernel linear
 * mapping misses. We are entered with:
 *
 * r16 = faulting address
 * r15 = crap (free to use)
 * r14 = ESR (data) or -1 (instruction)
 * r13 = PACA
 * r12 = TLB exception frame in PACA
 * r11 = crap (free to use)
 * r10 = crap (free to use)
 *
 * In addition we know that we will not re-enter, so in theory, we could
 * use a simpler epilog not restoring SRR0/1 etc.. but we'll do that later.
 *
 * We also need to be careful about MAS registers here & TLB reservation,
 * as we know we'll have clobbered them if we interrupt the main TLB miss
 * handlers in which case we probably want to do a full restart at level
 * 0 rather than saving / restoring the MAS.
 *
 * Note: If we care about performance of that core, we can easily shuffle
 * a few things around
 */
tlb_load_linear:
	/* For now, we assume the linear mapping is contiguous and stops at
	 * linear_map_top. We also assume the size is a multiple of 1G, thus
	 * we only use 1G pages for now. That might have to be changed in a
	 * final implementation, especially when dealing with hypervisors
	 */
	ld	r11,PACATOC(r13)
	ld	r11,linear_map_top@got(r11)
	ld	r10,0(r11)
	cmpld	cr0,r10,r16
	bge	tlb_load_linear_fault

	/* MAS1 need whole new setup. */
	li	r15,(BOOK3E_PAGESZ_1GB<<MAS1_TSIZE_SHIFT)
	oris	r15,r15,MAS1_VALID@h	/* MAS1 needs V and TSIZE */
	mtspr	SPRN_MAS1,r15

	/* Already somebody there ? */
	PPC_TLBSRX_DOT(0,R16)
	beq	tlb_load_linear_done

	/* Now we build the remaining MAS. MAS0 and 2 should be fine
	 * with their defaults, which leaves us with MAS 3 and 7. The
	 * mapping is linear, so we just take the address, clear the
	 * region bits, and or in the permission bits which are currently
	 * hard wired
	 */
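	/* e.g. EA 0xc000000047001234: clrrdi(30) gives 0xc000000040000000,
	 * and clrldi(4) then yields 0x0000000040000000, the RPN of the
	 * 1G page.
	 */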
	clrrdi	r10,r16,30		/* 1G page index */
	clrldi	r10,r10,4		/* clear region bits */
	ori	r10,r10,MAS3_SR|MAS3_SW|MAS3_SX

BEGIN_MMU_FTR_SECTION
	srdi	r16,r10,32
	mtspr	SPRN_MAS3,r10
	mtspr	SPRN_MAS7,r16
MMU_FTR_SECTION_ELSE
	mtspr	SPRN_MAS7_MAS3,r10
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)

	tlbwe

tlb_load_linear_done:
	/* We use the "error" epilog for success as we do want to
	 * restore to the initial faulting context, whatever it was.
	 * We do that because we can't resume a fault within a TLB
	 * miss handler, due to MAS and TLB reservation being clobbered.
	 */
	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_LINEAR)
	TLB_MISS_EPILOG_ERROR
	rfi

tlb_load_linear_fault:
	/* We keep the DEAR and ESR around, this shouldn't have happened */
	cmpdi	cr0,r14,-1
	beq	1f
	TLB_MISS_EPILOG_ERROR_SPECIAL
	b	exc_data_storage_book3e
1:	TLB_MISS_EPILOG_ERROR_SPECIAL
	b	exc_instruction_storage_book3e


#ifdef CONFIG_BOOK3E_MMU_TLB_STATS
.tlb_stat_inc:
1:	ldarx	r8,0,r9
	addi	r8,r8,1
	stdcx.	r8,0,r9
	bne-	1b
	blr
#endif