/*
 * arch/ppc64/mm/slb_low.S
 *
 * Low-level SLB routines
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 *
 * Based on earlier C version:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/offsets.h>
#include <asm/cputable.h>

/* void slb_allocate(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 * r3 = faulting address, r13 = PACA
 * r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 */
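/*
 * Rough flow: pick a victim slot round-robin (skipping the bolted
 * entries), classify the EA as kernel or user, build a proto-VSID,
 * scramble it into the real VSID, write the entry with slbmte, and
 * for user addresses record the ESID in the paca's slb_cache so the
 * next context switch can invalidate it.
 */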
_GLOBAL(slb_allocate)
	/*
	 * First find a slot, round robin. Previously we tried to find
	 * a free slot first but that took too long. Unfortunately we
	 * don't have any LRU information to help us choose a slot.
	 */
#ifdef CONFIG_PPC_ISERIES
	/*
	 * On iSeries, the "bolted" stack segment can be cast out on
	 * shared processor switch so we need to check for a miss on
	 * it and restore it to the right slot.
	 */
	ld	r9,PACAKSAVE(r13)
	clrrdi	r9,r9,28
	clrrdi	r11,r3,28
	li	r10,SLB_NUM_BOLTED-1	/* Stack goes in last bolted slot */
	cmpld	r9,r11
	beq	3f
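	/* ESIDs match: the miss is on the kernel stack segment itself,
	 * so reinsert it at the bolted slot index already in r10 and
	 * skip the round-robin selection below. */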
#endif /* CONFIG_PPC_ISERIES */

	ld	r10,PACASTABRR(r13)
	addi	r10,r10,1
	/* use a cpu feature mask if we ever change our slb size */
	cmpldi	r10,SLB_NUM_ENTRIES

	blt+	4f
	li	r10,SLB_NUM_BOLTED

4:
	std	r10,PACASTABRR(r13)
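	/* In C terms, roughly (a sketch; names follow the asm):
	 *	entry = paca->stab_rr + 1;
	 *	if (entry >= SLB_NUM_ENTRIES)
	 *		entry = SLB_NUM_BOLTED;
	 *	paca->stab_rr = entry;
	 * The wrap target is SLB_NUM_BOLTED, not 0, so the bolted
	 * slots 0..SLB_NUM_BOLTED-1 are never chosen as victims. */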
3:
	/* r3 = faulting address, r10 = entry */

	srdi	r9,r3,60		/* get region */
	srdi	r3,r3,28		/* get esid */
	cmpldi	cr7,r9,0xc		/* cmp KERNELBASE for later use */

	rldimi	r10,r3,28,0		/* r10 = ESID<<28 | entry */
	oris	r10,r10,SLB_ESID_V@h	/* r10 |= SLB_ESID_V */
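	/* r10 now matches the layout slbmte expects in RB: the ESID in
	 * the upper 36 bits, the valid bit below it, and the victim
	 * slot index in the low bits. */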

	/* r3 = esid, r10 = esid_data, cr7 = <>KERNELBASE */

	blt	cr7,0f			/* user or kernel? */

	/* kernel address: proto-VSID = ESID */
	/* WARNING - MAGIC: we don't use the VSID 0xfffffffff, but
	 * this code will generate the proto-VSID 0xfffffffff for the
	 * top segment.  That's ok, the scramble below will translate
	 * it to VSID 0, which is reserved as a bad VSID - one which
	 * will never have any pages in it. */
	li	r11,SLB_VSID_KERNEL
BEGIN_FTR_SECTION
	bne	cr7,9f
	li	r11,(SLB_VSID_KERNEL|SLB_VSID_L)
END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
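	/* The feature section above is patched at boot: on CPUs without
	 * CPU_FTR_16M_PAGE it is nopped out, leaving plain
	 * SLB_VSID_KERNEL; with it, linear-mapping (0xc) segments get
	 * the large-page flag. */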
	b	9f

0:	/* user address: proto-VSID = context<<15 | ESID */
	li	r11,SLB_VSID_USER

	srdi.	r9,r3,13
	bne-	8f			/* invalid ea bits set */
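	/* r3 holds EA >> 28, so a nonzero result after a further shift
	 * right by 13 means the EA is at or above 1 << 41, outside the
	 * supported user address space. */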

#ifdef CONFIG_HUGETLB_PAGE
BEGIN_FTR_SECTION
	/* check against the hugepage ranges */
	cmpldi	r3,(TASK_HPAGE_END>>SID_SHIFT)
	bge	6f			/* >= TASK_HPAGE_END */
	cmpldi	r3,(TASK_HPAGE_BASE>>SID_SHIFT)
	bge	5f			/* TASK_HPAGE_BASE..TASK_HPAGE_END */
	cmpldi	r3,16
	bge	6f			/* 4GB..TASK_HPAGE_BASE */

	lhz	r9,PACAHTLBSEGS(r13)
	srd	r9,r9,r3
	andi.	r9,r9,1
	beq	6f
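	/* Below 4GB (ESID < 16): PACAHTLBSEGS is a halfword bitmask,
	 * one bit per 256MB segment; the bit for this ESID decides
	 * whether the segment is mapped with hugepages. */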

5:	/* this is a hugepage user address */
	li	r11,(SLB_VSID_USER|SLB_VSID_L)
END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
#endif /* CONFIG_HUGETLB_PAGE */

6:	ld	r9,PACACONTEXTID(r13)
	rldimi	r3,r9,USER_ESID_BITS,0

9:	/* r3 = protovsid, r11 = flags, r10 = esid_data, cr7 = <>KERNELBASE */
	ASM_VSID_SCRAMBLE(r3,r9)
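	/* ASM_VSID_SCRAMBLE is a multiplicative hash, roughly
	 * VSID = (protovsid * VSID_MULTIPLIER) % VSID_MODULUS with a
	 * modulus of 2^36 - 1. 0xfffffffff == 2^36 - 1 is congruent to
	 * 0, which is how the top kernel segment ends up with the
	 * reserved bad VSID 0 mentioned in the warning above. */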

	rldimi	r11,r3,SLB_VSID_SHIFT,16	/* combine VSID and flags */
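	/* r11 now matches the layout slbmte expects in RS: the VSID in
	 * the upper bits with the protection/size flags below it. */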

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 */
	slbmte	r11,r10

	bgelr	cr7			/* we're done for kernel addresses */

	/* Update the slb cache */
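	/* The cache records which user ESIDs have been inserted so that
	 * the context-switch code can invalidate just those entries
	 * rather than the whole SLB. */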
	lhz	r3,PACASLBCACHEPTR(r13)	/* offset = paca->slb_cache_ptr */
	cmpldi	r3,SLB_CACHE_ENTRIES
	bge	1f

	/* still room in the slb cache */
	sldi	r11,r3,1		/* r11 = offset * sizeof(u16) */
	rldicl	r10,r10,36,28		/* get low 16 bits of the ESID */
	add	r11,r11,r13		/* r11 = (u16 *)paca + offset */
	sth	r10,PACASLBCACHE(r11)	/* paca->slb_cache[offset] = esid */
	addi	r3,r3,1			/* offset++ */
	b	2f
1:	/* offset >= SLB_CACHE_ENTRIES */
	li	r3,SLB_CACHE_ENTRIES+1
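	/* Cache overflow: leave the pointer past SLB_CACHE_ENTRIES so
	 * the context-switch code knows to fall back to flushing the
	 * entire (non-bolted) SLB. */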
2:
	sth	r3,PACASLBCACHEPTR(r13)	/* paca->slb_cache_ptr = offset */
	blr

8:	/* invalid EA */
	li	r3,0			/* BAD_VSID */
	li	r11,SLB_VSID_USER	/* flags don't much matter */
	b	9b
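	/* Proto-VSID 0 scrambles to VSID 0, the reserved bad VSID, so a
	 * bogus EA gets an entry that can never map any pages. */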