#ifndef _ASM_POWERPC_MMU_HASH64_H_
#define _ASM_POWERPC_MMU_HASH64_H_
/*
 * PowerPC64 memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 * PPC64 rework.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/asm-compat.h>
#include <asm/page.h>

/*
 * This is necessary to get the definition of PGTABLE_RANGE which we
 * need for various slice-related matters. Note that this isn't the
 * complete pgtable.h but only a portion of it.
 */
#include <asm/pgtable-ppc64.h>

/*
 * Segment table
 */

#define STE_ESID_V 0x80
#define STE_ESID_KS 0x20
#define STE_ESID_KP 0x10
#define STE_ESID_N 0x08

#define STE_VSID_SHIFT 12

/* Location of cpu0's segment table */
#define STAB0_PAGE 0x8
#define STAB0_OFFSET (STAB0_PAGE << 12)
#define STAB0_PHYS_ADDR (STAB0_OFFSET + PHYSICAL_START)
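/*
 * Worked example (illustrative note, not in the original header): with
 * STAB0_PAGE = 0x8, STAB0_OFFSET is 0x8 << 12 = 0x8000, so cpu0's segment
 * table sits 32KB into the kernel's physical load area
 * (STAB0_OFFSET + PHYSICAL_START).
 */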

#ifndef __ASSEMBLY__
extern char initial_stab[];
#endif /* ! __ASSEMBLY */

/*
 * SLB
 */

#define SLB_NUM_BOLTED 3
#define SLB_CACHE_ENTRIES 8
#define SLB_MIN_SIZE 32

/* Bits in the SLB ESID word */
#define SLB_ESID_V ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT 12
#define SLB_VSID_SHIFT_1T 24
#define SLB_VSID_SSIZE_SHIFT 62
#define SLB_VSID_B ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP ASM_CONST(0x0000000000000400)
#define SLB_VSID_N ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L ASM_CONST(0x0000000000000100)
#define SLB_VSID_C ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00 ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01 ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10 ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11 ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP (SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL (SLB_VSID_KP)
#define SLB_VSID_USER (SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C (0x08000000)
#define SLBIE_SSIZE_SHIFT 25

/*
 * Hash table
 */

#define HPTES_PER_GROUP 8

#define HPTE_V_SSIZE_SHIFT 62
#define HPTE_V_AVPN_SHIFT 7
#define HPTE_V_AVPN ASM_CONST(0x3fffffffffffff80)
#define HPTE_V_AVPN_VAL(x) (((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y) (!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID ASM_CONST(0x0000000000000001)

#define HPTE_R_PP0 ASM_CONST(0x8000000000000000)
#define HPTE_R_TS ASM_CONST(0x4000000000000000)
#define HPTE_R_KEY_HI ASM_CONST(0x3000000000000000)
#define HPTE_R_RPN_SHIFT 12
#define HPTE_R_RPN ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_PP ASM_CONST(0x0000000000000003)
#define HPTE_R_N ASM_CONST(0x0000000000000004)
#define HPTE_R_G ASM_CONST(0x0000000000000008)
#define HPTE_R_M ASM_CONST(0x0000000000000010)
#define HPTE_R_I ASM_CONST(0x0000000000000020)
#define HPTE_R_W ASM_CONST(0x0000000000000040)
#define HPTE_R_WIMG ASM_CONST(0x0000000000000078)
#define HPTE_R_C ASM_CONST(0x0000000000000080)
#define HPTE_R_R ASM_CONST(0x0000000000000100)
#define HPTE_R_KEY_LO ASM_CONST(0x0000000000000e00)

#define HPTE_V_1TB_SEG ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK ASM_CONST(0x4001ffffff000000)

/* Values for PP (assumes Ks=0, Kp=1) */
#define PP_RWXX 0 /* Supervisor read/write, User none */
#define PP_RWRX 1 /* Supervisor read/write, User read */
#define PP_RWRW 2 /* Supervisor read/write, User read/write */
#define PP_RXRX 3 /* Supervisor read, User read */
#define PP_RXXX (HPTE_R_PP0 | 2) /* Supervisor read, user none */

/* Fields for tlbiel instruction in architecture 2.06 */
#define TLBIEL_INVAL_SEL_MASK 0xc00 /* invalidation selector */
#define TLBIEL_INVAL_PAGE 0x000 /* invalidate a single page */
#define TLBIEL_INVAL_SET_LPID 0x800 /* invalidate a set for current LPID */
#define TLBIEL_INVAL_SET 0xc00 /* invalidate a set for all LPIDs */
#define TLBIEL_INVAL_SET_MASK 0xfff000 /* set number to inval. */
#define TLBIEL_INVAL_SET_SHIFT 12

#define POWER7_TLB_SETS 128 /* # sets in POWER7 TLB */

#ifndef __ASSEMBLY__

struct hash_pte {
        unsigned long v;
        unsigned long r;
};

extern struct hash_pte *htab_address;
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;

/*
 * Page size definition
 *
 * shift : is the "PAGE_SHIFT" value for that page size
 * sllp  : is a bit mask with the value of SLB L || LP to be or'ed
 *         directly to a slbmte "vsid" value
 * penc  : is the HPTE encoding mask for the "LP" field
 */
struct mmu_psize_def
{
        unsigned int shift;     /* number of bits */
        unsigned int penc;      /* HPTE encoding */
        unsigned int tlbiel;    /* tlbiel supported for that page size */
        unsigned long avpnm;    /* bits to mask out in AVPN in the HPTE */
        unsigned long sllp;     /* SLB L||LP (exact mask to use in slbmte) */
};

#endif /* __ASSEMBLY__ */

/*
 * Segment sizes.
 * These are the values used by hardware in the B field of
 * SLB entries and the first dword of MMU hashtable entries.
 * The B field is 2 bits; the values 2 and 3 are unused and reserved.
 */
#define MMU_SEGSIZE_256M 0
#define MMU_SEGSIZE_1T 1

/*
 * Encoded page number shift.
 * In order to fit the 78-bit VA in a 64-bit variable we shift the VA by
 * 12 bits. This enables us to address up to a 76-bit VA.
 * For the hpt hash we can ignore the page-size bits of the VA, and for
 * hpte encoding we ignore up to 23 bits of it, so dropping the lower
 * 12 bits works in all cases, including the 4k page size.
 */
#define VPN_SHIFT 12

#ifndef __ASSEMBLY__

static inline int segment_shift(int ssize)
{
        if (ssize == MMU_SEGSIZE_256M)
                return SID_SHIFT;
        return SID_SHIFT_1T;
}

/*
 * The current system page and segment sizes
 */
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
extern int mmu_linear_psize;
extern int mmu_virtual_psize;
extern int mmu_vmalloc_psize;
extern int mmu_vmemmap_psize;
extern int mmu_io_psize;
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
extern u16 mmu_slb_size;
extern unsigned long tce_alloc_start, tce_alloc_end;

/*
 * If the processor supports 64k normal pages but not 64k cache
 * inhibited pages, we have to be prepared to switch processes
 * to use 4k pages when they create cache-inhibited mappings.
 * If this is the case, mmu_ci_restrictions will be set to 1.
 */
extern int mmu_ci_restrictions;

/*
 * This computes the AVPN and B fields of the first dword of a HPTE,
 * for use when we want to match an existing PTE. The bottom 7 bits
 * of the returned value are zero.
 */
static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
                                             int ssize)
{
        unsigned long v;
        /*
         * The AVA field omits the low-order 23 bits of the 78-bit VA.
         * These bits are not needed in the PTE, because the
         * low-order b of these bits are part of the byte offset
         * into the virtual page and, if b < 23, the high-order
         * 23-b of these bits are always used in selecting the
         * PTEGs to be searched.
         */
        v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
        v <<= HPTE_V_AVPN_SHIFT;
        v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
        return v;
}
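/*
 * Illustrative note (not in the original header): with VPN_SHIFT = 12 the
 * vpn already omits the low 12 VA bits, so shifting it right by another
 * 23 - VPN_SHIFT = 11 bits discards the rest of the 23 low-order VA bits
 * that the AVA field does not carry, before masking with avpnm and placing
 * the result at HPTE_V_AVPN_SHIFT.
 */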

/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * for the page size
 */
static inline unsigned long hpte_encode_v(unsigned long vpn,
                                          int psize, int ssize)
{
        unsigned long v;
        v = hpte_encode_avpn(vpn, psize, ssize);
        if (psize != MMU_PAGE_4K)
                v |= HPTE_V_LARGE;
        return v;
}

/*
 * This function sets the ARPN and LP fields of the HPTE appropriately
 * for the page size. We assume the pa is already "clean", that is,
 * properly aligned for the requested page size.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int psize)
{
        /* A 4K page needs no special encoding */
        if (psize == MMU_PAGE_4K)
                return pa & HPTE_R_RPN;
        else {
                unsigned int penc = mmu_psize_defs[psize].penc;
                unsigned int shift = mmu_psize_defs[psize].shift;
                return (pa & ~((1ul << shift) - 1)) | (penc << 12);
        }
}

/*
 * Build a VPN_SHIFT bit shifted va given VSID, EA and segment size.
 */
static inline unsigned long hpt_vpn(unsigned long ea,
                                    unsigned long vsid, int ssize)
{
        unsigned long mask;
        int s_shift = segment_shift(ssize);

        mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
        return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
}
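/*
 * Worked example (illustrative, not in the original header): for a 256MB
 * segment, s_shift = SID_SHIFT = 28, so mask = (1ul << 16) - 1 and the
 * result is (vsid << 16) | ((ea >> 12) & 0xffff), i.e. the VSID followed
 * by EA bits 27..12.
 */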

/*
 * This hashes a virtual address
 */
static inline unsigned long hpt_hash(unsigned long vpn,
                                     unsigned int shift, int ssize)
{
        int mask;
        unsigned long hash, vsid;

        /* VPN_SHIFT can be at most 12 */
        if (ssize == MMU_SEGSIZE_256M) {
                mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
                hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
                        ((vpn & mask) >> (shift - VPN_SHIFT));
        } else {
                mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
                vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
                hash = vsid ^ (vsid << 25) ^
                        ((vpn & mask) >> (shift - VPN_SHIFT));
        }
        return hash & 0x7fffffffffUL;
}
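/*
 * Illustrative note (not in the original header): for a 256MB segment with
 * 4K pages (shift = 12), this reduces to (vpn >> 16) ^ (vpn & 0xffff), i.e.
 * the VSID xor'ed with the 4K page index within the segment; the final mask
 * keeps the low 39 bits of the hash.
 */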

extern int __hash_page_4K(unsigned long ea, unsigned long access,
                          unsigned long vsid, pte_t *ptep, unsigned long trap,
                          unsigned int local, int ssize, int subpage_prot);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
                           unsigned long vsid, pte_t *ptep, unsigned long trap,
                           unsigned int local, int ssize);
struct mm_struct;
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
                     pte_t *ptep, unsigned long trap, int local, int ssize,
                     unsigned int shift, unsigned int mmu_psize);
extern void hash_failure_debug(unsigned long ea, unsigned long access,
                               unsigned long vsid, unsigned long trap,
                               int ssize, int psize, unsigned long pte);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
                             unsigned long pstart, unsigned long prot,
                             int psize, int ssize);
extern void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);

extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_beat(void);
extern void hpte_init_beat_v3(void);

extern void stabs_alloc(void);
extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);
extern void stab_initialize(unsigned long stab);

extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
#endif /* __ASSEMBLY__ */

/*
 * VSID allocation (256MB segment)
 *
 * We first generate a 38-bit "proto-VSID". For kernel addresses this
 * is equal to the ESID | 1 << 37, for user addresses it is:
 * (context << USER_ESID_BITS) | (esid & ((1U << USER_ESID_BITS) - 1))
 *
 * This splits the proto-VSID into two ranges:
 * 0 to 2^(CONTEXT_BITS + USER_ESID_BITS) - 1 : User proto-VSID range
 * 2^(CONTEXT_BITS + USER_ESID_BITS) to 2^(VSID_BITS) - 1 : Kernel proto-VSID range
 *
 * We also have CONTEXT_BITS + USER_ESID_BITS = VSID_BITS - 1
 * That is, we assign half of the space to user processes and half
 * to the kernel.
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 * VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *
 * VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below).
 *
 * This scheme has several advantages over older methods:
 *
 * - We have VSIDs allocated for every kernel address
 * (i.e. everything above 0xC000000000000000), except the very top
 * segment, which simplifies several things.
 *
 * - We allow for USER_ESID_BITS significant bits of ESID and
 * CONTEXT_BITS bits of context for user addresses.
 * i.e. 64T (46 bits) of address space for up to half a million contexts.
 *
 * - The scramble function gives robust scattering in the hash
 * table (at least based on some initial results). The previous
 * method was more susceptible to pathological cases giving excessive
 * hash collisions.
 */

#define CONTEXT_BITS 19
#define USER_ESID_BITS 18
#define USER_ESID_BITS_1T 6
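/*
 * Illustrative note (not in the original header): with USER_ESID_BITS = 18
 * and 256MB (2^28 byte) segments, a user context spans 2^(18 + 28) = 2^46
 * bytes = 64TB, and CONTEXT_BITS = 19 allows about half a million contexts,
 * matching the "64T (46 bits)" figure quoted above.
 */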

/*
 * This should be computed such that protovsid * vsid_multiplier
 * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus.
 */
#define VSID_MULTIPLIER_256M ASM_CONST(12538073) /* 24-bit prime */
#define VSID_BITS_256M (CONTEXT_BITS + USER_ESID_BITS + 1)
#define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1)

#define VSID_MULTIPLIER_1T ASM_CONST(12538073) /* 24-bit prime */
#define VSID_BITS_1T (CONTEXT_BITS + USER_ESID_BITS_1T + 1)
#define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1)

#define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT))

/*
 * This macro generates asm code to compute the VSID scramble
 * function. Used in slb_allocate() and do_stab_bolted. The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 * rt = register containing the proto-VSID and into which the
 *      VSID will be stored
 * rx = scratch register (clobbered)
 *
 * - rt and rx must be different registers
 * - The answer will end up in the low VSID_BITS bits of rt. The higher
 *   bits may contain other garbage, so you may need to mask the
 *   result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx, size) \
        lis     rx,VSID_MULTIPLIER_##size@h; \
        ori     rx,rx,VSID_MULTIPLIER_##size@l; \
        mulld   rt,rt,rx;               /* rt = rt * MULTIPLIER */ \
 \
        srdi    rx,rt,VSID_BITS_##size; \
        clrldi  rt,rt,(64-VSID_BITS_##size); \
        add     rt,rt,rx;               /* add high and low bits */ \
        /* Now, rt == VSID (mod 2^VSID_BITS-1), and lies between 0 and \
         * 2^VSID_BITS-1+2^24-1 (the multiplier is a 24-bit prime). If \
         * rt >= 2^VSID_BITS-1, then rt+1 has the 2^VSID_BITS bit set, \
         * so if rt+1 has that bit clear, rt already has the answer we \
         * want; if it doesn't, the answer is the low VSID_BITS bits of \
         * rt+1. So in all cases the answer is the low VSID_BITS bits \
         * of (rt + ((rt+1) >> VSID_BITS)) */ \
        addi    rx,rt,1; \
        srdi    rx,rx,VSID_BITS_##size; /* extract 2^VSID_BITS bit */ \
        add     rt,rt,rx

/* 4 bits per slice and we have one slice per 1TB */
#define SLICE_ARRAY_SIZE (PGTABLE_RANGE >> 41)

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * For the sub-page protection option, we extend the PGD with one of
 * these. Basically we have a 3-level tree, with the top level being
 * the protptrs array. To optimize speed and memory consumption when
 * only addresses < 4GB are being protected, pointers to the first
 * four pages of sub-page protection words are stored in the low_prot
 * array.
 * Each page of sub-page protection words protects 1GB (4 bytes
 * protects 64k). For the 3-level tree, each page of pointers then
 * protects 8TB.
 */
struct subpage_prot_table {
        unsigned long maxaddr;  /* only addresses < this are protected */
        unsigned int **protptrs[2];
        unsigned int *low_prot[4];
};

#define SBP_L1_BITS (PAGE_SHIFT - 2)
#define SBP_L2_BITS (PAGE_SHIFT - 3)
#define SBP_L1_COUNT (1 << SBP_L1_BITS)
#define SBP_L2_COUNT (1 << SBP_L2_BITS)
#define SBP_L2_SHIFT (PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT (SBP_L2_SHIFT + SBP_L2_BITS)
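/*
 * Illustrative note (not in the original header): the 1GB/8TB figures above
 * assume 64K pages (PAGE_SHIFT = 16). One page then holds 2^14 protection
 * words (SBP_L1_COUNT) covering 2^14 * 64K = 1GB, and one page of 2^13
 * pointers (SBP_L2_COUNT) covers 2^13 * 1GB = 8TB.
 */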

extern void subpage_prot_free(struct mm_struct *mm);
extern void subpage_prot_init_new_context(struct mm_struct *mm);
#else
static inline void subpage_prot_free(struct mm_struct *mm) {}
static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
#endif /* CONFIG_PPC_SUBPAGE_PROT */

typedef unsigned long mm_context_id_t;
struct spinlock;

typedef struct {
        mm_context_id_t id;
        u16 user_psize;         /* page size index */

#ifdef CONFIG_PPC_MM_SLICES
        u64 low_slices_psize;   /* SLB page size encodings */
        unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
#else
        u16 sllp;               /* SLB page size encoding */
#endif
        unsigned long vdso_base;
#ifdef CONFIG_PPC_SUBPAGE_PROT
        struct subpage_prot_table spt;
#endif /* CONFIG_PPC_SUBPAGE_PROT */
#ifdef CONFIG_PPC_ICSWX
        struct spinlock *cop_lockp;     /* guard acop and cop_pid */
        unsigned long acop;             /* mask of enabled coprocessor types */
        unsigned int cop_pid;           /* pid value used with coprocessors */
#endif /* CONFIG_PPC_ICSWX */
} mm_context_t;

#if 0
/*
 * The code below is equivalent to this function for arguments
 * < 2^VSID_BITS, which is all this should ever be called
 * with. However gcc is not clever enough to compute the
 * modulus (2^n-1) without a second multiply.
 */
#define vsid_scramble(protovsid, size) \
        ((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

#else /* 1 */
#define vsid_scramble(protovsid, size) \
        ({ \
                unsigned long x; \
                x = (protovsid) * VSID_MULTIPLIER_##size; \
                x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
                (x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
        })
#endif /* 1 */
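/*
 * Worked example of the folding trick above (illustrative only, using a toy
 * 4-bit field rather than VSID_BITS): for x = 250 and modulus 2^4 - 1 = 15,
 * (250 >> 4) + (250 & 15) = 15 + 10 = 25, and then
 * (25 + ((25 + 1) >> 4)) & 15 = 26 & 15 = 10, which is indeed 250 % 15.
 */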

/*
 * This is only valid for addresses >= PAGE_OFFSET
 * The proto-VSID space is divided into two classes:
 * User:   0 to 2^(CONTEXT_BITS + USER_ESID_BITS) - 1
 * Kernel: 2^(CONTEXT_BITS + USER_ESID_BITS) to 2^(VSID_BITS) - 1
 *
 * With KERNEL_START at 0xc000000000000000, the proto-VSID for
 * the kernel would otherwise end up as 0xc00000000 (36 bits). With 64TB
 * support we need the kernel proto-VSID to be in the
 * [2^37 to 2^38 - 1] range due to the increased USER_ESID_BITS.
 */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
        unsigned long proto_vsid;
        /*
         * We need to make sure proto_vsid for the kernel is
         * >= 2^(CONTEXT_BITS + USER_ESID_BITS[_1T])
         */
        if (ssize == MMU_SEGSIZE_256M) {
                proto_vsid = ea >> SID_SHIFT;
                proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS));
                return vsid_scramble(proto_vsid, 256M);
        }
        proto_vsid = ea >> SID_SHIFT_1T;
        proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS_1T));
        return vsid_scramble(proto_vsid, 1T);
}

/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
        /* Use 1T segments if possible for addresses >= 1T */
        if (addr >= (1UL << SID_SHIFT_1T))
                return mmu_highuser_ssize;
        return MMU_SEGSIZE_256M;
}

/* This is only valid for user addresses (which are below 2^46) */
static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
                                     int ssize)
{
        if (ssize == MMU_SEGSIZE_256M)
                return vsid_scramble((context << USER_ESID_BITS)
                                     | (ea >> SID_SHIFT), 256M);
        return vsid_scramble((context << USER_ESID_BITS_1T)
                             | (ea >> SID_SHIFT_1T), 1T);
}
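/*
 * Usage sketch (illustrative values, not from the original source): for a
 * 256MB segment, context 1 and ea = 0x10000000 give ea >> SID_SHIFT = 1,
 * so the proto-VSID is (1 << USER_ESID_BITS) | 1 = 0x40001 before being
 * passed through vsid_scramble().
 */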

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_MMU_HASH64_H_ */