#ifndef _ASM_POWERPC_MMU_HASH64_H_
#define _ASM_POWERPC_MMU_HASH64_H_
/*
 * PowerPC64 memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *   PPC64 rework.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/asm-compat.h>
#include <asm/page.h>

/*
 * Segment table
 */

#define STE_ESID_V	0x80
#define STE_ESID_KS	0x20
#define STE_ESID_KP	0x10
#define STE_ESID_N	0x08

#define STE_VSID_SHIFT	12

/* Location of cpu0's segment table */
#define STAB0_PAGE	0x8
#define STAB0_OFFSET	(STAB0_PAGE << 12)
#define STAB0_PHYS_ADDR	(STAB0_OFFSET + PHYSICAL_START)

#ifndef __ASSEMBLY__
extern char initial_stab[];
#endif /* ! __ASSEMBLY */

/*
 * SLB
 */

#define SLB_NUM_BOLTED		3
#define SLB_CACHE_ENTRIES	8
#define SLB_MIN_SIZE		32

/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_SHIFT_1T	24
#define SLB_VSID_SSIZE_SHIFT	62
#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100)
#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00		ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01		ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10		ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP		(SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL		(SLB_VSID_KP)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C			(0x08000000)
#define SLBIE_SSIZE_SHIFT	25

/*
 * Hash table
 */

#define HPTES_PER_GROUP 8

#define HPTE_V_SSIZE_SHIFT	62
#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_AVPN		ASM_CONST(0x3fffffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)

#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_KEY_HI		ASM_CONST(0x3000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
#define HPTE_R_N		ASM_CONST(0x0000000000000004)
#define HPTE_R_G		ASM_CONST(0x0000000000000008)
#define HPTE_R_M		ASM_CONST(0x0000000000000010)
#define HPTE_R_I		ASM_CONST(0x0000000000000020)
#define HPTE_R_W		ASM_CONST(0x0000000000000040)
#define HPTE_R_WIMG		ASM_CONST(0x0000000000000078)
#define HPTE_R_C		ASM_CONST(0x0000000000000080)
#define HPTE_R_R		ASM_CONST(0x0000000000000100)
#define HPTE_R_KEY_LO		ASM_CONST(0x0000000000000e00)

#define HPTE_V_1TB_SEG		ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK	ASM_CONST(0x4001ffffff000000)

/* Values for PP (assumes Ks=0, Kp=1) */
/* pp0 will always be 0 for Linux */
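/* The hardware selects the key from the segment's Ks/Kp bits: Ks for
 * supervisor-state accesses and Kp for problem-state accesses.  With
 * Ks=0 and Kp=1 as assumed above, supervisor accesses are checked
 * with key 0 and user accesses with key 1, which yields the
 * supervisor/user permission pairs in the comments below. */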
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX 1	/* Supervisor read/write, User read */
#define PP_RWRW 2	/* Supervisor read/write, User read/write */
#define PP_RXRX 3	/* Supervisor read,       User read */

#ifndef __ASSEMBLY__

struct hash_pte {
	unsigned long v;
	unsigned long r;
};

extern struct hash_pte *htab_address;
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;

/*
 * Page size definition
 *
 *    shift : is the "PAGE_SHIFT" value for that page size
 *    sllp  : is a bit mask with the value of SLB L || LP to be or'ed
 *            directly to a slbmte "vsid" value
 *    penc  : is the HPTE encoding used for the "LP" field
 */
struct mmu_psize_def
{
	unsigned int	shift;	/* number of bits */
	unsigned int	penc;	/* HPTE encoding */
	unsigned int	tlbiel;	/* tlbiel supported for that page size */
	unsigned long	avpnm;	/* bits to mask out in AVPN in the HPTE */
	unsigned long	sllp;	/* SLB L||LP (exact mask to use in slbmte) */
};
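
/*
 * For illustration only (typical values, not definitive): a 16M page
 * would have shift = 24, sllp = SLB_VSID_L (L=1, LP=00) and penc = 0.
 * The real table is filled in at boot from the firmware/device-tree
 * page size information.
 */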

#endif /* __ASSEMBLY__ */

/*
 * Segment sizes.
 * These are the values used by hardware in the B field of
 * SLB entries and the first dword of MMU hashtable entries.
 * The B field is 2 bits; the values 2 and 3 are unused and reserved.
 */
#define MMU_SEGSIZE_256M	0
#define MMU_SEGSIZE_1T		1


#ifndef __ASSEMBLY__

/*
 * The current system page and segment sizes
 */
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
extern int mmu_linear_psize;
extern int mmu_virtual_psize;
extern int mmu_vmalloc_psize;
extern int mmu_vmemmap_psize;
extern int mmu_io_psize;
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
extern u16 mmu_slb_size;
extern unsigned long tce_alloc_start, tce_alloc_end;

/*
 * If the processor supports 64k normal pages but not 64k cache
 * inhibited pages, we have to be prepared to switch processes
 * to use 4k pages when they create cache-inhibited mappings.
 * If this is the case, mmu_ci_restrictions will be set to 1.
 */
extern int mmu_ci_restrictions;

/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * for the page size
 */
static inline unsigned long hpte_encode_v(unsigned long va, int psize,
					  int ssize)
{
	unsigned long v;
	v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	if (psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;
	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
	return v;
}
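
/*
 * A sketch of the encoding, assuming a 4K base page in a 256M segment
 * (avpnm = 0): v = (va >> 23) << HPTE_V_AVPN_SHIFT, i.e. the AVPN is
 * the VA down to bit 23, placed at bit 7, with HPTE_V_LARGE clear and
 * the SSIZE field 0 for MMU_SEGSIZE_256M.
 */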

/*
 * This function sets the ARPN and LP fields of the HPTE appropriately
 * for the page size. We assume the pa is already "clean", that is,
 * properly aligned for the requested page size.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int psize)
{
	/* A 4K page needs no special encoding */
	if (psize == MMU_PAGE_4K)
		return pa & HPTE_R_RPN;
	else {
		unsigned int penc = mmu_psize_defs[psize].penc;
		unsigned int shift = mmu_psize_defs[psize].shift;
		return (pa & ~((1ul << shift) - 1)) | (penc << 12);
	}
}
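
/*
 * A sketch for a large page, assuming 16M with shift = 24 and
 * penc = 0: r = pa & ~((1ul << 24) - 1), i.e. the aligned physical
 * address with no LP encoding bits set above bit 12.
 */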

/*
 * Build a VA given VSID, EA and segment size
 */
static inline unsigned long hpt_va(unsigned long ea, unsigned long vsid,
				   int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return (vsid << 28) | (ea & 0xfffffffUL);
	return (vsid << 40) | (ea & 0xffffffffffUL);
}

/*
 * This hashes a virtual address
 */
static inline unsigned long hpt_hash(unsigned long va, unsigned int shift,
				     int ssize)
{
	unsigned long hash, vsid;

	if (ssize == MMU_SEGSIZE_256M) {
		hash = (va >> 28) ^ ((va & 0x0fffffffUL) >> shift);
	} else {
		vsid = va >> 40;
		hash = vsid ^ (vsid << 25) ^ ((va & 0xffffffffffUL) >> shift);
	}
	return hash & 0x7fffffffffUL;
}
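
/*
 * A sketch, assuming a 256M segment with 4K pages (shift = 12): for
 * va = (vsid << 28) | offset as built by hpt_va() above, this gives
 * hash = vsid ^ (offset >> 12), i.e. the VSID xor'ed with the page
 * index within the segment, truncated to 39 bits.
 */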

extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned int local, int ssize, int subpage_prot);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned int local, int ssize);
struct mm_struct;
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, int local, int ssize,
		     unsigned int shift, unsigned int mmu_psize);
extern void hash_failure_debug(unsigned long ea, unsigned long access,
			       unsigned long vsid, unsigned long trap,
			       int ssize, int psize, unsigned long pte);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long prot,
			     int psize, int ssize);
extern void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);

extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_iSeries(void);
extern void hpte_init_beat(void);
extern void hpte_init_beat_v3(void);

extern void stabs_alloc(void);
extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);
extern void stab_initialize(unsigned long stab);

extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
#endif /* __ASSEMBLY__ */

/*
 * VSID allocation
 *
 * We first generate a 36-bit "proto-VSID".  For kernel addresses this
 * is equal to the ESID, for user addresses it is:
 *	(context << 16) | (esid & 0xffff)
 * (see USER_ESID_BITS below).
 *
 * The two forms are distinguishable because the top bit is 0 for user
 * addresses, whereas the top two bits are 1 for kernel addresses.
 * Proto-VSIDs with the top two bits equal to 0b10 are reserved for
 * now.
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *	where	VSID_MULTIPLIER = 200730139 (VSID_MULTIPLIER_256M below)
 *		VSID_MODULUS = 2^36-1 = 0xFFFFFFFFF
 *
 * This scramble is only well defined for proto-VSIDs below
 * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are
 * reserved.  VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below).
 *
 * This scheme has several advantages over older methods:
 *
 *	- We have VSIDs allocated for every kernel address
 * (i.e. everything above 0xC000000000000000), except the very top
 * segment, which simplifies several things.
 *
 *	- We allow for 16 significant bits of ESID and 19 bits of
 * context for user addresses, i.e. 16T (44 bits) of address space for
 * up to 512K contexts (although the page table structure and context
 * allocation will need changes to take advantage of this).
 *
 *	- The scramble function gives robust scattering in the hash
 * table (at least based on some initial results).  The previous
 * method was more susceptible to pathological cases giving excessive
 * hash collisions.
 */
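/*
 * The shift-and-add reduction works because 2^n == 1 (mod 2^n-1):
 * writing the product as x = hi * 2^n + lo gives
 * x == hi + lo (mod 2^n-1), so a shift and an add replace the divide,
 * and a single carry fix-up (see ASM_VSID_SCRAMBLE and the
 * vsid_scramble() macro below) brings the sum back into range.
 */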
/*
 * WARNING - If you change these you must make sure the asm
 * implementations in slb_allocate (slb_low.S), do_stab_bolted
 * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly.
 *
 * You'll also need to change the precomputed VSID values in head.S
 * which are used by the iSeries firmware.
 */

#define VSID_MULTIPLIER_256M	ASM_CONST(200730139)	/* 28-bit prime */
#define VSID_BITS_256M		36
#define VSID_MODULUS_256M	((1UL<<VSID_BITS_256M)-1)

#define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_1T		24
#define VSID_MODULUS_1T		((1UL<<VSID_BITS_1T)-1)

#define CONTEXT_BITS		19
#define USER_ESID_BITS		16
#define USER_ESID_BITS_1T	4

#define USER_VSID_RANGE	(1UL << (USER_ESID_BITS + SID_SHIFT))

/*
 * This macro generates asm code to compute the VSID scramble
 * function.  Used in slb_allocate() and do_stab_bolted.  The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *	rt = register containing the proto-VSID and into which the
 *		VSID will be stored
 *	rx = scratch register (clobbered)
 *
 *	- rt and rx must be different registers
 *	- The answer will end up in the low VSID_BITS bits of rt.  The higher
 *	  bits may contain other garbage, so you may need to mask the
 *	  result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx, size)					\
	lis	rx,VSID_MULTIPLIER_##size@h;				\
	ori	rx,rx,VSID_MULTIPLIER_##size@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
									\
	srdi	rx,rt,VSID_BITS_##size;					\
	clrldi	rt,rt,(64-VSID_BITS_##size);				\
	add	rt,rt,rx;		/* add high and low bits */	\
	/* Now, rt == VSID (mod 2^VSID_BITS-1), and lies between 0 and	\
	 * 2^VSID_BITS-1+2^28-1.  That in particular means that if	\
	 * rt >= 2^VSID_BITS-1, then rt+1 has the 2^VSID_BITS bit set.	\
	 * So, if rt+1 has the bit clear, rt already has the answer we	\
	 * want; if it doesn't, the answer is the low VSID_BITS bits of	\
	 * rt+1.  So in all cases the answer is the low VSID_BITS bits	\
	 * of (rt + ((rt+1) >> VSID_BITS)) */				\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS_##size;	/* extract 2^VSID_BITS bit */	\
	add	rt,rt,rx


#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * For the sub-page protection option, we extend the PGD with one of
 * these.  Basically we have a 3-level tree, with the top level being
 * the protptrs array.  To optimize speed and memory consumption when
 * only addresses < 4GB are being protected, pointers to the first
 * four pages of sub-page protection words are stored in the low_prot
 * array.
 * Each page of sub-page protection words protects 1GB (4 bytes
 * protects 64k).  For the 3-level tree, each page of pointers then
 * protects 8TB.
 */
struct subpage_prot_table {
	unsigned long maxaddr;	/* only addresses < this are protected */
	unsigned int **protptrs[2];
	unsigned int *low_prot[4];
};

#define SBP_L1_BITS		(PAGE_SHIFT - 2)
#define SBP_L2_BITS		(PAGE_SHIFT - 3)
#define SBP_L1_COUNT		(1 << SBP_L1_BITS)
#define SBP_L2_COUNT		(1 << SBP_L2_BITS)
#define SBP_L2_SHIFT		(PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT		(SBP_L2_SHIFT + SBP_L2_BITS)
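
/*
 * Worked through assuming 64K pages (PAGE_SHIFT = 16): SBP_L1_BITS
 * is 14, so one page holds 16384 protection words covering
 * 16384 * 64K = 1GB; SBP_L2_BITS is 13, so one page of pointers
 * spans 8192 * 1GB = 8TB, matching the comment above.
 */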

extern void subpage_prot_free(struct mm_struct *mm);
extern void subpage_prot_init_new_context(struct mm_struct *mm);
#else
static inline void subpage_prot_free(struct mm_struct *mm) {}
static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
#endif /* CONFIG_PPC_SUBPAGE_PROT */

typedef unsigned long mm_context_id_t;
struct spinlock;

typedef struct {
	mm_context_id_t id;
	u16 user_psize;		/* page size index */

#ifdef CONFIG_PPC_MM_SLICES
	u64 low_slices_psize;	/* SLB page size encodings */
	u64 high_slices_psize;	/* 4 bits per slice for now */
#else
	u16 sllp;		/* SLB page size encoding */
#endif
	unsigned long vdso_base;
#ifdef CONFIG_PPC_SUBPAGE_PROT
	struct subpage_prot_table spt;
#endif /* CONFIG_PPC_SUBPAGE_PROT */
#ifdef CONFIG_PPC_ICSWX
	struct spinlock *cop_lockp;	/* guard acop and cop_pid */
	unsigned long acop;	/* mask of enabled coprocessor types */
	unsigned int cop_pid;	/* pid value used with coprocessors */
#endif /* CONFIG_PPC_ICSWX */
} mm_context_t;


#if 0
/*
 * The code below is equivalent to this function for arguments
 * < 2^VSID_BITS, which is all this should ever be called
 * with.  However gcc is not clever enough to compute the
 * modulus (2^n-1) without a second multiply.
 */
#define vsid_scramble(protovsid, size) \
	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

#else /* 1 */
#define vsid_scramble(protovsid, size) \
	({								 \
		unsigned long x;					 \
		x = (protovsid) * VSID_MULTIPLIER_##size;		 \
		x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
		(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
	})
#endif /* 1 */
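
#if 0
/*
 * A sanity-check sketch (not built, purely illustrative; the helper
 * name is hypothetical): for proto-VSIDs below the modulus, the
 * shift-and-add form above must agree with the plain C modulus it
 * replaces.
 */
static inline void vsid_scramble_check(void)
{
	unsigned long proto = 0x123456789UL;	/* arbitrary test value */
	unsigned long x = proto * VSID_MULTIPLIER_256M;

	BUG_ON(vsid_scramble(proto, 256M) != x % VSID_MODULUS_256M);
}
#endif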

/* This is only valid for addresses >= PAGE_OFFSET */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return vsid_scramble(ea >> SID_SHIFT, 256M);
	return vsid_scramble(ea >> SID_SHIFT_1T, 1T);
}

/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
	/* Use 1T segments if possible for addresses >= 1T */
	if (addr >= (1UL << SID_SHIFT_1T))
		return mmu_highuser_ssize;
	return MMU_SEGSIZE_256M;
}

/* This is only valid for user addresses (which are below 2^44) */
static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
				     int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return vsid_scramble((context << USER_ESID_BITS)
				     | (ea >> SID_SHIFT), 256M);
	return vsid_scramble((context << USER_ESID_BITS_1T)
			     | (ea >> SID_SHIFT_1T), 1T);
}

/*
 * This is only used on legacy iSeries in lparmap.c,
 * hence the 256MB segment assumption.
 */
#define VSID_SCRAMBLE(pvsid)	(((pvsid) * VSID_MULTIPLIER_256M) %	\
				 VSID_MODULUS_256M)
#define KERNEL_VSID(ea)		VSID_SCRAMBLE(GET_ESID(ea))

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_MMU_HASH64_H_ */