#ifndef _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
#define _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
/*
 * PowerPC64 memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *   PPC64 rework.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/asm-compat.h>
#include <asm/page.h>
#include <asm/bug.h>

/*
 * This is necessary to get the definition of PGTABLE_RANGE, which we
 * need for various slice-related matters. Note that this isn't the
 * complete pgtable.h but only a portion of it.
 */
#include <asm/book3s/64/pgtable.h>
#include <asm/processor.h>

/*
 * SLB
 */

#define SLB_NUM_BOLTED		3
#define SLB_CACHE_ENTRIES	8
#define SLB_MIN_SIZE		32

/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_SHIFT_1T	24
#define SLB_VSID_SSIZE_SHIFT	62
#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100)
#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00		ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01		ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10		ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP		(SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL		(SLB_VSID_KP)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C			(0x08000000)
#define SLBIE_SSIZE_SHIFT	25

/*
 * Hash table
 */

#define HPTES_PER_GROUP		8

#define HPTE_V_SSIZE_SHIFT	62
#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_AVPN		ASM_CONST(0x3fffffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)

/*
 * ISA 3.0 has a different HPTE format.
 */
#define HPTE_R_3_0_SSIZE_SHIFT	58
#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_KEY_HI		ASM_CONST(0x3000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
#define HPTE_R_N		ASM_CONST(0x0000000000000004)
#define HPTE_R_G		ASM_CONST(0x0000000000000008)
#define HPTE_R_M		ASM_CONST(0x0000000000000010)
#define HPTE_R_I		ASM_CONST(0x0000000000000020)
#define HPTE_R_W		ASM_CONST(0x0000000000000040)
#define HPTE_R_WIMG		ASM_CONST(0x0000000000000078)
#define HPTE_R_C		ASM_CONST(0x0000000000000080)
#define HPTE_R_R		ASM_CONST(0x0000000000000100)
#define HPTE_R_KEY_LO		ASM_CONST(0x0000000000000e00)

#define HPTE_V_1TB_SEG		ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK	ASM_CONST(0x4001ffffff000000)

/* Values for PP (assumes Ks=0, Kp=1) */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX	1	/* Supervisor read/write, User read */
#define PP_RWRW	2	/* Supervisor read/write, User read/write */
#define PP_RXRX	3	/* Supervisor read, User read */
#define PP_RXXX	(HPTE_R_PP0 | 2)	/* Supervisor read, User none */

/* Fields for tlbiel instruction in architecture 2.06 */
#define TLBIEL_INVAL_SEL_MASK	0xc00	/* invalidation selector */
#define TLBIEL_INVAL_PAGE	0x000	/* invalidate a single page */
#define TLBIEL_INVAL_SET_LPID	0x800	/* invalidate a set for current LPID */
#define TLBIEL_INVAL_SET	0xc00	/* invalidate a set for all LPIDs */
#define TLBIEL_INVAL_SET_MASK	0xfff000	/* set number to invalidate */
#define TLBIEL_INVAL_SET_SHIFT	12

#define POWER7_TLB_SETS		128	/* # sets in POWER7 TLB */
#define POWER8_TLB_SETS		512	/* # sets in POWER8 TLB */
#define POWER9_TLB_SETS_HASH	256	/* # sets in POWER9 TLB Hash mode */
#define POWER9_TLB_SETS_RADIX	128	/* # sets in POWER9 TLB Radix mode */

#ifndef __ASSEMBLY__

struct hash_pte {
	__be64 v;
	__be64 r;
};

extern struct hash_pte *htab_address;
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;

static inline int shift_to_mmu_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
		if (mmu_psize_defs[psize].shift == shift)
			return psize;
	return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}
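
/*
 * Illustrative only: a minimal sketch (not part of the kernel API; the
 * function name is hypothetical) of how the two helpers above round-trip,
 * assuming mmu_psize_defs[] has been populated by early hash MMU setup.
 * Kept under #if 0, like the reference vsid_scramble() further down.
 */
#if 0
static void example_psize_round_trip(void)
{
	/* A 64K page has shift 16; map it to a psize index and back. */
	int psize = shift_to_mmu_psize(16);

	if (psize >= 0)		/* -1 if no 64K page size is configured */
		BUG_ON(mmu_psize_to_shift(psize) != 16);
}
#endif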

#endif /* __ASSEMBLY__ */

/*
 * Segment sizes.
 * These are the values used by hardware in the B field of
 * SLB entries and the first dword of MMU hashtable entries.
 * The B field is 2 bits; the values 2 and 3 are unused and reserved.
 */
#define MMU_SEGSIZE_256M	0
#define MMU_SEGSIZE_1T		1

/*
 * Shift used to encode the virtual page number.
 * In order to fit the 78-bit VA in a 64-bit variable we shift the VA by
 * 12 bits. This enables us to address up to a 76-bit VA.
 * For the HPT hash we can ignore the page-size bits of the VA, and for
 * the HPTE encoding we can ignore up to 23 bits of it, so ignoring the
 * lower 12 bits ensures we work in all cases, including 4K page size.
 */
#define VPN_SHIFT	12

/*
 * HPTE Large Page (LP) details
 */
#define LP_SHIFT	12
#define LP_BITS		8
#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)
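
/*
 * Worked example (illustrative): LP_MASK(i) selects (LP_BITS - i) of the
 * LP encoding bits, positioned at LP_SHIFT. So LP_MASK(0) == 0xFF000
 * (all 8 bits) and LP_MASK(4) == 0xF000 (4 bits only).
 */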

#ifndef __ASSEMBLY__

static inline int slb_vsid_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SLB_VSID_SHIFT;
	return SLB_VSID_SHIFT_1T;
}

static inline int segment_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SID_SHIFT;
	return SID_SHIFT_1T;
}

/*
 * The current system page and segment sizes
 */
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
extern u16 mmu_slb_size;
extern unsigned long tce_alloc_start, tce_alloc_end;

/*
 * If the processor supports 64k normal pages but not 64k cache
 * inhibited pages, we have to be prepared to switch processes
 * to use 4k pages when they create cache-inhibited mappings.
 * If this is the case, mmu_ci_restrictions will be set to 1.
 */
extern int mmu_ci_restrictions;

/*
 * This computes the AVPN and B fields of the first dword of a HPTE,
 * for use when we want to match an existing PTE. The bottom 7 bits
 * of the returned value are zero.
 */
static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
					     int ssize)
{
	unsigned long v;
	/*
	 * The AVA field omits the low-order 23 bits of the 78-bit VA.
	 * These bits are not needed in the PTE, because the
	 * low-order b of these bits are part of the byte offset
	 * into the virtual page and, if b < 23, the high-order
	 * 23-b of these bits are always used in selecting the
	 * PTEGs to be searched.
	 */
	v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
	return v;
}
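
/*
 * Worked example (illustrative, not from the architecture documents):
 * with VPN_SHIFT == 12, the shift above is (23 - 12) == 11, i.e. the VPN
 * has already dropped the low 12 VA bits and we drop 11 more, so all 23
 * low-order VA bits are omitted before masking off the page-size bits
 * (avpnm) that must not appear in the AVPN.
 */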

/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * using the base page size and actual page size.
 */
static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
					  int actual_psize, int ssize)
{
	unsigned long v;
	v = hpte_encode_avpn(vpn, base_psize, ssize);
	if (actual_psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;
	return v;
}

/*
 * This function sets the ARPN and LP fields of the HPTE appropriately
 * for the page size. We assume the pa is already "clean", that is,
 * properly aligned for the requested page size.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
					  int actual_psize, int ssize)
{
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		pa |= ((unsigned long) ssize) << HPTE_R_3_0_SSIZE_SHIFT;

	/* A 4K page needs no special encoding */
	if (actual_psize == MMU_PAGE_4K)
		return pa & HPTE_R_RPN;
	else {
		unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
		unsigned int shift = mmu_psize_defs[actual_psize].shift;
		return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
	}
}

/*
 * Build a VPN_SHIFT-bit shifted VA given the VSID, EA and segment size.
 */
static inline unsigned long hpt_vpn(unsigned long ea,
				    unsigned long vsid, int ssize)
{
	unsigned long mask;
	int s_shift = segment_shift(ssize);

	mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
	return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
}
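
/*
 * Worked example (illustrative): for a 256MB segment, SID_SHIFT == 28 and
 * VPN_SHIFT == 12, so this returns (vsid << 16) | (EA bits 27..12), i.e.
 * the VSID followed by the page index within the segment, with the low
 * 12 EA bits dropped.
 */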

/*
 * This hashes a virtual address
 */
static inline unsigned long hpt_hash(unsigned long vpn,
				     unsigned int shift, int ssize)
{
	int mask;
	unsigned long hash, vsid;

	/* VPN_SHIFT can be at most 12 */
	if (ssize == MMU_SEGSIZE_256M) {
		mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
		hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	} else {
		mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
		vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
		hash = vsid ^ (vsid << 25) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	}
	return hash & 0x7fffffffffUL;
}
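
/*
 * Illustrative only: a sketch of how callers typically turn this hash into
 * a primary PTEG index (the secondary group uses the complement, ~hash).
 * The function name is hypothetical; the real lookups live in the hash MMU
 * backends. Kept under #if 0 like the reference vsid_scramble() below.
 */
#if 0
static unsigned long example_primary_group(unsigned long vpn,
					   unsigned int shift, int ssize)
{
	unsigned long hash = hpt_hash(vpn, shift, ssize);

	/* Index of the first HPTE of the 8-entry primary group. */
	return (hash & htab_hash_mask) * HPTES_PER_GROUP;
}
#endif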

#define HPTE_LOCAL_UPDATE	0x1
#define HPTE_NOHPTE_UPDATE	0x2

extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned long flags, int ssize, int subpage_prot);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned long flags, int ssize);
struct mm_struct;
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
extern int hash_page_mm(struct mm_struct *mm, unsigned long ea,
			unsigned long access, unsigned long trap,
			unsigned long flags);
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
		     unsigned long dsisr);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, unsigned long flags,
		     int ssize, unsigned int shift, unsigned int mmu_psize);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int __hash_page_thp(unsigned long ea, unsigned long access,
			   unsigned long vsid, pmd_t *pmdp, unsigned long trap,
			   unsigned long flags, int ssize, unsigned int psize);
#else
static inline int __hash_page_thp(unsigned long ea, unsigned long access,
				  unsigned long vsid, pmd_t *pmdp,
				  unsigned long trap, unsigned long flags,
				  int ssize, unsigned int psize)
{
	BUG();
	return -1;
}
#endif
extern void hash_failure_debug(unsigned long ea, unsigned long access,
			       unsigned long vsid, unsigned long trap,
			       int ssize, int psize, int lpsize,
			       unsigned long pte);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long prot,
			     int psize, int ssize);
int htab_remove_mapping(unsigned long vstart, unsigned long vend,
			int psize, int ssize);
extern void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);

extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_beat(void);
extern void hpte_init_beat_v3(void);

extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);

extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
#endif /* __ASSEMBLY__ */

/*
 * VSID allocation (256MB segment)
 *
 * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
 * from the mmu context id and effective segment id of the address.
 *
 * For user processes the max context id is limited to ((1ul << 19) - 5);
 * for kernel space, we use the top 4 context ids to map addresses as below.
 * NOTE: each context only supports 64TB now.
 * 0x7fffc -	[ 0xc000000000000000 - 0xc0003fffffffffff ]
 * 0x7fffd -	[ 0xd000000000000000 - 0xd0003fffffffffff ]
 * 0x7fffe -	[ 0xe000000000000000 - 0xe0003fffffffffff ]
 * 0x7ffff -	[ 0xf000000000000000 - 0xf0003fffffffffff ]
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *
 * VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below). The scramble function gives
 * robust scattering in the hash table (at least based on some initial
 * results).
 *
 * We also consider VSID 0 special. We use VSID 0 for SLB entries mapping
 * a bad address. This enables us to consolidate bad address handling in
 * hash_page.
 *
 * We also need to avoid the last segment of the last context, because that
 * would give a proto-VSID of 0x1fffffffff. That will result in a VSID 0
 * because of the modulo operation in the VSID scramble. But the vmemmap
 * (which is what uses region 0xf) will never be close to 64TB in size
 * (it's 56 bytes per page of system memory).
 */

#define CONTEXT_BITS		19
#define ESID_BITS		18
#define ESID_BITS_1T		6

/*
 * 256MB segment
 * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
 * available for user + kernel mapping. The top 4 contexts are used for
 * kernel mapping. Each segment contains 2^28 bytes. Each
 * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
 * (19 == 37 + 28 - 46).
 */
#define MAX_USER_CONTEXT	((ASM_CONST(1) << CONTEXT_BITS) - 5)

/*
 * This should be computed such that protovsid * vsid_multiplier
 * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus.
 */
#define VSID_MULTIPLIER_256M	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_256M		(CONTEXT_BITS + ESID_BITS)
#define VSID_MODULUS_256M	((1UL<<VSID_BITS_256M)-1)

#define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_1T		(CONTEXT_BITS + ESID_BITS_1T)
#define VSID_MODULUS_1T		((1UL<<VSID_BITS_1T)-1)

#define USER_VSID_RANGE		(1UL << (ESID_BITS + SID_SHIFT))

/*
 * This macro generates asm code to compute the VSID scramble
 * function. Used in slb_allocate() and do_stab_bolted. The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *	rt = register containing the proto-VSID and into which the
 *		VSID will be stored
 *	rx = scratch register (clobbered)
 *
 *	- rt and rx must be different registers
 *	- The answer will end up in the low VSID_BITS bits of rt. The higher
 *	  bits may contain other garbage, so you may need to mask the
 *	  result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx, size)					\
	lis	rx,VSID_MULTIPLIER_##size@h;				\
	ori	rx,rx,VSID_MULTIPLIER_##size@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
									\
	srdi	rx,rt,VSID_BITS_##size;					\
	clrldi	rt,rt,(64-VSID_BITS_##size);				\
	add	rt,rt,rx;		/* add high and low bits */	\
	/* NOTE: explanation based on VSID_BITS_##size = 36		\
	 * Now, r3 == VSID (mod 2^36-1), and lies between 0 and		\
	 * 2^36-1+2^28-1. That in particular means that if r3 >=	\
	 * 2^36-1, then r3+1 has the 2^36 bit set. So, if r3+1 has	\
	 * the bit clear, r3 already has the answer we want, if it	\
	 * doesn't, the answer is the low 36 bits of r3+1. So in all	\
	 * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS_##size;	/* extract 2^VSID_BITS bit */	\
	add	rt,rt,rx

/* 4 bits per slice and we have one slice per 1TB */
#define SLICE_ARRAY_SIZE	(H_PGTABLE_RANGE >> 41)

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * For the sub-page protection option, we extend the PGD with one of
 * these. Basically we have a 3-level tree, with the top level being
 * the protptrs array. To optimize speed and memory consumption when
 * only addresses < 4GB are being protected, pointers to the first
 * four pages of sub-page protection words are stored in the low_prot
 * array.
 * Each page of sub-page protection words protects 1GB (4 bytes
 * protects 64k). For the 3-level tree, each page of pointers then
 * protects 8TB.
 */
struct subpage_prot_table {
	unsigned long maxaddr;	/* only addresses < this are protected */
	unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
	unsigned int *low_prot[4];
};

#define SBP_L1_BITS		(PAGE_SHIFT - 2)
#define SBP_L2_BITS		(PAGE_SHIFT - 3)
#define SBP_L1_COUNT		(1 << SBP_L1_BITS)
#define SBP_L2_COUNT		(1 << SBP_L2_BITS)
#define SBP_L2_SHIFT		(PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT		(SBP_L2_SHIFT + SBP_L2_BITS)

extern void subpage_prot_free(struct mm_struct *mm);
extern void subpage_prot_init_new_context(struct mm_struct *mm);
#else
static inline void subpage_prot_free(struct mm_struct *mm) {}
static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
#endif /* CONFIG_PPC_SUBPAGE_PROT */

#if 0
/*
 * The code below is equivalent to this function for arguments
 * < 2^VSID_BITS, which is all this should ever be called
 * with. However gcc is not clever enough to compute the
 * modulus (2^n-1) without a second multiply.
 */
#define vsid_scramble(protovsid, size) \
	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

#else /* 1 */
#define vsid_scramble(protovsid, size) \
	({								\
		unsigned long x;					\
		x = (protovsid) * VSID_MULTIPLIER_##size;		\
		x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size);\
		(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size;\
	})
#endif /* 1 */
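
/*
 * Illustrative only: a quick sanity check, in the spirit of the #if 0
 * reference above, that the divide-free 2^n-1 reduction matches a true
 * modulo for in-range proto-VSIDs. Not kernel code; the function name
 * is hypothetical.
 */
#if 0
static void example_vsid_scramble_check(void)
{
	unsigned long proto = 0x12345UL;	/* any value < 2^VSID_BITS_256M */
	unsigned long product = proto * VSID_MULTIPLIER_256M;

	BUG_ON(vsid_scramble(proto, 256M) != product % VSID_MODULUS_256M);
}
#endif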

/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
	/* Use 1T segments if possible for addresses >= 1T */
	if (addr >= (1UL << SID_SHIFT_1T))
		return mmu_highuser_ssize;
	return MMU_SEGSIZE_256M;
}

static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
				     int ssize)
{
	/*
	 * Bad address. We return VSID 0 for that.
	 */
	if ((ea & ~REGION_MASK) >= H_PGTABLE_RANGE)
		return 0;

	if (ssize == MMU_SEGSIZE_256M)
		return vsid_scramble((context << ESID_BITS)
				     | (ea >> SID_SHIFT), 256M);
	return vsid_scramble((context << ESID_BITS_1T)
			     | (ea >> SID_SHIFT_1T), 1T);
}

/*
 * This is only valid for addresses >= PAGE_OFFSET
 *
 * For kernel space, we use the top 4 context ids to map addresses as below:
 * 0x7fffc -	[ 0xc000000000000000 - 0xc0003fffffffffff ]
 * 0x7fffd -	[ 0xd000000000000000 - 0xd0003fffffffffff ]
 * 0x7fffe -	[ 0xe000000000000000 - 0xe0003fffffffffff ]
 * 0x7ffff -	[ 0xf000000000000000 - 0xf0003fffffffffff ]
 */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
	unsigned long context;

	/*
	 * The kernel takes the top 4 contexts from the available range.
	 */
	context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1;
	return get_vsid(context, ea, ssize);
}
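
/*
 * Worked example (illustrative): for ea = 0xc000000000000000,
 * (ea >> 60) - 0xc == 0, so context == MAX_USER_CONTEXT + 1 == 0x7fffc,
 * matching the table above; the 0xd, 0xe and 0xf regions get
 * 0x7fffd, 0x7fffe and 0x7ffff respectively.
 */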

unsigned htab_shift_for_mem_size(unsigned long mem_size);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_ */