#ifndef _ASM_POWERPC_MMU_HASH64_H_
#define _ASM_POWERPC_MMU_HASH64_H_
/*
 * PowerPC64 memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *   PPC64 rework.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/asm-compat.h>
#include <asm/page.h>
#include <asm/bug.h>

/*
 * This is necessary to get the definition of PGTABLE_RANGE which we
 * need for various slices related matters. Note that this isn't the
 * complete pgtable.h but only a portion of it.
 */
#include <asm/book3s/64/pgtable.h>
#include <asm/processor.h>

/*
 * SLB
 */

#define SLB_NUM_BOLTED		3
#define SLB_CACHE_ENTRIES	8
#define SLB_MIN_SIZE		32

/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_SHIFT_1T	24
#define SLB_VSID_SSIZE_SHIFT	62
#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100)
#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00		ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01		ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10		ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP		(SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL		(SLB_VSID_KP)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C			(0x08000000)
#define SLBIE_SSIZE_SHIFT	25
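
/*
 * Illustrative sketch (not part of this header) of how the VSID and
 * ESID words above combine into an SLB entry for a bolted 256M kernel
 * segment, roughly as the slb code builds them; "flags" would be
 * SLB_VSID_KERNEL plus the page-size LLP bits, and "index" the SLB slot:
 *
 *	unsigned long rs = (vsid << SLB_VSID_SHIFT) | flags |
 *		((unsigned long)MMU_SEGSIZE_256M << SLB_VSID_SSIZE_SHIFT);
 *	unsigned long rb = (ea & ~((1UL << 28) - 1)) | SLB_ESID_V | index;
 *	asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb));
 */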

/*
 * Hash table
 */

#define HPTES_PER_GROUP	8

#define HPTE_V_SSIZE_SHIFT	62
#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_AVPN		ASM_CONST(0x3fffffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)

#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_KEY_HI		ASM_CONST(0x3000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
#define HPTE_R_N		ASM_CONST(0x0000000000000004)
#define HPTE_R_G		ASM_CONST(0x0000000000000008)
#define HPTE_R_M		ASM_CONST(0x0000000000000010)
#define HPTE_R_I		ASM_CONST(0x0000000000000020)
#define HPTE_R_W		ASM_CONST(0x0000000000000040)
#define HPTE_R_WIMG		ASM_CONST(0x0000000000000078)
#define HPTE_R_C		ASM_CONST(0x0000000000000080)
#define HPTE_R_R		ASM_CONST(0x0000000000000100)
#define HPTE_R_KEY_LO		ASM_CONST(0x0000000000000e00)

#define HPTE_V_1TB_SEG		ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK	ASM_CONST(0x4001ffffff000000)

/* Values for PP (assumes Ks=0, Kp=1) */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX	1	/* Supervisor read/write, User read */
#define PP_RWRW	2	/* Supervisor read/write, User read/write */
#define PP_RXRX	3	/* Supervisor read,       User read */
#define PP_RXXX	(HPTE_R_PP0 | 2)	/* Supervisor read, user none */

/* Fields for tlbiel instruction in architecture 2.06 */
#define TLBIEL_INVAL_SEL_MASK	0xc00	/* invalidation selector */
#define  TLBIEL_INVAL_PAGE	0x000	/* invalidate a single page */
#define  TLBIEL_INVAL_SET_LPID	0x800	/* invalidate a set for current LPID */
#define  TLBIEL_INVAL_SET	0xc00	/* invalidate a set for all LPIDs */
#define TLBIEL_INVAL_SET_MASK	0xfff000	/* set number to inval. */
#define TLBIEL_INVAL_SET_SHIFT	12

#define POWER7_TLB_SETS		128	/* # sets in POWER7 TLB */
#define POWER8_TLB_SETS		512	/* # sets in POWER8 TLB */
#define POWER9_TLB_SETS_HASH	256	/* # sets in POWER9 TLB Hash mode */
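
/*
 * Illustrative sketch (not part of this header) of how the tlbiel
 * fields above combine: flushing every set of a POWER7-style TLB
 * would issue one tlbiel per set, with the set number placed at
 * TLBIEL_INVAL_SET_SHIFT:
 *
 *	unsigned long rb = TLBIEL_INVAL_SET;
 *	int set;
 *
 *	for (set = 0; set < POWER7_TLB_SETS; set++) {
 *		asm volatile("tlbiel %0" : : "r" (rb));
 *		rb += 1 << TLBIEL_INVAL_SET_SHIFT;	/* next set */
 *	}
 */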

#ifndef __ASSEMBLY__

struct hash_pte {
	__be64 v;
	__be64 r;
};

extern struct hash_pte *htab_address;
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;

/*
 * Page size definition
 *
 *    shift : is the "PAGE_SHIFT" value for that page size
 *    sllp  : is a bit mask with the value of SLB L || LP to be or'ed
 *            directly to a slbmte "vsid" value
 *    penc  : is the HPTE encoding mask for the "LP" field
 */
struct mmu_psize_def
{
	unsigned int	shift;	/* number of bits */
	int		penc[MMU_PAGE_COUNT];	/* HPTE encoding */
	unsigned int	tlbiel;	/* tlbiel supported for that page size */
	unsigned long	avpnm;	/* bits to mask out in AVPN in the HPTE */
	unsigned long	sllp;	/* SLB L||LP (exact mask to use in slbmte) */
};
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];

static inline int shift_to_mmu_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
		if (mmu_psize_defs[psize].shift == shift)
			return psize;
	return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}

#endif /* __ASSEMBLY__ */

/*
 * Segment sizes.
 * These are the values used by hardware in the B field of
 * SLB entries and the first dword of MMU hashtable entries.
 * The B field is 2 bits; the values 2 and 3 are unused and reserved.
 */
#define MMU_SEGSIZE_256M	0
#define MMU_SEGSIZE_1T		1

/*
 * Shift applied when encoding the virtual page number (VPN).
 * In order to fit the 78-bit VA in a 64-bit variable we shift the VA
 * by 12 bits. This enables us to address up to a 76-bit VA.
 * For the HPT hash from a VA we can ignore the page size bits of the
 * VA, and for HPTE encoding we ignore up to 23 bits of the VA, so
 * ignoring the lower 12 bits ensures we work in all cases, including
 * 4K page size.
 */
#define VPN_SHIFT	12

/*
 * HPTE Large Page (LP) details
 */
#define LP_SHIFT	12
#define LP_BITS		8
#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)

#ifndef __ASSEMBLY__

static inline int slb_vsid_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SLB_VSID_SHIFT;
	return SLB_VSID_SHIFT_1T;
}

static inline int segment_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SID_SHIFT;
	return SID_SHIFT_1T;
}

/*
 * The current system page and segment sizes
 */
extern int mmu_linear_psize;
extern int mmu_virtual_psize;
extern int mmu_vmalloc_psize;
extern int mmu_vmemmap_psize;
extern int mmu_io_psize;
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
extern u16 mmu_slb_size;
extern unsigned long tce_alloc_start, tce_alloc_end;

/*
 * If the processor supports 64k normal pages but not 64k cache
 * inhibited pages, we have to be prepared to switch processes
 * to use 4k pages when they create cache-inhibited mappings.
 * If this is the case, mmu_ci_restrictions will be set to 1.
 */
extern int mmu_ci_restrictions;

/*
 * This computes the AVPN and B fields of the first dword of an HPTE,
 * for use when we want to match an existing PTE. The bottom 7 bits
 * of the returned value are zero.
 */
static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
					     int ssize)
{
	unsigned long v;
	/*
	 * The AVA field omits the low-order 23 bits of the 78-bit VA.
	 * These bits are not needed in the PTE, because the
	 * low-order b of these bits are part of the byte offset
	 * into the virtual page and, if b < 23, the high-order
	 * 23-b of these bits are always used in selecting the
	 * PTEGs to be searched.
	 */
	v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
	return v;
}

/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * using the base page size and actual page size.
 */
static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
					  int actual_psize, int ssize)
{
	unsigned long v;
	v = hpte_encode_avpn(vpn, base_psize, ssize);
	if (actual_psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;
	return v;
}

/*
 * This function sets the ARPN and LP fields of the HPTE appropriately
 * for the page size. We assume the pa is already "clean", that is,
 * properly aligned for the requested page size.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
					  int actual_psize)
{
	/* A 4K page needs no special encoding */
	if (actual_psize == MMU_PAGE_4K)
		return pa & HPTE_R_RPN;
	else {
		unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
		unsigned int shift = mmu_psize_defs[actual_psize].shift;
		return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
	}
}
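
/*
 * Worked example (illustrative; real penc values come from the
 * mmu_psize_defs table filled in at boot): for a 16M actual page at
 * pa = 0x5000000 with a hypothetical penc of 1, shift is 24, so
 * hpte_encode_r() would return
 *	(0x5000000 & ~0xffffff) | (1 << LP_SHIFT) == 0x5001000.
 */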

/*
 * Build a VPN_SHIFT-bit shifted VA (i.e. a VPN) given VSID, EA and
 * segment size.
 */
static inline unsigned long hpt_vpn(unsigned long ea,
				    unsigned long vsid, int ssize)
{
	unsigned long mask;
	int s_shift = segment_shift(ssize);

	mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
	return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
}
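
/*
 * Example (illustrative): for a 256M segment, s_shift = SID_SHIFT = 28
 * and mask = (1ul << 16) - 1, so with vsid = 0x2 and ea = 0x12345000:
 *	vpn = (0x2 << 16) | ((0x12345000 >> 12) & 0xffff)
 *	    = 0x20000 | 0x2345 = 0x22345
 */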

/*
 * This hashes a virtual address
 */
static inline unsigned long hpt_hash(unsigned long vpn,
				     unsigned int shift, int ssize)
{
	int mask;
	unsigned long hash, vsid;

	/* VPN_SHIFT can be at most 12 */
	if (ssize == MMU_SEGSIZE_256M) {
		mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
		hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	} else {
		mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
		vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
		hash = vsid ^ (vsid << 25) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	}
	return hash & 0x7fffffffffUL;
}
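
/*
 * Example (illustrative), continuing the hpt_vpn() example above with
 * vpn = 0x22345, shift = 12 (4K pages) and a 256M segment:
 *	hash = (0x22345 >> 16) ^ ((0x22345 & 0xffff) >> 0)
 *	     = 0x2 ^ 0x2345 = 0x2347
 * which, masked with 0x7fffffffff, selects hash bucket 0x2347.
 */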

#define HPTE_LOCAL_UPDATE	0x1
#define HPTE_NOHPTE_UPDATE	0x2

extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned long flags, int ssize, int subpage_prot);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned long flags, int ssize);
struct mm_struct;
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
extern int hash_page_mm(struct mm_struct *mm, unsigned long ea,
			unsigned long access, unsigned long trap,
			unsigned long flags);
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
		     unsigned long dsisr);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, unsigned long flags,
		     int ssize, unsigned int shift, unsigned int mmu_psize);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int __hash_page_thp(unsigned long ea, unsigned long access,
			   unsigned long vsid, pmd_t *pmdp, unsigned long trap,
			   unsigned long flags, int ssize, unsigned int psize);
#else
static inline int __hash_page_thp(unsigned long ea, unsigned long access,
				  unsigned long vsid, pmd_t *pmdp,
				  unsigned long trap, unsigned long flags,
				  int ssize, unsigned int psize)
{
	BUG();
	return -1;
}
#endif
extern void hash_failure_debug(unsigned long ea, unsigned long access,
			       unsigned long vsid, unsigned long trap,
			       int ssize, int psize, int lpsize,
			       unsigned long pte);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long prot,
			     int psize, int ssize);
int htab_remove_mapping(unsigned long vstart, unsigned long vend,
			int psize, int ssize);
extern void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);

extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_beat(void);
extern void hpte_init_beat_v3(void);

extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);

extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
#endif /* __ASSEMBLY__ */

/*
 * VSID allocation (256MB segment)
 *
 * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
 * from the mmu context id and the effective segment id of the address.
 *
 * For user processes, the max context id is limited to ((1ul << 19) - 5).
 * For kernel space, we use the top 4 context ids to map addresses as
 * below. NOTE: each context only supports 64TB now.
 * 0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
 * 0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
 * 0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
 * 0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *
 * VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below). The scramble function gives
 * robust scattering in the hash table (at least based on some initial
 * results).
 *
 * We also consider VSID 0 special. We use VSID 0 for slb entries mapping
 * a bad address. This enables us to consolidate bad address handling in
 * hash_page.
 *
 * We also need to avoid the last segment of the last context, because that
 * would give a proto-VSID of 0x1fffffffff, which will result in VSID 0
 * because of the modulo operation in the vsid scramble. But the vmemmap
 * (which is what uses region 0xf) will never be close to 64TB in size
 * (it's 56 bytes per page of system memory).
 */

#define CONTEXT_BITS		19
#define ESID_BITS		18
#define ESID_BITS_1T		6

/*
 * 256MB segment
 * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
 * available for user + kernel mapping. The top 4 contexts are used for
 * kernel mapping. Each segment contains 2^28 bytes. Each
 * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
 * (19 == 37 + 28 - 46).
 */
#define MAX_USER_CONTEXT	((ASM_CONST(1) << CONTEXT_BITS) - 5)

/*
 * This should be computed such that protovsid * vsid_multiplier
 * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus.
 */
#define VSID_MULTIPLIER_256M	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_256M		(CONTEXT_BITS + ESID_BITS)
#define VSID_MODULUS_256M	((1UL<<VSID_BITS_256M)-1)

#define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_1T		(CONTEXT_BITS + ESID_BITS_1T)
#define VSID_MODULUS_1T		((1UL<<VSID_BITS_1T)-1)

#define USER_VSID_RANGE	(1UL << (ESID_BITS + SID_SHIFT))

/*
 * This macro generates asm code to compute the VSID scramble
 * function.  Used in slb_allocate() and do_stab_bolted.  The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *	rt = register containing the proto-VSID and into which the
 *		VSID will be stored
 *	rx = scratch register (clobbered)
 *
 *	- rt and rx must be different registers
 *	- The answer will end up in the low VSID_BITS bits of rt.  The higher
 *	  bits may contain other garbage, so you may need to mask the
 *	  result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx, size)					\
	lis	rx,VSID_MULTIPLIER_##size@h;				\
	ori	rx,rx,VSID_MULTIPLIER_##size@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
									\
	srdi	rx,rt,VSID_BITS_##size;					\
	clrldi	rt,rt,(64-VSID_BITS_##size);				\
	add	rt,rt,rx;		/* add high and low bits */	\
	/* NOTE: explanation based on VSID_BITS_##size = 36		\
	 * Now, r3 == VSID (mod 2^36-1), and lies between 0 and		\
	 * 2^36-1+2^28-1.  That in particular means that if r3 >=	\
	 * 2^36-1, then r3+1 has the 2^36 bit set.  So, if r3+1 has	\
	 * the bit clear, r3 already has the answer we want, if it	\
	 * doesn't, the answer is the low 36 bits of r3+1.  So in all	\
	 * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS_##size;	/* extract 2^VSID_BITS bit */	\
	add	rt,rt,rx

/* 4 bits per slice and we have one slice per 1TB */
#define SLICE_ARRAY_SIZE	(PGTABLE_RANGE >> 41)

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * For the sub-page protection option, we extend the PGD with one of
 * these.  Basically we have a 3-level tree, with the top level being
 * the protptrs array.  To optimize speed and memory consumption when
 * only addresses < 4GB are being protected, pointers to the first
 * four pages of sub-page protection words are stored in the low_prot
 * array.
 * Each page of sub-page protection words protects 1GB (4 bytes
 * protects 64k).  For the 3-level tree, each page of pointers then
 * protects 8TB.
 */
struct subpage_prot_table {
	unsigned long maxaddr;	/* only addresses < this are protected */
	unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
	unsigned int *low_prot[4];
};

#define SBP_L1_BITS		(PAGE_SHIFT - 2)
#define SBP_L2_BITS		(PAGE_SHIFT - 3)
#define SBP_L1_COUNT		(1 << SBP_L1_BITS)
#define SBP_L2_COUNT		(1 << SBP_L2_BITS)
#define SBP_L2_SHIFT		(PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT		(SBP_L2_SHIFT + SBP_L2_BITS)
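
/*
 * Checking the arithmetic above (assuming 64K kernel pages, i.e.
 * PAGE_SHIFT = 16): each 4-byte word holds sixteen 2-bit protection
 * fields, one per 4K subpage, so one word covers 64K. A page then
 * holds SBP_L1_COUNT = 2^14 words covering 2^14 * 64K = 1GB, and a
 * page of SBP_L2_COUNT = 2^13 pointers covers 2^13 * 1GB = 8TB.
 */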

extern void subpage_prot_free(struct mm_struct *mm);
extern void subpage_prot_init_new_context(struct mm_struct *mm);
#else
static inline void subpage_prot_free(struct mm_struct *mm) {}
static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
#endif /* CONFIG_PPC_SUBPAGE_PROT */

typedef unsigned long mm_context_id_t;
struct spinlock;

typedef struct {
	mm_context_id_t id;
	u16 user_psize;		/* page size index */

#ifdef CONFIG_PPC_MM_SLICES
	u64 low_slices_psize;	/* SLB page size encodings */
	unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
#else
	u16 sllp;		/* SLB page size encoding */
#endif
	unsigned long vdso_base;
#ifdef CONFIG_PPC_SUBPAGE_PROT
	struct subpage_prot_table spt;
#endif /* CONFIG_PPC_SUBPAGE_PROT */
#ifdef CONFIG_PPC_ICSWX
	struct spinlock *cop_lockp;	/* guard acop and cop_pid */
	unsigned long acop;	/* mask of enabled coprocessor types */
	unsigned int cop_pid;	/* pid value used with coprocessors */
#endif /* CONFIG_PPC_ICSWX */
#ifdef CONFIG_PPC_64K_PAGES
	/* for 4K PTE fragment support */
	void *pte_frag;
#endif
#ifdef CONFIG_SPAPR_TCE_IOMMU
	struct list_head iommu_group_mem_list;
#endif
} mm_context_t;

#if 0
/*
 * The code below is equivalent to this function for arguments
 * < 2^VSID_BITS, which is all this should ever be called
 * with.  However gcc is not clever enough to compute the
 * modulus (2^n-1) without a second multiply.
 */
#define vsid_scramble(protovsid, size) \
	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

#else /* 1 */
#define vsid_scramble(protovsid, size) \
	({								 \
		unsigned long x;					 \
		x = (protovsid) * VSID_MULTIPLIER_##size;		 \
		x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
		(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
	})
#endif /* 1 */
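
/*
 * Why the folding above computes x % (2^n - 1) (illustrative, with a
 * toy n = 4, so the modulus is 15):
 *	x = 100: (100 >> 4) + (100 & 15) = 6 + 4 = 10, and 100 % 15 == 10.
 *	x = 30:  (30 >> 4) + (30 & 15) = 1 + 14 = 15; the final
 *	         (x + ((x+1) >> 4)) & 15 step maps 15 back to 0,
 *	         and 30 % 15 == 0.
 */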

/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
	/* Use 1T segments if possible for addresses >= 1T */
	if (addr >= (1UL << SID_SHIFT_1T))
		return mmu_highuser_ssize;
	return MMU_SEGSIZE_256M;
}

static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
				     int ssize)
{
	/*
	 * Bad address. We return VSID 0 for that.
	 */
	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE)
		return 0;

	if (ssize == MMU_SEGSIZE_256M)
		return vsid_scramble((context << ESID_BITS)
				     | (ea >> SID_SHIFT), 256M);
	return vsid_scramble((context << ESID_BITS_1T)
			     | (ea >> SID_SHIFT_1T), 1T);
}
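
/*
 * Example (illustrative): for context = 1, ea = 0x10000000 and a 256M
 * segment, the proto-VSID is
 *	(1 << ESID_BITS) | (0x10000000 >> SID_SHIFT) = 0x40000 | 0x1
 * which vsid_scramble() then turns into the real VSID.
 */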

/*
 * This is only valid for addresses >= PAGE_OFFSET
 *
 * For kernel space, we use the top 4 context ids to map addresses as below
 * 0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
 * 0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
 * 0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
 * 0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
 */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
	unsigned long context;

	/*
	 * The kernel takes the top 4 contexts from the available range.
	 */
	context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1;
	return get_vsid(context, ea, ssize);
}
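
/*
 * Example: for ea = 0xc000000000000000, (ea >> 60) == 0xc, so
 * context = MAX_USER_CONTEXT + 0 + 1 = 0x7fffb + 1 = 0x7fffc,
 * matching the kernel mapping table above.
 */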

unsigned htab_shift_for_mem_size(unsigned long mem_size);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_MMU_HASH64_H_ */