powerpc/pmac: Remove spurious machine type test
[deliverable/linux.git] / arch/powerpc/mm/hash_utils_64.c
/*
 * PowerPC64 port by Mike Corrigan and Dave Engebretsen
 *   {mikejc|engebret}@us.ibm.com
 *
 *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *    Module name: htab.c
 *
 *    Description:
 *      PowerPC Hashed Page Table functions
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG
#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/export.h>
#include <linux/ctype.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/memblock.h>
#include <linux/context_tracking.h>
#include <linux/libfdt.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/eeh.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/copro.h>
#include <asm/udbg.h>
#include <asm/code-patching.h>
#include <asm/fadump.h>
#include <asm/firmware.h>
#include <asm/tm.h>
#include <asm/trace.h>
#include <asm/ps3.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#define KB (1024)
#define MB (1024*KB)
#define GB (1024L*MB)

/*
 * Note:  pte   --> Linux PTE
 *        HPTE  --> PowerPC Hashed Page Table Entry
 *
 * Execution context:
 *   htab_initialize is called with the MMU off (of course), but
 *   the kernel has been copied down to zero so it can directly
 *   reference global data.  At this point it is very difficult
 *   to print debug info.
 */

static unsigned long _SDR1;
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
EXPORT_SYMBOL_GPL(mmu_psize_defs);

struct hash_pte *htab_address;
unsigned long htab_size_bytes;
unsigned long htab_hash_mask;
EXPORT_SYMBOL_GPL(htab_hash_mask);
int mmu_linear_psize = MMU_PAGE_4K;
EXPORT_SYMBOL_GPL(mmu_linear_psize);
int mmu_virtual_psize = MMU_PAGE_4K;
int mmu_vmalloc_psize = MMU_PAGE_4K;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
int mmu_vmemmap_psize = MMU_PAGE_4K;
#endif
int mmu_io_psize = MMU_PAGE_4K;
int mmu_kernel_ssize = MMU_SEGSIZE_256M;
EXPORT_SYMBOL_GPL(mmu_kernel_ssize);
int mmu_highuser_ssize = MMU_SEGSIZE_256M;
u16 mmu_slb_size = 64;
EXPORT_SYMBOL_GPL(mmu_slb_size);
#ifdef CONFIG_PPC_64K_PAGES
int mmu_ci_restrictions;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
static u8 *linear_map_hash_slots;
static unsigned long linear_map_hash_count;
static DEFINE_SPINLOCK(linear_map_hash_lock);
#endif /* CONFIG_DEBUG_PAGEALLOC */

/* These are the page-size array definitions to be used when none
 * are provided by the firmware.
 */

/* Pre-POWER4 CPUs (4k pages only)
 */
static struct mmu_psize_def mmu_psize_defaults_old[] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.sllp	= 0,
		.penc   = {[MMU_PAGE_4K] = 0, [1 ... MMU_PAGE_COUNT - 1] = -1},
		.avpnm	= 0,
		.tlbiel	= 0,
	},
};

/* POWER4, GPUL, POWER5
 *
 * Support for 16Mb large pages
 */
static struct mmu_psize_def mmu_psize_defaults_gp[] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.sllp	= 0,
		.penc   = {[MMU_PAGE_4K] = 0, [1 ... MMU_PAGE_COUNT - 1] = -1},
		.avpnm	= 0,
		.tlbiel	= 1,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.sllp	= SLB_VSID_L,
		.penc   = {[0 ... MMU_PAGE_16M - 1] = -1, [MMU_PAGE_16M] = 0,
			    [MMU_PAGE_16M + 1 ... MMU_PAGE_COUNT - 1] = -1 },
		.avpnm	= 0x1UL,
		.tlbiel	= 0,
	},
};

/*
 * 'R' and 'C' update notes:
 *  - Under pHyp or KVM, the updatepp path will not set C, thus it *will*
 *    create writeable HPTEs without C set, because the hcall H_PROTECT
 *    that we use in that case will not update C
 *  - The above is however not a problem, because we also don't do that
 *    fancy "no flush" variant of eviction and we use H_REMOVE which will
 *    do the right thing and thus we don't have the race I described earlier
 *
 *  - Under bare metal, we do have the race, so we need R and C set
 *  - We make sure R is always set and never lost
 *  - C is _PAGE_DIRTY, and *should* always be set for a writeable mapping
 */
unsigned long htab_convert_pte_flags(unsigned long pteflags)
{
	unsigned long rflags = 0;

	/* _PAGE_EXEC -> NOEXEC */
	if ((pteflags & _PAGE_EXEC) == 0)
		rflags |= HPTE_R_N;
	/*
	 * PPP bits:
	 * Linux uses slb key 0 for kernel and 1 for user.
	 * kernel RW areas are mapped with PPP=0b000
	 * User area is mapped with PPP=0b010 for read/write
	 * or PPP=0b011 for read-only (including writeable but clean pages).
	 */
	if (pteflags & _PAGE_PRIVILEGED) {
		/*
		 * Kernel read only mapped with ppp bits 0b110
		 */
		if (!(pteflags & _PAGE_WRITE))
			rflags |= (HPTE_R_PP0 | 0x2);
	} else {
		if (pteflags & _PAGE_RWX)
			rflags |= 0x2;
		if (!((pteflags & _PAGE_WRITE) && (pteflags & _PAGE_DIRTY)))
			rflags |= 0x1;
	}
	/*
	 * We can't allow hardware to update hpte bits. Hence always
	 * set 'R' bit and set 'C' if it is a write fault
	 */
	rflags |= HPTE_R_R;

	if (pteflags & _PAGE_DIRTY)
		rflags |= HPTE_R_C;
	/*
	 * Add in WIG bits
	 */

	if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_TOLERANT)
		rflags |= HPTE_R_I;
	else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT)
		rflags |= (HPTE_R_I | HPTE_R_G);
	else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
		rflags |= (HPTE_R_W | HPTE_R_I | HPTE_R_M);
	else
		/*
		 * Add memory coherence if cache inhibited is not set
		 */
		rflags |= HPTE_R_M;

	return rflags;
}
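
/*
 * Editor's note, a worked example added for clarity (hedged, not part
 * of the original source): per the PPP comment above, a clean,
 * read-only user data page (_PAGE_READ set, no _PAGE_EXEC, normal
 * cacheable memory) comes out as
 *
 *	rflags = HPTE_R_N	(no execute)
 *	       | 0x2 | 0x1	(PPP = 0b011, user read-only)
 *	       | HPTE_R_R	(reference bit always pre-set)
 *	       | HPTE_R_M	(memory coherence)
 *
 * matching the read-only case described in the comment block.
 */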

int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
		      unsigned long pstart, unsigned long prot,
		      int psize, int ssize)
{
	unsigned long vaddr, paddr;
	unsigned int step, shift;
	int ret = 0;

	shift = mmu_psize_defs[psize].shift;
	step = 1 << shift;

	prot = htab_convert_pte_flags(prot);

	DBG("htab_bolt_mapping(%lx..%lx -> %lx (%lx,%d,%d)\n",
	    vstart, vend, pstart, prot, psize, ssize);

	for (vaddr = vstart, paddr = pstart; vaddr < vend;
	     vaddr += step, paddr += step) {
		unsigned long hash, hpteg;
		unsigned long vsid = get_kernel_vsid(vaddr, ssize);
		unsigned long vpn  = hpt_vpn(vaddr, vsid, ssize);
		unsigned long tprot = prot;

		/*
		 * If we hit a bad address return error.
		 */
		if (!vsid)
			return -1;
		/* Make kernel text executable */
		if (overlaps_kernel_text(vaddr, vaddr + step))
			tprot &= ~HPTE_R_N;

		/* Make kvm guest trampolines executable */
		if (overlaps_kvm_tmp(vaddr, vaddr + step))
			tprot &= ~HPTE_R_N;

		/*
		 * If relocatable, check if it overlaps interrupt vectors that
		 * are copied down to real 0. For relocatable kernel
		 * (e.g. kdump case) we copy interrupt vectors down to real
		 * address 0. Mark that region as executable. This is
		 * because on p8 system with relocation on exception feature
		 * enabled, exceptions are raised with MMU (IR=DR=1) ON. Hence
		 * in order to execute the interrupt handlers in virtual
		 * mode the vector region needs to be marked as executable.
		 */
		if ((PHYSICAL_START > MEMORY_START) &&
		    overlaps_interrupt_vector_text(vaddr, vaddr + step))
			tprot &= ~HPTE_R_N;

		hash = hpt_hash(vpn, shift, ssize);
		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

		BUG_ON(!ppc_md.hpte_insert);
		ret = ppc_md.hpte_insert(hpteg, vpn, paddr, tprot,
					 HPTE_V_BOLTED, psize, psize, ssize);

		if (ret < 0)
			break;

#ifdef CONFIG_DEBUG_PAGEALLOC
		if (debug_pagealloc_enabled() &&
		    (paddr >> PAGE_SHIFT) < linear_map_hash_count)
			linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
#endif /* CONFIG_DEBUG_PAGEALLOC */
	}
	return ret < 0 ? ret : 0;
}
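
/*
 * Editor's note (hedged usage sketch): this mirrors the call made
 * later by htab_initialize() for the linear mapping. Each loop
 * iteration bolts one page of the chosen size, so mapping 256MB with
 * psize = MMU_PAGE_16M (shift = 24, step = 16MB) inserts 16 HPTEs,
 * versus 65536 HPTEs with MMU_PAGE_4K.
 */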

int htab_remove_mapping(unsigned long vstart, unsigned long vend,
		      int psize, int ssize)
{
	unsigned long vaddr;
	unsigned int step, shift;
	int rc;
	int ret = 0;

	shift = mmu_psize_defs[psize].shift;
	step = 1 << shift;

	if (!ppc_md.hpte_removebolted)
		return -ENODEV;

	for (vaddr = vstart; vaddr < vend; vaddr += step) {
		rc = ppc_md.hpte_removebolted(vaddr, psize, ssize);
		if (rc == -ENOENT) {
			ret = -ENOENT;
			continue;
		}
		if (rc < 0)
			return rc;
	}

	return ret;
}

static bool disable_1tb_segments = false;

static int __init parse_disable_1tb_segments(char *p)
{
	disable_1tb_segments = true;
	return 0;
}
early_param("disable_1tb_segments", parse_disable_1tb_segments);

static int __init htab_dt_scan_seg_sizes(unsigned long node,
					 const char *uname, int depth,
					 void *data)
{
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const __be32 *prop;
	int size = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,processor-segment-sizes", &size);
	if (prop == NULL)
		return 0;
	for (; size >= 4; size -= 4, ++prop) {
		if (be32_to_cpu(prop[0]) == 40) {
			DBG("1T segment support detected\n");

			if (disable_1tb_segments) {
				DBG("1T segments disabled by command line\n");
				break;
			}

			cur_cpu_spec->mmu_features |= MMU_FTR_1T_SEGMENT;
			return 1;
		}
	}
	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
	return 0;
}

static void __init htab_init_seg_sizes(void)
{
	of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
}

static int __init get_idx_from_shift(unsigned int shift)
{
	int idx = -1;

	switch (shift) {
	case 0xc:
		idx = MMU_PAGE_4K;
		break;
	case 0x10:
		idx = MMU_PAGE_64K;
		break;
	case 0x14:
		idx = MMU_PAGE_1M;
		break;
	case 0x18:
		idx = MMU_PAGE_16M;
		break;
	case 0x22:
		idx = MMU_PAGE_16G;
		break;
	}
	return idx;
}
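
/*
 * Editor's note (hedged): the case labels above are page shifts in
 * hex as reported by the device-tree, i.e. 0xc = 12 -> 4K,
 * 0x10 = 16 -> 64K, 0x14 = 20 -> 1M, 0x18 = 24 -> 16M and
 * 0x22 = 34 -> 16G.
 */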

static int __init htab_dt_scan_page_sizes(unsigned long node,
					  const char *uname, int depth,
					  void *data)
{
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const __be32 *prop;
	int size = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,segment-page-sizes", &size);
	if (!prop)
		return 0;

	pr_info("Page sizes from device-tree:\n");
	size /= 4;
	cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE);
	while(size > 0) {
		unsigned int base_shift = be32_to_cpu(prop[0]);
		unsigned int slbenc = be32_to_cpu(prop[1]);
		unsigned int lpnum = be32_to_cpu(prop[2]);
		struct mmu_psize_def *def;
		int idx, base_idx;

		size -= 3; prop += 3;
		base_idx = get_idx_from_shift(base_shift);
		if (base_idx < 0) {
			/* skip the pte encoding also */
			prop += lpnum * 2; size -= lpnum * 2;
			continue;
		}
		def = &mmu_psize_defs[base_idx];
		if (base_idx == MMU_PAGE_16M)
			cur_cpu_spec->mmu_features |= MMU_FTR_16M_PAGE;

		def->shift = base_shift;
		if (base_shift <= 23)
			def->avpnm = 0;
		else
			def->avpnm = (1 << (base_shift - 23)) - 1;
		def->sllp = slbenc;
		/*
		 * We don't know for sure what's up with tlbiel, so
		 * for now we only set it for 4K and 64K pages
		 */
		if (base_idx == MMU_PAGE_4K || base_idx == MMU_PAGE_64K)
			def->tlbiel = 1;
		else
			def->tlbiel = 0;

		while (size > 0 && lpnum) {
			unsigned int shift = be32_to_cpu(prop[0]);
			int penc = be32_to_cpu(prop[1]);

			prop += 2; size -= 2;
			lpnum--;

			idx = get_idx_from_shift(shift);
			if (idx < 0)
				continue;

			if (penc == -1)
				pr_err("Invalid penc for base_shift=%d "
				       "shift=%d\n", base_shift, shift);

			def->penc[idx] = penc;
			pr_info("base_shift=%d: shift=%d, sllp=0x%04lx,"
				" avpnm=0x%08lx, tlbiel=%d, penc=%d\n",
				base_shift, shift, def->sllp,
				def->avpnm, def->tlbiel, def->penc[idx]);
		}
	}

	return 1;
}

#ifdef CONFIG_HUGETLB_PAGE
/* Scan for 16G memory blocks that have been set aside for huge pages
 * and reserve those blocks for 16G huge pages.
 */
static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
					       const char *uname, int depth,
					       void *data) {
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const __be64 *addr_prop;
	const __be32 *page_count_prop;
	unsigned int expected_pages;
	long unsigned int phys_addr;
	long unsigned int block_size;

	/* We are scanning "memory" nodes only */
	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;

	/* This property is the log base 2 of the number of virtual pages that
	 * will represent this memory block. */
	page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL);
	if (page_count_prop == NULL)
		return 0;
	expected_pages = (1 << be32_to_cpu(page_count_prop[0]));
	addr_prop = of_get_flat_dt_prop(node, "reg", NULL);
	if (addr_prop == NULL)
		return 0;
	phys_addr = be64_to_cpu(addr_prop[0]);
	block_size = be64_to_cpu(addr_prop[1]);
	if (block_size != (16 * GB))
		return 0;
	printk(KERN_INFO "Huge page(16GB) memory: "
			"addr = 0x%lX size = 0x%lX pages = %d\n",
			phys_addr, block_size, expected_pages);
	if (phys_addr + (16 * GB) <= memblock_end_of_DRAM()) {
		memblock_reserve(phys_addr, block_size * expected_pages);
		add_gpage(phys_addr, block_size, expected_pages);
	}
	return 0;
}
#endif /* CONFIG_HUGETLB_PAGE */
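
/*
 * Editor's note, a worked example (hedged): a "memory" node with
 * ibm,expected#pages = 1 and reg = (addr, 16GB) passes the block-size
 * check above, so expected_pages = 1 << 1 = 2 and 2 * 16GB starting
 * at addr are reserved in memblock and registered via add_gpage().
 */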

static void mmu_psize_set_default_penc(void)
{
	int bpsize, apsize;
	for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++)
		for (apsize = 0; apsize < MMU_PAGE_COUNT; apsize++)
			mmu_psize_defs[bpsize].penc[apsize] = -1;
}

#ifdef CONFIG_PPC_64K_PAGES

static bool might_have_hea(void)
{
	/*
	 * The HEA ethernet adapter requires awareness of the
	 * GX bus. Without that awareness we can easily assume
	 * we will never see an HEA ethernet device.
	 */
#ifdef CONFIG_IBMEBUS
	return !cpu_has_feature(CPU_FTR_ARCH_207S) &&
		!firmware_has_feature(FW_FEATURE_SPLPAR);
#else
	return false;
#endif
}

#endif /* #ifdef CONFIG_PPC_64K_PAGES */

static void __init htab_init_page_sizes(void)
{
	int rc;

	/* set the invalid penc to -1 */
	mmu_psize_set_default_penc();

	/* Default to 4K pages only */
	memcpy(mmu_psize_defs, mmu_psize_defaults_old,
	       sizeof(mmu_psize_defaults_old));

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
	if (rc != 0)  /* Found */
		goto found;

	/*
	 * Not in the device-tree, let's fallback on known size
	 * list for 16M capable GP & GR
	 */
	if (mmu_has_feature(MMU_FTR_16M_PAGE))
		memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
		       sizeof(mmu_psize_defaults_gp));
found:
	if (!debug_pagealloc_enabled()) {
		/*
		 * Pick a size for the linear mapping. Currently, we only
		 * support 16M, 1M and 4K which is the default
		 */
		if (mmu_psize_defs[MMU_PAGE_16M].shift)
			mmu_linear_psize = MMU_PAGE_16M;
		else if (mmu_psize_defs[MMU_PAGE_1M].shift)
			mmu_linear_psize = MMU_PAGE_1M;
	}

#ifdef CONFIG_PPC_64K_PAGES
	/*
	 * Pick a size for the ordinary pages. Default is 4K, we support
	 * 64K for user mappings and vmalloc if supported by the processor.
	 * We only use 64k for ioremap if the processor
	 * (and firmware) support cache-inhibited large pages.
	 * If not, we use 4k and set mmu_ci_restrictions so that
	 * hash_page knows to switch processes that use cache-inhibited
	 * mappings to 4k pages.
	 */
	if (mmu_psize_defs[MMU_PAGE_64K].shift) {
		mmu_virtual_psize = MMU_PAGE_64K;
		mmu_vmalloc_psize = MMU_PAGE_64K;
		if (mmu_linear_psize == MMU_PAGE_4K)
			mmu_linear_psize = MMU_PAGE_64K;
		if (mmu_has_feature(MMU_FTR_CI_LARGE_PAGE)) {
			/*
			 * When running on pSeries using 64k pages for ioremap
			 * would stop us accessing the HEA ethernet. So if we
			 * have the chance of ever seeing one, stay at 4k.
			 */
			if (!might_have_hea())
				mmu_io_psize = MMU_PAGE_64K;
		} else
			mmu_ci_restrictions = 1;
	}
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* We try to use 16M pages for vmemmap if that is supported
	 * and we have at least 1G of RAM at boot
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift &&
	    memblock_phys_mem_size() >= 0x40000000)
		mmu_vmemmap_psize = MMU_PAGE_16M;
	else if (mmu_psize_defs[MMU_PAGE_64K].shift)
		mmu_vmemmap_psize = MMU_PAGE_64K;
	else
		mmu_vmemmap_psize = MMU_PAGE_4K;
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

	printk(KERN_DEBUG "Page orders: linear mapping = %d, "
	       "virtual = %d, io = %d"
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	       ", vmemmap = %d"
#endif
	       "\n",
	       mmu_psize_defs[mmu_linear_psize].shift,
	       mmu_psize_defs[mmu_virtual_psize].shift,
	       mmu_psize_defs[mmu_io_psize].shift
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	       ,mmu_psize_defs[mmu_vmemmap_psize].shift
#endif
	       );

#ifdef CONFIG_HUGETLB_PAGE
	/* Reserve 16G huge page memory sections for huge pages */
	of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
#endif /* CONFIG_HUGETLB_PAGE */
}

static int __init htab_dt_scan_pftsize(unsigned long node,
				       const char *uname, int depth,
				       void *data)
{
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const __be32 *prop;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
	if (prop != NULL) {
		/* pft_size[0] is the NUMA CEC cookie */
		ppc64_pft_size = be32_to_cpu(prop[1]);
		return 1;
	}
	return 0;
}

unsigned htab_shift_for_mem_size(unsigned long mem_size)
{
	unsigned memshift = __ilog2(mem_size);
	unsigned pshift = mmu_psize_defs[mmu_virtual_psize].shift;
	unsigned pteg_shift;

	/* round mem_size up to next power of 2 */
	if ((1UL << memshift) < mem_size)
		memshift += 1;

	/* aim for 2 pages / pteg */
	pteg_shift = memshift - (pshift + 1);

	/*
	 * 2^11 PTEGS of 128 bytes each, ie. 2^18 bytes is the minimum htab
	 * size permitted by the architecture.
	 */
	return max(pteg_shift + 7, 18U);
}
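
/*
 * Editor's note, a worked example (hedged): with 4K virtual pages
 * (pshift = 12) and mem_size = 1GB, memshift = 30, so
 * pteg_shift = 30 - 13 = 17 and the result is max(17 + 7, 18) = 24,
 * i.e. a 16MB hash table. The "+ 7" converts PTEG count to bytes
 * (each PTEG is 128 = 2^7 bytes), giving the 2 pages per PTEG aimed
 * for above.
 */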

static unsigned long __init htab_get_table_size(void)
{
	/* If hash size isn't already provided by the platform, we try to
	 * retrieve it from the device-tree. If it's not there either, we
	 * calculate it now based on the total RAM size
	 */
	if (ppc64_pft_size == 0)
		of_scan_flat_dt(htab_dt_scan_pftsize, NULL);
	if (ppc64_pft_size)
		return 1UL << ppc64_pft_size;

	return 1UL << htab_shift_for_mem_size(memblock_phys_mem_size());
}

#ifdef CONFIG_MEMORY_HOTPLUG
int create_section_mapping(unsigned long start, unsigned long end)
{
	int rc = htab_bolt_mapping(start, end, __pa(start),
				   pgprot_val(PAGE_KERNEL), mmu_linear_psize,
				   mmu_kernel_ssize);

	if (rc < 0) {
		int rc2 = htab_remove_mapping(start, end, mmu_linear_psize,
					      mmu_kernel_ssize);
		BUG_ON(rc2 && (rc2 != -ENOENT));
	}
	return rc;
}

int remove_section_mapping(unsigned long start, unsigned long end)
{
	int rc = htab_remove_mapping(start, end, mmu_linear_psize,
				     mmu_kernel_ssize);
	WARN_ON(rc < 0);
	return rc;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static void __init hash_init_partition_table(phys_addr_t hash_table,
					     unsigned long htab_size)
{
	unsigned long ps_field;
	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;

	/*
	 * slb llp encoding for the page size used in VPM real mode.
	 * We can ignore that for lpid 0
	 */
	ps_field = 0;
	htab_size = __ilog2(htab_size) - 18;

	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 24), "Partition table size too large.");
	partition_tb = __va(memblock_alloc_base(patb_size, patb_size,
						MEMBLOCK_ALLOC_ANYWHERE));

	/* Initialize the Partition Table with no entries */
	memset((void *)partition_tb, 0, patb_size);
	partition_tb->patb0 = cpu_to_be64(ps_field | hash_table | htab_size);
	/*
	 * FIXME!! This should be done via update_partition table
	 * For now UPRT is 0 for us.
	 */
	partition_tb->patb1 = 0;
	pr_info("Partition table %p\n", partition_tb);
	/*
	 * update partition table control register,
	 * 64 K size.
	 */
	mtspr(SPRN_PTCR, __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));

}

static void __init htab_initialize(void)
{
	unsigned long table;
	unsigned long pteg_count;
	unsigned long prot;
	unsigned long base = 0, size = 0;
	struct memblock_region *reg;

	DBG(" -> htab_initialize()\n");

	/* Initialize segment sizes */
	htab_init_seg_sizes();

	/* Initialize page sizes */
	htab_init_page_sizes();

	if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
		mmu_kernel_ssize = MMU_SEGSIZE_1T;
		mmu_highuser_ssize = MMU_SEGSIZE_1T;
		printk(KERN_INFO "Using 1TB segments\n");
	}

	/*
	 * Calculate the required size of the htab.  We want the number of
	 * PTEGs to equal one half the number of real pages.
	 */
	htab_size_bytes = htab_get_table_size();
	pteg_count = htab_size_bytes >> 7;

	htab_hash_mask = pteg_count - 1;

	if (firmware_has_feature(FW_FEATURE_LPAR) ||
	    firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		/* Using a hypervisor which owns the htab */
		htab_address = NULL;
		_SDR1 = 0;
#ifdef CONFIG_FA_DUMP
		/*
		 * If firmware assisted dump is active firmware preserves
		 * the contents of htab along with entire partition memory.
		 * Clear the htab if firmware assisted dump is active so
		 * that we don't end up using old mappings.
		 */
		if (is_fadump_active() && ppc_md.hpte_clear_all)
			ppc_md.hpte_clear_all();
#endif
	} else {
		unsigned long limit = MEMBLOCK_ALLOC_ANYWHERE;

#ifdef CONFIG_PPC_CELL
		/*
		 * Cell may require the hash table down low when using the
		 * Axon IOMMU in order to fit the dynamic region over it, see
		 * comments in cell/iommu.c
		 */
		if (fdt_subnode_offset(initial_boot_params, 0, "axon") > 0) {
			limit = 0x80000000;
			pr_info("Hash table forced below 2G for Axon IOMMU\n");
		}
#endif /* CONFIG_PPC_CELL */

		table = memblock_alloc_base(htab_size_bytes, htab_size_bytes,
					    limit);

		DBG("Hash table allocated at %lx, size: %lx\n", table,
		    htab_size_bytes);

		htab_address = __va(table);

		/* htab absolute addr + encoded htabsize */
		_SDR1 = table + __ilog2(htab_size_bytes) - 18;

		/* Initialize the HPT with no entries */
		memset((void *)table, 0, htab_size_bytes);

		if (!cpu_has_feature(CPU_FTR_ARCH_300))
			/* Set SDR1 */
			mtspr(SPRN_SDR1, _SDR1);
		else
			hash_init_partition_table(table, htab_size_bytes);
	}

	prot = pgprot_val(PAGE_KERNEL);

#ifdef CONFIG_DEBUG_PAGEALLOC
	if (debug_pagealloc_enabled()) {
		linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
		linear_map_hash_slots = __va(memblock_alloc_base(
				linear_map_hash_count, 1, ppc64_rma_size));
		memset(linear_map_hash_slots, 0, linear_map_hash_count);
	}
#endif /* CONFIG_DEBUG_PAGEALLOC */

	/* On U3 based machines, we need to reserve the DART area and
	 * _NOT_ map it to avoid cache paradoxes as it's remapped non
	 * cacheable later on
	 */

	/* create the bolted linear mapping in the hash table */
	for_each_memblock(memory, reg) {
		base = (unsigned long)__va(reg->base);
		size = reg->size;

		DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
		    base, size, prot);

		BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
				prot, mmu_linear_psize, mmu_kernel_ssize));
	}
	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

	/*
	 * If we have a memory_limit and we've allocated TCEs then we need to
	 * explicitly map the TCE area at the top of RAM. We also cope with the
	 * case that the TCEs start below memory_limit.
	 * tce_alloc_start/end are 16MB aligned so the mapping should work
	 * for either 4K or 16MB pages.
	 */
	if (tce_alloc_start) {
		tce_alloc_start = (unsigned long)__va(tce_alloc_start);
		tce_alloc_end = (unsigned long)__va(tce_alloc_end);

		if (base + size >= tce_alloc_start)
			tce_alloc_start = base + size + 1;

		BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
					 __pa(tce_alloc_start), prot,
					 mmu_linear_psize, mmu_kernel_ssize));
	}

	DBG(" <- htab_initialize()\n");
}
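
/*
 * Editor's note, a worked example (hedged): for the bare-metal 16MB
 * htab from the sizing example above, __ilog2(htab_size_bytes) - 18 is
 * 24 - 18 = 6, so _SDR1 carries the table's physical base plus the
 * encoded size 6 (adding works like OR here because the table is
 * allocated aligned to its own size).
 */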
#undef KB
#undef MB

void __init __weak hpte_init_lpar(void)
{
	panic("FW_FEATURE_LPAR set but no LPAR support compiled\n");
}

void __init hash__early_init_mmu(void)
{
	/*
	 * initialize page table size
	 */
	__pte_frag_nr = H_PTE_FRAG_NR;
	__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;

	__pte_index_size = H_PTE_INDEX_SIZE;
	__pmd_index_size = H_PMD_INDEX_SIZE;
	__pud_index_size = H_PUD_INDEX_SIZE;
	__pgd_index_size = H_PGD_INDEX_SIZE;
	__pmd_cache_index = H_PMD_CACHE_INDEX;
	__pte_table_size = H_PTE_TABLE_SIZE;
	__pmd_table_size = H_PMD_TABLE_SIZE;
	__pud_table_size = H_PUD_TABLE_SIZE;
	__pgd_table_size = H_PGD_TABLE_SIZE;
	/*
	 * 4k uses the hugepd format, so for hash set them to
	 * zero
	 */
	__pmd_val_bits = 0;
	__pud_val_bits = 0;
	__pgd_val_bits = 0;

	__kernel_virt_start = H_KERN_VIRT_START;
	__kernel_virt_size = H_KERN_VIRT_SIZE;
	__vmalloc_start = H_VMALLOC_START;
	__vmalloc_end = H_VMALLOC_END;
	vmemmap = (struct page *)H_VMEMMAP_BASE;
	ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PCI
	pci_io_base = ISA_IO_BASE;
#endif

	/* Select appropriate backend */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1))
		ps3_early_mm_init();
	else if (firmware_has_feature(FW_FEATURE_LPAR))
		hpte_init_lpar();
	else
		hpte_init_native();

	/* Initialize the MMU Hash table and create the linear mapping
	 * of memory. Has to be done before SLB initialization as this is
	 * currently where the page size encoding is obtained.
	 */
	htab_initialize();

	pr_info("Initializing hash mmu with SLB\n");
	/* Initialize SLB management */
	slb_initialize();
}

#ifdef CONFIG_SMP
void hash__early_init_mmu_secondary(void)
{
	/* Initialize hash table for that CPU */
	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		if (!cpu_has_feature(CPU_FTR_ARCH_300))
			mtspr(SPRN_SDR1, _SDR1);
		else
			mtspr(SPRN_PTCR,
			      __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
	}
	/* Initialize SLB */
	slb_initialize();
}
#endif /* CONFIG_SMP */

/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
{
	struct page *page;

	if (!pfn_valid(pte_pfn(pte)))
		return pp;

	page = pte_page(pte);

	/* page is dirty */
	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
		if (trap == 0x400) {
			flush_dcache_icache_page(page);
			set_bit(PG_arch_1, &page->flags);
		} else
			pp |= HPTE_R_N;
	}
	return pp;
}

#ifdef CONFIG_PPC_MM_SLICES
static unsigned int get_paca_psize(unsigned long addr)
{
	u64 lpsizes;
	unsigned char *hpsizes;
	unsigned long index, mask_index;

	if (addr < SLICE_LOW_TOP) {
		lpsizes = get_paca()->mm_ctx_low_slices_psize;
		index = GET_LOW_SLICE_INDEX(addr);
		return (lpsizes >> (index * 4)) & 0xF;
	}
	hpsizes = get_paca()->mm_ctx_high_slices_psize;
	index = GET_HIGH_SLICE_INDEX(addr);
	mask_index = index & 0x1;
	return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xF;
}

#else
unsigned int get_paca_psize(unsigned long addr)
{
	return get_paca()->mm_ctx_user_psize;
}
#endif

/*
 * Demote a segment to using 4k pages.
 * For now this makes the whole process use 4k pages.
 */
#ifdef CONFIG_PPC_64K_PAGES
void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
{
	if (get_slice_psize(mm, addr) == MMU_PAGE_4K)
		return;
	slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K);
	copro_flush_all_slbs(mm);
	if ((get_paca_psize(addr) != MMU_PAGE_4K) && (current->mm == mm)) {

		copy_mm_to_paca(&mm->context);
		slb_flush_and_rebolt();
	}
}
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * This looks up a 2-bit protection code for a 4k subpage of a 64k page.
 * Userspace sets the subpage permissions using the subpage_prot system call.
 *
 * Result is 0: full permissions, _PAGE_RW: read-only,
 * _PAGE_RWX: no access.
 */
static int subpage_protection(struct mm_struct *mm, unsigned long ea)
{
	struct subpage_prot_table *spt = &mm->context.spt;
	u32 spp = 0;
	u32 **sbpm, *sbpp;

	if (ea >= spt->maxaddr)
		return 0;
	if (ea < 0x100000000UL) {
		/* addresses below 4GB use spt->low_prot */
		sbpm = spt->low_prot;
	} else {
		sbpm = spt->protptrs[ea >> SBP_L3_SHIFT];
		if (!sbpm)
			return 0;
	}
	sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
	if (!sbpp)
		return 0;
	spp = sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];

	/* extract 2-bit bitfield for this 4k subpage */
	spp >>= 30 - 2 * ((ea >> 12) & 0xf);

	/*
	 * 0 -> full permission
	 * 1 -> Read only
	 * 2 -> no access.
	 * We return the flags that need to be cleared.
	 */
	spp = ((spp & 2) ? _PAGE_RWX : 0) | ((spp & 1) ? _PAGE_WRITE : 0);
	return spp;
}

#else /* CONFIG_PPC_SUBPAGE_PROT */
static inline int subpage_protection(struct mm_struct *mm, unsigned long ea)
{
	return 0;
}
#endif
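
/*
 * Editor's note, a worked example (hedged): each u32 of subpage
 * permissions packs sixteen 2-bit fields, one per 4k subpage of a 64k
 * page. For subpage index 0 the shift above is 30 - 0 = 30, leaving
 * the top two bits; a field value of 2 (no access) maps to _PAGE_RWX,
 * and hash_page_mm() fails the fault with -2 if the requested access
 * overlaps the returned bits.
 */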

void hash_failure_debug(unsigned long ea, unsigned long access,
			unsigned long vsid, unsigned long trap,
			int ssize, int psize, int lpsize, unsigned long pte)
{
	if (!printk_ratelimit())
		return;
	pr_info("mm: Hashing failure ! EA=0x%lx access=0x%lx current=%s\n",
		ea, access, current->comm);
	pr_info("    trap=0x%lx vsid=0x%lx ssize=%d base psize=%d psize %d pte=0x%lx\n",
		trap, vsid, ssize, psize, lpsize, pte);
}

static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
			     int psize, bool user_region)
{
	if (user_region) {
		if (psize != get_paca_psize(ea)) {
			copy_mm_to_paca(&mm->context);
			slb_flush_and_rebolt();
		}
	} else if (get_paca()->vmalloc_sllp !=
		   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
		get_paca()->vmalloc_sllp =
			mmu_psize_defs[mmu_vmalloc_psize].sllp;
		slb_vmalloc_update();
	}
}

/* Result code is:
 *  0 - handled
 *  1 - normal page fault
 * -1 - critical hash insertion error
 * -2 - access not permitted by subpage protection mechanism
 */
int hash_page_mm(struct mm_struct *mm, unsigned long ea,
		 unsigned long access, unsigned long trap,
		 unsigned long flags)
{
	bool is_thp;
	enum ctx_state prev_state = exception_enter();
	pgd_t *pgdir;
	unsigned long vsid;
	pte_t *ptep;
	unsigned hugeshift;
	const struct cpumask *tmp;
	int rc, user_region = 0;
	int psize, ssize;

	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
		ea, access, trap);
	trace_hash_fault(ea, access, trap);

	/* Get region & vsid */
	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
		user_region = 1;
		if (! mm) {
			DBG_LOW(" user region with no mm !\n");
			rc = 1;
			goto bail;
		}
		psize = get_slice_psize(mm, ea);
		ssize = user_segment_size(ea);
		vsid = get_vsid(mm->context.id, ea, ssize);
		break;
	case VMALLOC_REGION_ID:
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		ssize = mmu_kernel_ssize;
		break;
	default:
		/* Not a valid range
		 * Send the problem up to do_page_fault
		 */
		rc = 1;
		goto bail;
	}
	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);

	/* Bad address. */
	if (!vsid) {
		DBG_LOW("Bad address!\n");
		rc = 1;
		goto bail;
	}
	/* Get pgdir */
	pgdir = mm->pgd;
	if (pgdir == NULL) {
		rc = 1;
		goto bail;
	}

	/* Check CPU locality */
	tmp = cpumask_of(smp_processor_id());
	if (user_region && cpumask_equal(mm_cpumask(mm), tmp))
		flags |= HPTE_LOCAL_UPDATE;

#ifndef CONFIG_PPC_64K_PAGES
	/* If we use 4K pages and our psize is not 4K, then we might
	 * be hitting a special driver mapping, and need to align the
	 * address before we fetch the PTE.
	 *
	 * It could also be a hugepage mapping, in which case this is
	 * not necessary, but it's not harmful, either.
	 */
	if (psize != MMU_PAGE_4K)
		ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
#endif /* CONFIG_PPC_64K_PAGES */

	/* Get PTE and page size from page tables */
	ptep = __find_linux_pte_or_hugepte(pgdir, ea, &is_thp, &hugeshift);
	if (ptep == NULL || !pte_present(*ptep)) {
		DBG_LOW(" no PTE !\n");
		rc = 1;
		goto bail;
	}

	/* Add _PAGE_PRESENT to the required access perm */
	access |= _PAGE_PRESENT;

	/* Pre-check access permissions (will be re-checked atomically
	 * in __hash_page_XX but this pre-check is a fast path)
	 */
	if (!check_pte_access(access, pte_val(*ptep))) {
		DBG_LOW(" no access !\n");
		rc = 1;
		goto bail;
	}

	if (hugeshift) {
		if (is_thp)
			rc = __hash_page_thp(ea, access, vsid, (pmd_t *)ptep,
					     trap, flags, ssize, psize);
#ifdef CONFIG_HUGETLB_PAGE
		else
			rc = __hash_page_huge(ea, access, vsid, ptep, trap,
					      flags, ssize, hugeshift, psize);
#else
		else {
			/*
			 * if we get here with hugeshift set but the page is
			 * neither transparent huge nor hugetlb (which is
			 * disabled), something is really wrong.
			 */
			rc = 1;
			WARN_ON(1);
		}
#endif
		if (current->mm == mm)
			check_paca_psize(ea, mm, psize, user_region);

		goto bail;
	}

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	/* Do actual hashing */
#ifdef CONFIG_PPC_64K_PAGES
	/* If H_PAGE_4K_PFN is set, make sure this is a 4k segment */
	if ((pte_val(*ptep) & H_PAGE_4K_PFN) && psize == MMU_PAGE_64K) {
		demote_segment_4k(mm, ea);
		psize = MMU_PAGE_4K;
	}

	/* If this PTE is non-cacheable and we have restrictions on
	 * using non cacheable large pages, then we switch to 4k
	 */
	if (mmu_ci_restrictions && psize == MMU_PAGE_64K && pte_ci(*ptep)) {
		if (user_region) {
			demote_segment_4k(mm, ea);
			psize = MMU_PAGE_4K;
		} else if (ea < VMALLOC_END) {
			/*
			 * some driver did a non-cacheable mapping
			 * in vmalloc space, so switch vmalloc
			 * to 4k pages
			 */
			printk(KERN_ALERT "Reducing vmalloc segment "
			       "to 4kB pages because of "
			       "non-cacheable mapping\n");
			psize = mmu_vmalloc_psize = MMU_PAGE_4K;
			copro_flush_all_slbs(mm);
		}
	}

#endif /* CONFIG_PPC_64K_PAGES */

	if (current->mm == mm)
		check_paca_psize(ea, mm, psize, user_region);

#ifdef CONFIG_PPC_64K_PAGES
	if (psize == MMU_PAGE_64K)
		rc = __hash_page_64K(ea, access, vsid, ptep, trap,
				     flags, ssize);
	else
#endif /* CONFIG_PPC_64K_PAGES */
	{
		int spp = subpage_protection(mm, ea);
		if (access & spp)
			rc = -2;
		else
			rc = __hash_page_4K(ea, access, vsid, ptep, trap,
					    flags, ssize, spp);
	}

	/* Dump some info in case of hash insertion failure, they should
	 * never happen so it is really useful to know if/when they do
	 */
	if (rc == -1)
		hash_failure_debug(ea, access, vsid, trap, ssize, psize,
				   psize, pte_val(*ptep));
#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	DBG_LOW(" -> rc=%d\n", rc);

bail:
	exception_exit(prev_state);
	return rc;
}
EXPORT_SYMBOL_GPL(hash_page_mm);

int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
	      unsigned long dsisr)
{
	unsigned long flags = 0;
	struct mm_struct *mm = current->mm;

	if (REGION_ID(ea) == VMALLOC_REGION_ID)
		mm = &init_mm;

	if (dsisr & DSISR_NOHPTE)
		flags |= HPTE_NOHPTE_UPDATE;

	return hash_page_mm(mm, ea, access, trap, flags);
}
EXPORT_SYMBOL_GPL(hash_page);

int __hash_page(unsigned long ea, unsigned long msr, unsigned long trap,
		unsigned long dsisr)
{
	unsigned long access = _PAGE_PRESENT | _PAGE_READ;
	unsigned long flags = 0;
	struct mm_struct *mm = current->mm;

	if (REGION_ID(ea) == VMALLOC_REGION_ID)
		mm = &init_mm;

	if (dsisr & DSISR_NOHPTE)
		flags |= HPTE_NOHPTE_UPDATE;

	if (dsisr & DSISR_ISSTORE)
		access |= _PAGE_WRITE;
	/*
	 * We set _PAGE_PRIVILEGED only when
	 * kernel mode accesses kernel space.
	 *
	 * _PAGE_PRIVILEGED is NOT set
	 * 1) when kernel mode accesses user space
	 * 2) when user space accesses kernel space.
	 */
	access |= _PAGE_PRIVILEGED;
	if ((msr & MSR_PR) || (REGION_ID(ea) == USER_REGION_ID))
		access &= ~_PAGE_PRIVILEGED;

	if (trap == 0x400)
		access |= _PAGE_EXEC;

	return hash_page_mm(mm, ea, access, trap, flags);
}
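
/*
 * Editor's note, a worked example (hedged): for a user-mode store
 * fault (MSR_PR set, DSISR_ISSTORE set), the mask built above is
 * _PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE with _PAGE_PRIVILEGED
 * stripped; an instruction fault (trap 0x400) adds _PAGE_EXEC as
 * well.
 */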

#ifdef CONFIG_PPC_MM_SLICES
static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
{
	int psize = get_slice_psize(mm, ea);

	/* We only prefault standard pages for now */
	if (unlikely(psize != mm->context.user_psize))
		return false;

	/*
	 * Don't prefault if subpage protection is enabled for the EA.
	 */
	if (unlikely((psize == MMU_PAGE_4K) && subpage_protection(mm, ea)))
		return false;

	return true;
}
#else
static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
{
	return true;
}
#endif

void hash_preload(struct mm_struct *mm, unsigned long ea,
		  unsigned long access, unsigned long trap)
{
	int hugepage_shift;
	unsigned long vsid;
	pgd_t *pgdir;
	pte_t *ptep;
	unsigned long flags;
	int rc, ssize, update_flags = 0;

	BUG_ON(REGION_ID(ea) != USER_REGION_ID);

	if (!should_hash_preload(mm, ea))
		return;

	DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
		" trap=%lx\n", mm, mm->pgd, ea, access, trap);

	/* Get Linux PTE if available */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return;

	/* Get VSID */
	ssize = user_segment_size(ea);
	vsid = get_vsid(mm->context.id, ea, ssize);
	if (!vsid)
		return;
	/*
	 * Hash doesn't like irqs. Walking linux page table with irq disabled
	 * saves us from holding multiple locks.
	 */
	local_irq_save(flags);

	/*
	 * THP pages use update_mmu_cache_pmd. We don't do
	 * hash preload there. Hence can ignore THP here
	 */
	ptep = find_linux_pte_or_hugepte(pgdir, ea, NULL, &hugepage_shift);
	if (!ptep)
		goto out_exit;

	WARN_ON(hugepage_shift);
#ifdef CONFIG_PPC_64K_PAGES
	/* If either H_PAGE_4K_PFN or cache inhibited is set (and we are on
	 * a 64K kernel), then we don't preload, hash_page() will take
	 * care of it once we actually try to access the page.
	 * That way we don't have to duplicate all of the logic for segment
	 * page size demotion here
	 */
	if ((pte_val(*ptep) & H_PAGE_4K_PFN) || pte_ci(*ptep))
		goto out_exit;
#endif /* CONFIG_PPC_64K_PAGES */

	/* Is that local to this CPU ? */
	if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		update_flags |= HPTE_LOCAL_UPDATE;

	/* Hash it in */
#ifdef CONFIG_PPC_64K_PAGES
	if (mm->context.user_psize == MMU_PAGE_64K)
		rc = __hash_page_64K(ea, access, vsid, ptep, trap,
				     update_flags, ssize);
	else
#endif /* CONFIG_PPC_64K_PAGES */
		rc = __hash_page_4K(ea, access, vsid, ptep, trap, update_flags,
				    ssize, subpage_protection(mm, ea));

	/* Dump some info in case of hash insertion failure, they should
	 * never happen so it is really useful to know if/when they do
	 */
	if (rc == -1)
		hash_failure_debug(ea, access, vsid, trap, ssize,
				   mm->context.user_psize,
				   mm->context.user_psize,
				   pte_val(*ptep));
out_exit:
	local_irq_restore(flags);
}

/* WARNING: This is called from hash_low_64.S, if you change this prototype,
 * do not forget to update the assembly call site !
 */
void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
		     unsigned long flags)
{
	unsigned long hash, index, shift, hidx, slot;
	int local = flags & HPTE_LOCAL_UPDATE;

	DBG_LOW("flush_hash_page(vpn=%016lx)\n", vpn);
	pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
		hash = hpt_hash(vpn, shift, ssize);
		hidx = __rpte_to_hidx(pte, index);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;
		DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx);
		/*
		 * We use same base page size and actual psize, because we don't
		 * use these functions for hugepage
		 */
		ppc_md.hpte_invalidate(slot, vpn, psize, psize, ssize, local);
	} pte_iterate_hashed_end();

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Transactions are not aborted by tlbiel, only tlbie.
	 * Without, syncing a page back to a block device w/ PIO could pick up
	 * transactional data (bad!) so we force an abort here. Before the
	 * sync the page will be made read-only, which will flush_hash_page.
	 * BIG ISSUE here: if the kernel uses a page from userspace without
	 * unmapping it first, it may see the speculated version.
	 */
	if (local && cpu_has_feature(CPU_FTR_TM) &&
	    current->thread.regs &&
	    MSR_TM_ACTIVE(current->thread.regs->msr)) {
		tm_enable();
		tm_abort(TM_CAUSE_TLBI);
	}
#endif
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
			 pmd_t *pmdp, unsigned int psize, int ssize,
			 unsigned long flags)
{
	int i, max_hpte_count, valid;
	unsigned long s_addr;
	unsigned char *hpte_slot_array;
	unsigned long hidx, shift, vpn, hash, slot;
	int local = flags & HPTE_LOCAL_UPDATE;

	s_addr = addr & HPAGE_PMD_MASK;
	hpte_slot_array = get_hpte_slot_array(pmdp);
	/*
	 * If we try to do a HUGE PTE update after a withdraw is done,
	 * we will find the below NULL. This happens when we do
	 * split_huge_page_pmd
	 */
	if (!hpte_slot_array)
		return;

	if (ppc_md.hugepage_invalidate) {
		ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
					   psize, ssize, local);
		goto tm_abort;
	}
	/*
	 * No bulk hpte removal support, invalidate each entry
	 */
	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = HPAGE_PMD_SIZE >> shift;
	for (i = 0; i < max_hpte_count; i++) {
		/*
		 * 8 bits per each hpte entry
		 * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
		 */
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;
		ppc_md.hpte_invalidate(slot, vpn, psize,
				       MMU_PAGE_16M, ssize, local);
	}
tm_abort:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Transactions are not aborted by tlbiel, only tlbie.
	 * Without, syncing a page back to a block device w/ PIO could pick up
	 * transactional data (bad!) so we force an abort here. Before the
	 * sync the page will be made read-only, which will flush_hash_page.
	 * BIG ISSUE here: if the kernel uses a page from userspace without
	 * unmapping it first, it may see the speculated version.
	 */
	if (local && cpu_has_feature(CPU_FTR_TM) &&
	    current->thread.regs &&
	    MSR_TM_ACTIVE(current->thread.regs->msr)) {
		tm_enable();
		tm_abort(TM_CAUSE_TLBI);
	}
#endif
	return;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

void flush_hash_range(unsigned long number, int local)
{
	if (ppc_md.flush_hash_range)
		ppc_md.flush_hash_range(number, local);
	else {
		int i;
		struct ppc64_tlb_batch *batch =
			this_cpu_ptr(&ppc64_tlb_batch);

		for (i = 0; i < number; i++)
			flush_hash_page(batch->vpn[i], batch->pte[i],
					batch->psize, batch->ssize, local);
	}
}

/*
 * low_hash_fault is called when the low-level hash code failed
 * to insert a PTE due to a hypervisor error
 */
void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
{
	enum ctx_state prev_state = exception_enter();

	if (user_mode(regs)) {
#ifdef CONFIG_PPC_SUBPAGE_PROT
		if (rc == -2)
			_exception(SIGSEGV, regs, SEGV_ACCERR, address);
		else
#endif
			_exception(SIGBUS, regs, BUS_ADRERR, address);
	} else
		bad_page_fault(regs, address, SIGBUS);

	exception_exit(prev_state);
}

long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
			   unsigned long pa, unsigned long rflags,
			   unsigned long vflags, int psize, int ssize)
{
	unsigned long hpte_group;
	long slot;

repeat:
	hpte_group = ((hash & htab_hash_mask) *
		       HPTES_PER_GROUP) & ~0x7UL;

	/* Insert into the hash table, primary slot */
	slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, vflags,
				  psize, psize, ssize);

	/* Primary is full, try the secondary */
	if (unlikely(slot == -1)) {
		hpte_group = ((~hash & htab_hash_mask) *
			      HPTES_PER_GROUP) & ~0x7UL;
		slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags,
					  vflags | HPTE_V_SECONDARY,
					  psize, psize, ssize);
		if (slot == -1) {
			if (mftb() & 0x1)
				hpte_group = ((hash & htab_hash_mask) *
					      HPTES_PER_GROUP) & ~0x7UL;

			ppc_md.hpte_remove(hpte_group);
			goto repeat;
		}
	}

	return slot;
}
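
/*
 * Editor's note (hedged): the secondary group above is the classic
 * HPT scheme of complementing the hash (~hash) before masking. If
 * both groups are full, a victim group is picked pseudo-randomly
 * from the low timebase bit, one entry is evicted with
 * ppc_md.hpte_remove() and the insert is retried.
 */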

#ifdef CONFIG_DEBUG_PAGEALLOC
static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
{
	unsigned long hash;
	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
	unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
	unsigned long mode = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL));
	long ret;

	hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);

	/* Don't create HPTE entries for bad address */
	if (!vsid)
		return;

	ret = hpte_insert_repeating(hash, vpn, __pa(vaddr), mode,
				    HPTE_V_BOLTED,
				    mmu_linear_psize, mmu_kernel_ssize);

	BUG_ON (ret < 0);
	spin_lock(&linear_map_hash_lock);
	BUG_ON(linear_map_hash_slots[lmi] & 0x80);
	linear_map_hash_slots[lmi] = ret | 0x80;
	spin_unlock(&linear_map_hash_lock);
}

static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
{
	unsigned long hash, hidx, slot;
	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
	unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);

	hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
	spin_lock(&linear_map_hash_lock);
	BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
	hidx = linear_map_hash_slots[lmi] & 0x7f;
	linear_map_hash_slots[lmi] = 0;
	spin_unlock(&linear_map_hash_lock);
	if (hidx & _PTEIDX_SECONDARY)
		hash = ~hash;
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot += hidx & _PTEIDX_GROUP_IX;
	ppc_md.hpte_invalidate(slot, vpn, mmu_linear_psize, mmu_linear_psize,
			       mmu_kernel_ssize, 0);
}

void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long flags, vaddr, lmi;
	int i;

	local_irq_save(flags);
	for (i = 0; i < numpages; i++, page++) {
		vaddr = (unsigned long)page_address(page);
		lmi = __pa(vaddr) >> PAGE_SHIFT;
		if (lmi >= linear_map_hash_count)
			continue;
		if (enable)
			kernel_map_linear_page(vaddr, lmi);
		else
			kernel_unmap_linear_page(vaddr, lmi);
	}
	local_irq_restore(flags);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */

void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);

	/* On LPAR systems, the first entry is our RMA region,
	 * non-LPAR 64-bit hash MMU systems don't have a limitation
	 * on real mode access, but using the first entry works well
	 * enough. We also clamp it to 1G to avoid some funky things
	 * such as RTAS bugs etc...
	 */
	ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);

	/* Finally limit subsequent allocations */
	memblock_set_current_limit(ppc64_rma_size);
}