[POWERPC] Use SLB size from the device tree
arch/powerpc/mm/hash_utils_64.c
/*
 * PowerPC64 port by Mike Corrigan and Dave Engebretsen
 *   {mikejc|engebret}@us.ibm.com
 *
 *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *    Module name: htab.c
 *
 *    Description:
 *      PowerPC Hashed Page Table functions
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG
#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/signal.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/lmb.h>
#include <asm/abs_addr.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/eeh.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/spu.h>
#include <asm/udbg.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#define KB (1024)
#define MB (1024*KB)

/*
 * Note:  pte   --> Linux PTE
 *        HPTE  --> PowerPC Hashed Page Table Entry
 *
 * Execution context:
 *   htab_initialize is called with the MMU off (of course), but
 *   the kernel has been copied down to zero so it can directly
 *   reference global data.  At this point it is very difficult
 *   to print debug info.
 *
 */

#ifdef CONFIG_U3_DART
extern unsigned long dart_tablebase;
#endif /* CONFIG_U3_DART */

static unsigned long _SDR1;
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];

struct hash_pte *htab_address;
unsigned long htab_size_bytes;
unsigned long htab_hash_mask;
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
int mmu_vmalloc_psize = MMU_PAGE_4K;
int mmu_io_psize = MMU_PAGE_4K;
int mmu_kernel_ssize = MMU_SEGSIZE_256M;
int mmu_highuser_ssize = MMU_SEGSIZE_256M;
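/* Default number of SLB entries; boot code may override this from the
 * "slb-size" (or legacy "ibm,slb-size") device-tree property when present */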
u16 mmu_slb_size = 64;
#ifdef CONFIG_HUGETLB_PAGE
int mmu_huge_psize = MMU_PAGE_16M;
unsigned int HPAGE_SHIFT;
#endif
#ifdef CONFIG_PPC_64K_PAGES
int mmu_ci_restrictions;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
static u8 *linear_map_hash_slots;
static unsigned long linear_map_hash_count;
static DEFINE_SPINLOCK(linear_map_hash_lock);
#endif /* CONFIG_DEBUG_PAGEALLOC */
/* Definitions of the page-size arrays to use when the firmware does
 * not provide them.
 */

/* Pre-POWER4 CPUs (4k pages only)
 */
struct mmu_psize_def mmu_psize_defaults_old[] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.sllp	= 0,
		.penc	= 0,
		.avpnm	= 0,
		.tlbiel = 0,
	},
};

/* POWER4, GPUL, POWER5
 *
 * Support for 16Mb large pages
 */
struct mmu_psize_def mmu_psize_defaults_gp[] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.sllp	= 0,
		.penc	= 0,
		.avpnm	= 0,
		.tlbiel = 1,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.sllp	= SLB_VSID_L,
		.penc	= 0,
		.avpnm	= 0x1UL,
		.tlbiel = 0,
	},
};


int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
		      unsigned long pstart, unsigned long mode,
		      int psize, int ssize)
{
	unsigned long vaddr, paddr;
	unsigned int step, shift;
	unsigned long tmp_mode;
	int ret = 0;

	shift = mmu_psize_defs[psize].shift;
	step = 1 << shift;

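	/* Step through the range one page of the chosen size at a time,
	 * computing the hash group and inserting a bolted HPTE for each */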
	for (vaddr = vstart, paddr = pstart; vaddr < vend;
	     vaddr += step, paddr += step) {
		unsigned long hash, hpteg;
		unsigned long vsid = get_kernel_vsid(vaddr, ssize);
		unsigned long va = hpt_va(vaddr, vsid, ssize);

		tmp_mode = mode;

		/* Make non-kernel text non-executable */
		if (!in_kernel_text(vaddr))
			tmp_mode = mode | HPTE_R_N;

		hash = hpt_hash(va, shift, ssize);
		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

		DBG("htab_bolt_mapping: calling %p\n", ppc_md.hpte_insert);

		BUG_ON(!ppc_md.hpte_insert);
		ret = ppc_md.hpte_insert(hpteg, va, paddr,
					 tmp_mode, HPTE_V_BOLTED, psize, ssize);

		if (ret < 0)
			break;
#ifdef CONFIG_DEBUG_PAGEALLOC
		if ((paddr >> PAGE_SHIFT) < linear_map_hash_count)
			linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
#endif /* CONFIG_DEBUG_PAGEALLOC */
	}
	return ret < 0 ? ret : 0;
}

static int __init htab_dt_scan_seg_sizes(unsigned long node,
					 const char *uname, int depth,
					 void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;
	unsigned long size = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,processor-segment-sizes",
					  &size);
	if (prop == NULL)
		return 0;
	for (; size >= 4; size -= 4, ++prop) {
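		/* Each cell is a segment-size shift; 0x28 == 40
		 * (i.e. 2^40 bytes) advertises 1TB segment support */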
		if (prop[0] == 40) {
			DBG("1T segment support detected\n");
			cur_cpu_spec->cpu_features |= CPU_FTR_1T_SEGMENT;
			return 1;
		}
	}
	cur_cpu_spec->cpu_features &= ~CPU_FTR_NO_SLBIE_B;
	return 0;
}

static void __init htab_init_seg_sizes(void)
{
	of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
}

static int __init htab_dt_scan_page_sizes(unsigned long node,
					  const char *uname, int depth,
					  void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;
	unsigned long size = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = (u32 *)of_get_flat_dt_prop(node,
					  "ibm,segment-page-sizes", &size);
	if (prop != NULL) {
		DBG("Page sizes from device-tree:\n");
		size /= 4;
		cur_cpu_spec->cpu_features &= ~(CPU_FTR_16M_PAGE);
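		/* Each entry is: base page shift, SLB encoding, count of
		 * supported actual page sizes, then that many
		 * (page shift, HPTE encoding) pairs */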
		while(size > 0) {
			unsigned int shift = prop[0];
			unsigned int slbenc = prop[1];
			unsigned int lpnum = prop[2];
			unsigned int lpenc = 0;
			struct mmu_psize_def *def;
			int idx = -1;

			size -= 3; prop += 3;
			while(size > 0 && lpnum) {
				if (prop[0] == shift)
					lpenc = prop[1];
				prop += 2; size -= 2;
				lpnum--;
			}
			switch(shift) {
			case 0xc:
				idx = MMU_PAGE_4K;
				break;
			case 0x10:
				idx = MMU_PAGE_64K;
				break;
			case 0x14:
				idx = MMU_PAGE_1M;
				break;
			case 0x18:
				idx = MMU_PAGE_16M;
				cur_cpu_spec->cpu_features |= CPU_FTR_16M_PAGE;
				break;
			case 0x22:
				idx = MMU_PAGE_16G;
				break;
			}
			if (idx < 0)
				continue;
			def = &mmu_psize_defs[idx];
			def->shift = shift;
			if (shift <= 23)
				def->avpnm = 0;
			else
				def->avpnm = (1 << (shift - 23)) - 1;
			def->sllp = slbenc;
			def->penc = lpenc;
			/* We don't know for sure what's up with tlbiel, so
			 * for now we only set it for 4K and 64K pages
			 */
			if (idx == MMU_PAGE_4K || idx == MMU_PAGE_64K)
				def->tlbiel = 1;
			else
				def->tlbiel = 0;

			DBG(" %d: shift=%02x, sllp=%04x, avpnm=%08x, "
			    "tlbiel=%d, penc=%d\n",
			    idx, shift, def->sllp, def->avpnm, def->tlbiel,
			    def->penc);
		}
		return 1;
	}
	return 0;
}

static void __init htab_init_page_sizes(void)
{
	int rc;

	/* Default to 4K pages only */
	memcpy(mmu_psize_defs, mmu_psize_defaults_old,
	       sizeof(mmu_psize_defaults_old));

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
	if (rc != 0)  /* Found */
		goto found;

	/*
	 * Not in the device-tree; fall back to the known size list
	 * for 16M-capable GP & GR
	 */
	if (cpu_has_feature(CPU_FTR_16M_PAGE))
		memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
		       sizeof(mmu_psize_defaults_gp));
 found:
#ifndef CONFIG_DEBUG_PAGEALLOC
	/*
	 * Pick a size for the linear mapping. Currently, we only support
	 * 16M, 1M and 4K which is the default
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		mmu_linear_psize = MMU_PAGE_16M;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		mmu_linear_psize = MMU_PAGE_1M;
#endif /* CONFIG_DEBUG_PAGEALLOC */

#ifdef CONFIG_PPC_64K_PAGES
	/*
	 * Pick a size for the ordinary pages. Default is 4K, we support
	 * 64K for user mappings and vmalloc if supported by the processor.
	 * We only use 64k for ioremap if the processor
	 * (and firmware) support cache-inhibited large pages.
	 * If not, we use 4k and set mmu_ci_restrictions so that
	 * hash_page knows to switch processes that use cache-inhibited
	 * mappings to 4k pages.
	 */
	if (mmu_psize_defs[MMU_PAGE_64K].shift) {
		mmu_virtual_psize = MMU_PAGE_64K;
		mmu_vmalloc_psize = MMU_PAGE_64K;
		if (mmu_linear_psize == MMU_PAGE_4K)
			mmu_linear_psize = MMU_PAGE_64K;
		if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
			mmu_io_psize = MMU_PAGE_64K;
		else
			mmu_ci_restrictions = 1;
	}
#endif /* CONFIG_PPC_64K_PAGES */

	printk(KERN_DEBUG "Page orders: linear mapping = %d, "
	       "virtual = %d, io = %d\n",
	       mmu_psize_defs[mmu_linear_psize].shift,
	       mmu_psize_defs[mmu_virtual_psize].shift,
	       mmu_psize_defs[mmu_io_psize].shift);

#ifdef CONFIG_HUGETLB_PAGE
	/* Init large page size. Currently, we pick 16M or 1M depending
	 * on what is available
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		mmu_huge_psize = MMU_PAGE_16M;
	/* With 4k/4level pagetables, we can't (for now) cope with a
	 * huge page size < PMD_SIZE */
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		mmu_huge_psize = MMU_PAGE_1M;

	/* Calculate HPAGE_SHIFT and sanity check it */
	if (mmu_psize_defs[mmu_huge_psize].shift > MIN_HUGEPTE_SHIFT &&
	    mmu_psize_defs[mmu_huge_psize].shift < SID_SHIFT)
		HPAGE_SHIFT = mmu_psize_defs[mmu_huge_psize].shift;
	else
		HPAGE_SHIFT = 0; /* No huge pages dude ! */
#endif /* CONFIG_HUGETLB_PAGE */
}

static int __init htab_dt_scan_pftsize(unsigned long node,
				       const char *uname, int depth,
				       void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
	if (prop != NULL) {
		/* pft_size[0] is the NUMA CEC cookie */
		ppc64_pft_size = prop[1];
		return 1;
	}
	return 0;
}

static unsigned long __init htab_get_table_size(void)
{
	unsigned long mem_size, rnd_mem_size, pteg_count;

	/* If the hash size isn't already provided by the platform, we try to
	 * retrieve it from the device-tree. If it's not there either, we
	 * calculate it now based on the total RAM size
	 */
	if (ppc64_pft_size == 0)
		of_scan_flat_dt(htab_dt_scan_pftsize, NULL);
	if (ppc64_pft_size)
		return 1UL << ppc64_pft_size;

	/* round mem_size up to next power of 2 */
	mem_size = lmb_phys_mem_size();
	rnd_mem_size = 1UL << __ilog2(mem_size);
	if (rnd_mem_size < mem_size)
		rnd_mem_size <<= 1;

	/* # pages / 2 */
	pteg_count = max(rnd_mem_size >> (12 + 1), 1UL << 11);

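	/* A PTEG is 8 HPTEs of 16 bytes each (128 bytes), hence << 7;
	 * the 1UL << 11 floor above is the architectural minimum of
	 * 2048 PTEGs, i.e. a 256kB hash table */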
	return pteg_count << 7;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void create_section_mapping(unsigned long start, unsigned long end)
{
	BUG_ON(htab_bolt_mapping(start, end, __pa(start),
		_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX,
		mmu_linear_psize, mmu_kernel_ssize));
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static inline void make_bl(unsigned int *insn_addr, void *func)
{
	unsigned long funcp = *((unsigned long *)func);
	int offset = funcp - (unsigned long)insn_addr;

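	/* 0x48000001 is an I-form "bl" (opcode 18 with LK=1); the signed,
	 * word-aligned branch displacement lives in bits 0x03fffffc */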
	*insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
	flush_icache_range((unsigned long)insn_addr, 4+
			   (unsigned long)insn_addr);
}

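/* Patch the branch-and-link call sites in the assembly hash path so
 * that they call this platform's HPTE management functions directly */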
static void __init htab_finish_init(void)
{
	extern unsigned int *htab_call_hpte_insert1;
	extern unsigned int *htab_call_hpte_insert2;
	extern unsigned int *htab_call_hpte_remove;
	extern unsigned int *htab_call_hpte_updatepp;

#ifdef CONFIG_PPC_HAS_HASH_64K
	extern unsigned int *ht64_call_hpte_insert1;
	extern unsigned int *ht64_call_hpte_insert2;
	extern unsigned int *ht64_call_hpte_remove;
	extern unsigned int *ht64_call_hpte_updatepp;

	make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert);
	make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert);
	make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove);
	make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp);
#endif /* CONFIG_PPC_HAS_HASH_64K */

	make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
	make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
	make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
	make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
}

void __init htab_initialize(void)
{
	unsigned long table;
	unsigned long pteg_count;
	unsigned long mode_rw;
	unsigned long base = 0, size = 0;
	int i;

	extern unsigned long tce_alloc_start, tce_alloc_end;

	DBG(" -> htab_initialize()\n");

	/* Initialize segment sizes */
	htab_init_seg_sizes();

	/* Initialize page sizes */
	htab_init_page_sizes();

	if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) {
		mmu_kernel_ssize = MMU_SEGSIZE_1T;
		mmu_highuser_ssize = MMU_SEGSIZE_1T;
		printk(KERN_INFO "Using 1TB segments\n");
	}

	/*
	 * Calculate the required size of the htab.  We want the number of
	 * PTEGs to equal one half the number of real pages.
	 */
	htab_size_bytes = htab_get_table_size();
	pteg_count = htab_size_bytes >> 7;

	htab_hash_mask = pteg_count - 1;

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		/* Using a hypervisor which owns the htab */
		htab_address = NULL;
		_SDR1 = 0;
	} else {
		/* Find storage for the HPT.  Must be contiguous in
		 * the absolute address space.
		 */
		table = lmb_alloc(htab_size_bytes, htab_size_bytes);

		DBG("Hash table allocated at %lx, size: %lx\n", table,
		    htab_size_bytes);

		htab_address = abs_to_virt(table);

		/* htab absolute addr + encoded htabsize */
		_SDR1 = table + __ilog2(pteg_count) - 11;
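		/* The HTABSIZE field of SDR1 is log2(#PTEGs) - 11; adding it
		 * to "table" works because the table is aligned to its own
		 * size, so the low bits of the address are clear */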

		/* Initialize the HPT with no entries */
		memset((void *)table, 0, htab_size_bytes);

		/* Set SDR1 */
		mtspr(SPRN_SDR1, _SDR1);
	}

	mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;

#ifdef CONFIG_DEBUG_PAGEALLOC
	linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT;
	linear_map_hash_slots = __va(lmb_alloc_base(linear_map_hash_count,
						    1, lmb.rmo_size));
	memset(linear_map_hash_slots, 0, linear_map_hash_count);
#endif /* CONFIG_DEBUG_PAGEALLOC */

	/* On U3 based machines, we need to reserve the DART area and
	 * _NOT_ map it to avoid cache paradoxes as it's remapped non
	 * cacheable later on
	 */

	/* Create the bolted linear mapping in the hash table */
	for (i=0; i < lmb.memory.cnt; i++) {
		base = (unsigned long)__va(lmb.memory.region[i].base);
		size = lmb.memory.region[i].size;

		DBG("creating mapping for region: %lx : %lx\n", base, size);

#ifdef CONFIG_U3_DART
		/* Do not map the DART space. Fortunately, it will be aligned
		 * in such a way that it will not cross two lmb regions and
		 * will fit within a single 16Mb page.
		 * The DART space is assumed to be a full 16Mb region even if
		 * we only use 2Mb of that space. We will use more of it later
		 * for AGP GART. We have to use a full 16Mb large page.
		 */
		DBG("DART base: %lx\n", dart_tablebase);

		if (dart_tablebase != 0 && dart_tablebase >= base
		    && dart_tablebase < (base + size)) {
			unsigned long dart_table_end = dart_tablebase + 16 * MB;
			if (base != dart_tablebase)
				BUG_ON(htab_bolt_mapping(base, dart_tablebase,
							__pa(base), mode_rw,
							mmu_linear_psize,
							mmu_kernel_ssize));
			if ((base + size) > dart_table_end)
				BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
							base + size,
							__pa(dart_table_end),
							mode_rw,
							mmu_linear_psize,
							mmu_kernel_ssize));
			continue;
		}
#endif /* CONFIG_U3_DART */
		BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
					 mode_rw, mmu_linear_psize,
					 mmu_kernel_ssize));
	}

	/*
	 * If we have a memory_limit and we've allocated TCEs then we need to
	 * explicitly map the TCE area at the top of RAM. We also cope with the
	 * case that the TCEs start below memory_limit.
	 * tce_alloc_start/end are 16MB aligned so the mapping should work
	 * for either 4K or 16MB pages.
	 */
	if (tce_alloc_start) {
		tce_alloc_start = (unsigned long)__va(tce_alloc_start);
		tce_alloc_end = (unsigned long)__va(tce_alloc_end);

		if (base + size >= tce_alloc_start)
			tce_alloc_start = base + size + 1;

		BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
					 __pa(tce_alloc_start), mode_rw,
					 mmu_linear_psize, mmu_kernel_ssize));
	}

	htab_finish_init();

	DBG(" <- htab_initialize()\n");
}
#undef KB
#undef MB

void htab_initialize_secondary(void)
{
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		mtspr(SPRN_SDR1, _SDR1);
}

/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
{
	struct page *page;

	if (!pfn_valid(pte_pfn(pte)))
		return pp;

	page = pte_page(pte);

	/* page is dirty */
	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
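		/* On an instruction fault (trap 0x400), flush the page out
		 * of the icache and remember that it is now clean; on a data
		 * fault, just map the page no-execute for the time being */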
		if (trap == 0x400) {
			__flush_dcache_icache(page_address(page));
			set_bit(PG_arch_1, &page->flags);
		} else
			pp |= HPTE_R_N;
	}
	return pp;
}

/*
 * Demote a segment to using 4k pages.
 * For now this makes the whole process use 4k pages.
 */
#ifdef CONFIG_PPC_64K_PAGES
static void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
{
	if (mm->context.user_psize == MMU_PAGE_4K)
		return;
	slice_set_user_psize(mm, MMU_PAGE_4K);
#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
}
#endif /* CONFIG_PPC_64K_PAGES */

/* Result code is:
 *  0 - handled
 *  1 - normal page fault
 * -1 - critical hash insertion error
 */
int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
{
	void *pgdir;
	unsigned long vsid;
	struct mm_struct *mm;
	pte_t *ptep;
	cpumask_t tmp;
	int rc, user_region = 0, local = 0;
	int psize, ssize;

	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx)\n",
		ea, access, trap);

	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
		DBG_LOW(" out of pgtable range !\n");
		return 1;
	}

	/* Get region & vsid */
	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
		user_region = 1;
		mm = current->mm;
		if (! mm) {
			DBG_LOW(" user region with no mm !\n");
			return 1;
		}
#ifdef CONFIG_PPC_MM_SLICES
		psize = get_slice_psize(mm, ea);
#else
		psize = mm->context.user_psize;
#endif
		ssize = user_segment_size(ea);
		vsid = get_vsid(mm->context.id, ea, ssize);
		break;
	case VMALLOC_REGION_ID:
		mm = &init_mm;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		ssize = mmu_kernel_ssize;
		break;
	default:
		/* Not a valid range
		 * Send the problem up to do_page_fault
		 */
		return 1;
	}
	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);

	/* Get pgdir */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return 1;

	/* Check CPU locality */
	tmp = cpumask_of_cpu(smp_processor_id());
	if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
		local = 1;

#ifdef CONFIG_HUGETLB_PAGE
	/* Handle hugepage regions */
	if (HPAGE_SHIFT && psize == mmu_huge_psize) {
		DBG_LOW(" -> huge page !\n");
		return hash_huge_page(mm, access, ea, vsid, local, trap);
	}
#endif /* CONFIG_HUGETLB_PAGE */

#ifndef CONFIG_PPC_64K_PAGES
	/* If we use 4K pages and our psize is not 4K, then we are hitting
	 * a special driver mapping, we need to align the address before
	 * we fetch the PTE
	 */
	if (psize != MMU_PAGE_4K)
		ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
#endif /* CONFIG_PPC_64K_PAGES */

	/* Get PTE and page size from page tables */
	ptep = find_linux_pte(pgdir, ea);
	if (ptep == NULL || !pte_present(*ptep)) {
		DBG_LOW(" no PTE !\n");
		return 1;
	}

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	/* Pre-check access permissions (they will be re-checked atomically
	 * in __hash_page_XX); this pre-check is a fast path
	 */
	if (access & ~pte_val(*ptep)) {
		DBG_LOW(" no access !\n");
		return 1;
	}

	/* Do actual hashing */
#ifdef CONFIG_PPC_64K_PAGES
	/* If _PAGE_4K_PFN is set, make sure this is a 4k segment */
	if (pte_val(*ptep) & _PAGE_4K_PFN) {
		demote_segment_4k(mm, ea);
		psize = MMU_PAGE_4K;
	}

	/* If this PTE is non-cacheable and we have restrictions on
	 * using non cacheable large pages, then we switch to 4k
	 */
	if (mmu_ci_restrictions && psize == MMU_PAGE_64K &&
	    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
		if (user_region) {
			demote_segment_4k(mm, ea);
			psize = MMU_PAGE_4K;
		} else if (ea < VMALLOC_END) {
			/*
			 * some driver did a non-cacheable mapping
			 * in vmalloc space, so switch vmalloc
			 * to 4k pages
			 */
			printk(KERN_ALERT "Reducing vmalloc segment "
			       "to 4kB pages because of "
			       "non-cacheable mapping\n");
			psize = mmu_vmalloc_psize = MMU_PAGE_4K;
#ifdef CONFIG_SPU_BASE
			spu_flush_all_slbs(mm);
#endif
		}
	}
	if (user_region) {
		if (psize != get_paca()->context.user_psize) {
			get_paca()->context = mm->context;
			slb_flush_and_rebolt();
		}
	} else if (get_paca()->vmalloc_sllp !=
		   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
		get_paca()->vmalloc_sllp =
			mmu_psize_defs[mmu_vmalloc_psize].sllp;
		slb_vmalloc_update();
	}
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_PPC_HAS_HASH_64K
	if (psize == MMU_PAGE_64K)
		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
	else
#endif /* CONFIG_PPC_HAS_HASH_64K */
		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize);

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	DBG_LOW(" -> rc=%d\n", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(hash_page);

void hash_preload(struct mm_struct *mm, unsigned long ea,
		  unsigned long access, unsigned long trap)
{
	unsigned long vsid;
	void *pgdir;
	pte_t *ptep;
	cpumask_t mask;
	unsigned long flags;
	int local = 0;
	int ssize;

	BUG_ON(REGION_ID(ea) != USER_REGION_ID);

#ifdef CONFIG_PPC_MM_SLICES
	/* We only prefault standard pages for now */
	if (unlikely(get_slice_psize(mm, ea) != mm->context.user_psize))
		return;
#endif

	DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
		" trap=%lx)\n", mm, mm->pgd, ea, access, trap);

	/* Get Linux PTE if available */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return;
	ptep = find_linux_pte(pgdir, ea);
	if (!ptep)
		return;

#ifdef CONFIG_PPC_64K_PAGES
	/* If either _PAGE_4K_PFN or _PAGE_NO_CACHE is set (and we are on
	 * a 64K kernel), then we don't preload, hash_page() will take
	 * care of it once we actually try to access the page.
	 * That way we don't have to duplicate all of the logic for segment
	 * page size demotion here
	 */
	if (pte_val(*ptep) & (_PAGE_4K_PFN | _PAGE_NO_CACHE))
		return;
#endif /* CONFIG_PPC_64K_PAGES */

	/* Get VSID */
	ssize = user_segment_size(ea);
	vsid = get_vsid(mm->context.id, ea, ssize);

	/* Hash doesn't like irqs */
	local_irq_save(flags);

	/* Is that local to this CPU ? */
	mask = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(mm->cpu_vm_mask, mask))
		local = 1;

	/* Hash it in */
#ifdef CONFIG_PPC_HAS_HASH_64K
	if (mm->context.user_psize == MMU_PAGE_64K)
		__hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
	else
#endif /* CONFIG_PPC_HAS_HASH_64K */
		__hash_page_4K(ea, access, vsid, ptep, trap, local, ssize);

	local_irq_restore(flags);
}

/* WARNING: This is called from hash_low_64.S; if you change this prototype,
 * do not forget to update the assembly call site!
 */
void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize,
		     int local)
{
	unsigned long hash, index, shift, hidx, slot;

	DBG_LOW("flush_hash_page(va=%016lx)\n", va);
	pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
		hash = hpt_hash(va, shift, ssize);
		hidx = __rpte_to_hidx(pte, index);
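		/* The saved slot number tells us which hash group the HPTE
		 * went into; the secondary group is reached by inverting
		 * the hash */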
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;
		DBG_LOW(" sub %d: hash=%x, hidx=%x\n", index, slot, hidx);
		ppc_md.hpte_invalidate(slot, va, psize, ssize, local);
	} pte_iterate_hashed_end();
}

void flush_hash_range(unsigned long number, int local)
{
	if (ppc_md.flush_hash_range)
		ppc_md.flush_hash_range(number, local);
	else {
		int i;
		struct ppc64_tlb_batch *batch =
			&__get_cpu_var(ppc64_tlb_batch);

		for (i = 0; i < number; i++)
			flush_hash_page(batch->vaddr[i], batch->pte[i],
					batch->psize, batch->ssize, local);
	}
}

/*
 * low_hash_fault is called when the low-level hash code failed
 * to insert a PTE due to a hypervisor error
 */
void low_hash_fault(struct pt_regs *regs, unsigned long address)
{
	if (user_mode(regs)) {
		siginfo_t info;

		info.si_signo = SIGBUS;
		info.si_errno = 0;
		info.si_code = BUS_ADRERR;
		info.si_addr = (void __user *)address;
		force_sig_info(SIGBUS, &info, current);
		return;
	}
	bad_page_fault(regs, address, SIGBUS);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
{
	unsigned long hash, hpteg;
	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
	unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);
	unsigned long mode = _PAGE_ACCESSED | _PAGE_DIRTY |
		_PAGE_COHERENT | PP_RWXX | HPTE_R_N;
	int ret;

	hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

	ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr),
				 mode, HPTE_V_BOLTED,
				 mmu_linear_psize, mmu_kernel_ssize);
	BUG_ON(ret < 0);
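	/* Remember which slot the HPTE went into; the 0x80 bit marks
	 * the entry as currently mapped */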
	spin_lock(&linear_map_hash_lock);
	BUG_ON(linear_map_hash_slots[lmi] & 0x80);
	linear_map_hash_slots[lmi] = ret | 0x80;
	spin_unlock(&linear_map_hash_lock);
}

static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
{
	unsigned long hash, hidx, slot;
	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
	unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);

	hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
	spin_lock(&linear_map_hash_lock);
	BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
	hidx = linear_map_hash_slots[lmi] & 0x7f;
	linear_map_hash_slots[lmi] = 0;
	spin_unlock(&linear_map_hash_lock);
	if (hidx & _PTEIDX_SECONDARY)
		hash = ~hash;
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot += hidx & _PTEIDX_GROUP_IX;
	ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, mmu_kernel_ssize, 0);
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long flags, vaddr, lmi;
	int i;

	local_irq_save(flags);
	for (i = 0; i < numpages; i++, page++) {
		vaddr = (unsigned long)page_address(page);
		lmi = __pa(vaddr) >> PAGE_SHIFT;
		if (lmi >= linear_map_hash_count)
			continue;
		if (enable)
			kernel_map_linear_page(vaddr, lmi);
		else
			kernel_unmap_linear_page(vaddr, lmi);
	}
	local_irq_restore(flags);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */