[POWERPC] Clean out a bunch of duplicate includes
arch/powerpc/mm/hash_utils_64.c
/*
 * PowerPC64 port by Mike Corrigan and Dave Engebretsen
 *   {mikejc|engebret}@us.ibm.com
 *
 *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *    Module name: htab.c
 *
 *    Description:
 *      PowerPC Hashed Page Table functions
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG
#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/signal.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/lmb.h>
#include <asm/abs_addr.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/eeh.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/spu.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#define KB (1024)
#define MB (1024*KB)

/*
 * Note:  pte   --> Linux PTE
 *        HPTE  --> PowerPC Hashed Page Table Entry
 *
 * Execution context:
 *   htab_initialize is called with the MMU off (of course), but
 *   the kernel has been copied down to zero so it can directly
 *   reference global data.  At this point it is very difficult
 *   to print debug info.
 */

#ifdef CONFIG_U3_DART
extern unsigned long dart_tablebase;
#endif /* CONFIG_U3_DART */

static unsigned long _SDR1;
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];

struct hash_pte *htab_address;
unsigned long htab_size_bytes;
unsigned long htab_hash_mask;
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
int mmu_vmalloc_psize = MMU_PAGE_4K;
int mmu_io_psize = MMU_PAGE_4K;
#ifdef CONFIG_HUGETLB_PAGE
int mmu_huge_psize = MMU_PAGE_16M;
unsigned int HPAGE_SHIFT;
#endif
#ifdef CONFIG_PPC_64K_PAGES
int mmu_ci_restrictions;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
static u8 *linear_map_hash_slots;
static unsigned long linear_map_hash_count;
static DEFINE_SPINLOCK(linear_map_hash_lock);
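/*
 * Each linear_map_hash_slots[] entry tracks the bolted HPTE for one
 * page of the linear mapping: the low bits hold the slot value
 * returned by ppc_md.hpte_insert() and bit 0x80 marks the page as
 * currently hashed in, so kernel_unmap_linear_page() can later find
 * and invalidate the entry.
 */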
#endif /* CONFIG_DEBUG_PAGEALLOC */

/* These are the page-size array definitions to be used when no
 * sizes are provided by the firmware.
 */

/* Pre-POWER4 CPUs (4k pages only)
 */
struct mmu_psize_def mmu_psize_defaults_old[] = {
        [MMU_PAGE_4K] = {
                .shift  = 12,
                .sllp   = 0,
                .penc   = 0,
                .avpnm  = 0,
                .tlbiel = 0,
        },
};

/* POWER4, GPUL, POWER5
 *
 * Support for 16Mb large pages
 */
struct mmu_psize_def mmu_psize_defaults_gp[] = {
        [MMU_PAGE_4K] = {
                .shift  = 12,
                .sllp   = 0,
                .penc   = 0,
                .avpnm  = 0,
                .tlbiel = 1,
        },
        [MMU_PAGE_16M] = {
                .shift  = 24,
                .sllp   = SLB_VSID_L,
                .penc   = 0,
                .avpnm  = 0x1UL,
                .tlbiel = 0,
        },
};

int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
                      unsigned long pstart, unsigned long mode, int psize)
{
        unsigned long vaddr, paddr;
        unsigned int step, shift;
        unsigned long tmp_mode;
        int ret = 0;

        shift = mmu_psize_defs[psize].shift;
        step = 1 << shift;

        for (vaddr = vstart, paddr = pstart; vaddr < vend;
             vaddr += step, paddr += step) {
                unsigned long hash, hpteg;
                unsigned long vsid = get_kernel_vsid(vaddr);
                unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);

                tmp_mode = mode;

                /* Make non-kernel text non-executable */
                if (!in_kernel_text(vaddr))
                        tmp_mode = mode | HPTE_R_N;

                hash = hpt_hash(va, shift);
                hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

                DBG("htab_bolt_mapping: calling %p\n", ppc_md.hpte_insert);

                BUG_ON(!ppc_md.hpte_insert);
                ret = ppc_md.hpte_insert(hpteg, va, paddr,
                                         tmp_mode, HPTE_V_BOLTED, psize);

                if (ret < 0)
                        break;
#ifdef CONFIG_DEBUG_PAGEALLOC
                if ((paddr >> PAGE_SHIFT) < linear_map_hash_count)
                        linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
#endif /* CONFIG_DEBUG_PAGEALLOC */
        }
        return ret < 0 ? ret : 0;
}
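
/*
 * A minimal usage sketch (mirroring create_section_mapping() and
 * htab_initialize() below): bolt a linear-mapping range with kernel
 * read/write permissions at the linear page size:
 *
 *	BUG_ON(htab_bolt_mapping(start, end, __pa(start),
 *		_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX,
 *		mmu_linear_psize));
 */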

static int __init htab_dt_scan_page_sizes(unsigned long node,
                                          const char *uname, int depth,
                                          void *data)
{
        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        u32 *prop;
        unsigned long size = 0;

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;

        prop = (u32 *)of_get_flat_dt_prop(node,
                                          "ibm,segment-page-sizes", &size);
        if (prop != NULL) {
                DBG("Page sizes from device-tree:\n");
                size /= 4;
                cur_cpu_spec->cpu_features &= ~(CPU_FTR_16M_PAGE);
                while(size > 0) {
                        unsigned int shift = prop[0];
                        unsigned int slbenc = prop[1];
                        unsigned int lpnum = prop[2];
                        unsigned int lpenc = 0;
                        struct mmu_psize_def *def;
                        int idx = -1;

                        size -= 3; prop += 3;
                        while(size > 0 && lpnum) {
                                if (prop[0] == shift)
                                        lpenc = prop[1];
                                prop += 2; size -= 2;
                                lpnum--;
                        }
                        switch(shift) {
                        case 0xc:
                                idx = MMU_PAGE_4K;
                                break;
                        case 0x10:
                                idx = MMU_PAGE_64K;
                                break;
                        case 0x14:
                                idx = MMU_PAGE_1M;
                                break;
                        case 0x18:
                                idx = MMU_PAGE_16M;
                                cur_cpu_spec->cpu_features |= CPU_FTR_16M_PAGE;
                                break;
                        case 0x22:
                                idx = MMU_PAGE_16G;
                                break;
                        }
                        if (idx < 0)
                                continue;
                        def = &mmu_psize_defs[idx];
                        def->shift = shift;
                        if (shift <= 23)
                                def->avpnm = 0;
                        else
                                def->avpnm = (1 << (shift - 23)) - 1;
                        def->sllp = slbenc;
                        def->penc = lpenc;
                        /* We don't know for sure what's up with tlbiel, so
                         * for now we only set it for 4K and 64K pages
                         */
                        if (idx == MMU_PAGE_4K || idx == MMU_PAGE_64K)
                                def->tlbiel = 1;
                        else
                                def->tlbiel = 0;

                        DBG(" %d: shift=%02x, sllp=%04x, avpnm=%08x, "
                            "tlbiel=%d, penc=%d\n",
                            idx, shift, def->sllp, def->avpnm, def->tlbiel,
                            def->penc);
                }
                return 1;
        }
        return 0;
}
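
/*
 * Layout of "ibm,segment-page-sizes", as walked above: for each
 * supported segment base page size there is a record of cells
 * <shift slbenc lpnum> followed by lpnum pairs of <page-shift penc>.
 * A hypothetical CPU with 4K and 16M segments, each supporting only
 * its own base page size, might therefore expose:
 *
 *	ibm,segment-page-sizes = <0x0c 0x000 1  0x0c 0x0
 *				  0x18 0x100 1  0x18 0x0>;
 *
 * (values illustrative; real firmware tables vary by CPU).
 */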

static void __init htab_init_page_sizes(void)
{
        int rc;

        /* Default to 4K pages only */
        memcpy(mmu_psize_defs, mmu_psize_defaults_old,
               sizeof(mmu_psize_defaults_old));

        /*
         * Try to find the available page sizes in the device-tree
         */
        rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
        if (rc != 0)  /* Found */
                goto found;

        /*
         * Not in the device-tree; fall back on the known size
         * list for 16M-capable GP & GR
         */
        if (cpu_has_feature(CPU_FTR_16M_PAGE))
                memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
                       sizeof(mmu_psize_defaults_gp));
 found:
#ifndef CONFIG_DEBUG_PAGEALLOC
        /*
         * Pick a size for the linear mapping. Currently, we only support
         * 16M, 1M and 4K which is the default
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift)
                mmu_linear_psize = MMU_PAGE_16M;
        else if (mmu_psize_defs[MMU_PAGE_1M].shift)
                mmu_linear_psize = MMU_PAGE_1M;
#endif /* CONFIG_DEBUG_PAGEALLOC */

#ifdef CONFIG_PPC_64K_PAGES
        /*
         * Pick a size for the ordinary pages. Default is 4K, we support
         * 64K for user mappings and vmalloc if supported by the processor.
         * We only use 64k for ioremap if the processor
         * (and firmware) support cache-inhibited large pages.
         * If not, we use 4k and set mmu_ci_restrictions so that
         * hash_page knows to switch processes that use cache-inhibited
         * mappings to 4k pages.
         */
        if (mmu_psize_defs[MMU_PAGE_64K].shift) {
                mmu_virtual_psize = MMU_PAGE_64K;
                mmu_vmalloc_psize = MMU_PAGE_64K;
                if (mmu_linear_psize == MMU_PAGE_4K)
                        mmu_linear_psize = MMU_PAGE_64K;
                if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
                        mmu_io_psize = MMU_PAGE_64K;
                else
                        mmu_ci_restrictions = 1;
        }
#endif /* CONFIG_PPC_64K_PAGES */

        printk(KERN_DEBUG "Page orders: linear mapping = %d, "
               "virtual = %d, io = %d\n",
               mmu_psize_defs[mmu_linear_psize].shift,
               mmu_psize_defs[mmu_virtual_psize].shift,
               mmu_psize_defs[mmu_io_psize].shift);

#ifdef CONFIG_HUGETLB_PAGE
        /* Init large page size. Currently, we pick 16M or 1M depending
         * on what is available
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift)
                mmu_huge_psize = MMU_PAGE_16M;
        /* With 4k/4level pagetables, we can't (for now) cope with a
         * huge page size < PMD_SIZE */
        else if (mmu_psize_defs[MMU_PAGE_1M].shift)
                mmu_huge_psize = MMU_PAGE_1M;

        /* Calculate HPAGE_SHIFT and sanity check it */
        if (mmu_psize_defs[mmu_huge_psize].shift > MIN_HUGEPTE_SHIFT &&
            mmu_psize_defs[mmu_huge_psize].shift < SID_SHIFT)
                HPAGE_SHIFT = mmu_psize_defs[mmu_huge_psize].shift;
        else
                HPAGE_SHIFT = 0; /* No huge pages dude ! */
#endif /* CONFIG_HUGETLB_PAGE */
}

static int __init htab_dt_scan_pftsize(unsigned long node,
                                       const char *uname, int depth,
                                       void *data)
{
        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        u32 *prop;

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;

        prop = (u32 *)of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
        if (prop != NULL) {
                /* pft_size[0] is the NUMA CEC cookie */
                ppc64_pft_size = prop[1];
                return 1;
        }
        return 0;
}
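
/*
 * Illustrative only: firmware that sizes the hash table at 32MB
 * (2^25 bytes) would expose a property like
 *
 *	ibm,pft-size = <0x0 0x19>;
 *
 * where the first cell is the NUMA CEC cookie skipped above and the
 * second is log2 of the table size in bytes (htab_get_table_size()
 * below returns 1UL << ppc64_pft_size).
 */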

static unsigned long __init htab_get_table_size(void)
{
        unsigned long mem_size, rnd_mem_size, pteg_count;

        /* If hash size isn't already provided by the platform, we try to
         * retrieve it from the device-tree. If it's not there either, we
         * calculate it now based on the total RAM size
         */
        if (ppc64_pft_size == 0)
                of_scan_flat_dt(htab_dt_scan_pftsize, NULL);
        if (ppc64_pft_size)
                return 1UL << ppc64_pft_size;

        /* round mem_size up to next power of 2 */
        mem_size = lmb_phys_mem_size();
        rnd_mem_size = 1UL << __ilog2(mem_size);
        if (rnd_mem_size < mem_size)
                rnd_mem_size <<= 1;

        /* # pages / 2 */
        pteg_count = max(rnd_mem_size >> (12 + 1), 1UL << 11);

        return pteg_count << 7;
}
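
/*
 * Worked example of the fallback sizing: with 1GB of RAM,
 * rnd_mem_size = 2^30, so pteg_count = max(2^30 >> 13, 2^11) = 2^17.
 * Each PTEG is 128 bytes (hence "<< 7"), giving a 16MB hash table:
 * one PTEG for every two 4K pages.
 */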

#ifdef CONFIG_MEMORY_HOTPLUG
void create_section_mapping(unsigned long start, unsigned long end)
{
        BUG_ON(htab_bolt_mapping(start, end, __pa(start),
                _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX,
                mmu_linear_psize));
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static inline void make_bl(unsigned int *insn_addr, void *func)
{
        unsigned long funcp = *((unsigned long *)func);
        int offset = funcp - (unsigned long)insn_addr;

        *insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
        flush_icache_range((unsigned long)insn_addr, 4+
                           (unsigned long)insn_addr);
}
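
/*
 * make_bl() patches in a PowerPC "bl" (branch and link) instruction:
 * 0x48000001 is primary opcode 18 with the LK bit set, and the mask
 * 0x03fffffc keeps the word-aligned signed branch displacement.
 * Worked example (addresses illustrative): an instruction at
 * 0xc000000000050000 branching to 0xc000000000060000 has offset
 * 0x10000, encoding to 0x48000001 | 0x10000 = 0x48010001.
 */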

static void __init htab_finish_init(void)
{
        extern unsigned int *htab_call_hpte_insert1;
        extern unsigned int *htab_call_hpte_insert2;
        extern unsigned int *htab_call_hpte_remove;
        extern unsigned int *htab_call_hpte_updatepp;

#ifdef CONFIG_PPC_HAS_HASH_64K
        extern unsigned int *ht64_call_hpte_insert1;
        extern unsigned int *ht64_call_hpte_insert2;
        extern unsigned int *ht64_call_hpte_remove;
        extern unsigned int *ht64_call_hpte_updatepp;

        make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert);
        make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert);
        make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove);
        make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp);
#endif /* CONFIG_PPC_HAS_HASH_64K */

        make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
        make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
        make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
        make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
}

void __init htab_initialize(void)
{
        unsigned long table;
        unsigned long pteg_count;
        unsigned long mode_rw;
        unsigned long base = 0, size = 0;
        int i;

        extern unsigned long tce_alloc_start, tce_alloc_end;

        DBG(" -> htab_initialize()\n");

        /* Initialize page sizes */
        htab_init_page_sizes();

        /*
         * Calculate the required size of the htab.  We want the number of
         * PTEGs to equal one half the number of real pages.
         */
        htab_size_bytes = htab_get_table_size();
        pteg_count = htab_size_bytes >> 7;

        htab_hash_mask = pteg_count - 1;

        if (firmware_has_feature(FW_FEATURE_LPAR)) {
                /* Using a hypervisor which owns the htab */
                htab_address = NULL;
                _SDR1 = 0;
        } else {
                /* Find storage for the HPT.  Must be contiguous in
                 * the absolute address space.
                 */
                table = lmb_alloc(htab_size_bytes, htab_size_bytes);

                DBG("Hash table allocated at %lx, size: %lx\n", table,
                    htab_size_bytes);

                htab_address = abs_to_virt(table);

                /* htab absolute addr + encoded htabsize */
                _SDR1 = table + __ilog2(pteg_count) - 11;
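                /*
                 * Sanity illustration of the encoding: a 16MB table has
                 * pteg_count = 2^17, so __ilog2(pteg_count) - 11 = 6,
                 * matching the architected HTABSIZE field of SDR1
                 * (log2(table bytes) - 18 = 24 - 18 = 6).  The table is
                 * size-aligned, so the low bits of "table" are zero and
                 * free to carry the size encoding.
                 */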

                /* Initialize the HPT with no entries */
                memset((void *)table, 0, htab_size_bytes);

                /* Set SDR1 */
                mtspr(SPRN_SDR1, _SDR1);
        }

        mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;

#ifdef CONFIG_DEBUG_PAGEALLOC
        linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT;
        linear_map_hash_slots = __va(lmb_alloc_base(linear_map_hash_count,
                                                    1, lmb.rmo_size));
        memset(linear_map_hash_slots, 0, linear_map_hash_count);
#endif /* CONFIG_DEBUG_PAGEALLOC */

        /* On U3 based machines, we need to reserve the DART area and
         * _NOT_ map it to avoid cache paradoxes as it's remapped non
         * cacheable later on
         */

        /* create the bolted linear mapping in the hash table */
        for (i=0; i < lmb.memory.cnt; i++) {
                base = (unsigned long)__va(lmb.memory.region[i].base);
                size = lmb.memory.region[i].size;

                DBG("creating mapping for region: %lx : %lx\n", base, size);

#ifdef CONFIG_U3_DART
                /* Do not map the DART space. Fortunately, it will be aligned
                 * in such a way that it will not cross two lmb regions and
                 * will fit within a single 16Mb page.
                 * The DART space is assumed to be a full 16Mb region even if
                 * we only use 2Mb of that space. We will use more of it later
                 * for AGP GART. We have to use a full 16Mb large page.
                 */
                DBG("DART base: %lx\n", dart_tablebase);

                if (dart_tablebase != 0 && dart_tablebase >= base
                                && dart_tablebase < (base + size)) {
                        unsigned long dart_table_end = dart_tablebase + 16 * MB;
                        if (base != dart_tablebase)
                                BUG_ON(htab_bolt_mapping(base, dart_tablebase,
                                                         __pa(base), mode_rw,
                                                         mmu_linear_psize));
                        if ((base + size) > dart_table_end)
                                BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
                                                         base + size,
                                                         __pa(dart_table_end),
                                                         mode_rw,
                                                         mmu_linear_psize));
                        continue;
                }
#endif /* CONFIG_U3_DART */
                BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
                                         mode_rw, mmu_linear_psize));
        }

        /*
         * If we have a memory_limit and we've allocated TCEs then we need to
         * explicitly map the TCE area at the top of RAM. We also cope with the
         * case that the TCEs start below memory_limit.
         * tce_alloc_start/end are 16MB aligned so the mapping should work
         * for either 4K or 16MB pages.
         */
        if (tce_alloc_start) {
                tce_alloc_start = (unsigned long)__va(tce_alloc_start);
                tce_alloc_end = (unsigned long)__va(tce_alloc_end);

                if (base + size >= tce_alloc_start)
                        tce_alloc_start = base + size + 1;

                BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
                                         __pa(tce_alloc_start), mode_rw,
                                         mmu_linear_psize));
        }

        htab_finish_init();

        DBG(" <- htab_initialize()\n");
}
#undef KB
#undef MB

void htab_initialize_secondary(void)
{
        if (!firmware_has_feature(FW_FEATURE_LPAR))
                mtspr(SPRN_SDR1, _SDR1);
}

/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
{
        struct page *page;

        if (!pfn_valid(pte_pfn(pte)))
                return pp;

        page = pte_page(pte);

        /* page is dirty */
        if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
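                /* 0x400 is the instruction storage interrupt vector: the
                 * access was an instruction fetch, so make the icache
                 * coherent now.  For a data access, set no-execute instead
                 * and defer the flush until a real execute fault.
                 */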
                if (trap == 0x400) {
                        __flush_dcache_icache(page_address(page));
                        set_bit(PG_arch_1, &page->flags);
                } else
                        pp |= HPTE_R_N;
        }
        return pp;
}

/*
 * Demote a segment to using 4k pages.
 * For now this makes the whole process use 4k pages.
 */
#ifdef CONFIG_PPC_64K_PAGES
static void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
{
        if (mm->context.user_psize == MMU_PAGE_4K)
                return;
#ifdef CONFIG_PPC_MM_SLICES
        slice_set_user_psize(mm, MMU_PAGE_4K);
#else /* CONFIG_PPC_MM_SLICES */
        mm->context.user_psize = MMU_PAGE_4K;
        mm->context.sllp = SLB_VSID_USER | mmu_psize_defs[MMU_PAGE_4K].sllp;
#endif /* CONFIG_PPC_MM_SLICES */

#ifdef CONFIG_SPU_BASE
        spu_flush_all_slbs(mm);
#endif
}
#endif /* CONFIG_PPC_64K_PAGES */

/* Result code is:
 *  0 - handled
 *  1 - normal page fault
 * -1 - critical hash insertion error
 */
int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
{
        void *pgdir;
        unsigned long vsid;
        struct mm_struct *mm;
        pte_t *ptep;
        cpumask_t tmp;
        int rc, user_region = 0, local = 0;
        int psize;

        DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
                ea, access, trap);

        if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
                DBG_LOW(" out of pgtable range !\n");
                return 1;
        }

        /* Get region & vsid */
        switch (REGION_ID(ea)) {
        case USER_REGION_ID:
                user_region = 1;
                mm = current->mm;
                if (!mm) {
                        DBG_LOW(" user region with no mm !\n");
                        return 1;
                }
                vsid = get_vsid(mm->context.id, ea);
#ifdef CONFIG_PPC_MM_SLICES
                psize = get_slice_psize(mm, ea);
#else
                psize = mm->context.user_psize;
#endif
                break;
        case VMALLOC_REGION_ID:
                mm = &init_mm;
                vsid = get_kernel_vsid(ea);
                if (ea < VMALLOC_END)
                        psize = mmu_vmalloc_psize;
                else
                        psize = mmu_io_psize;
                break;
        default:
                /* Not a valid range
                 * Send the problem up to do_page_fault
                 */
                return 1;
        }
        DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);

        /* Get pgdir */
        pgdir = mm->pgd;
        if (pgdir == NULL)
                return 1;

        /* Check CPU locality */
        tmp = cpumask_of_cpu(smp_processor_id());
        if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
                local = 1;

#ifdef CONFIG_HUGETLB_PAGE
        /* Handle hugepage regions */
        if (HPAGE_SHIFT && psize == mmu_huge_psize) {
                DBG_LOW(" -> huge page !\n");
                return hash_huge_page(mm, access, ea, vsid, local, trap);
        }
#endif /* CONFIG_HUGETLB_PAGE */

#ifndef CONFIG_PPC_64K_PAGES
        /* If we use 4K pages and our psize is not 4K, then we are hitting
         * a special driver mapping, we need to align the address before
         * we fetch the PTE
         */
        if (psize != MMU_PAGE_4K)
                ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
#endif /* CONFIG_PPC_64K_PAGES */

        /* Get PTE and page size from page tables */
        ptep = find_linux_pte(pgdir, ea);
        if (ptep == NULL || !pte_present(*ptep)) {
                DBG_LOW(" no PTE !\n");
                return 1;
        }

#ifndef CONFIG_PPC_64K_PAGES
        DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
#else
        DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
                pte_val(*(ptep + PTRS_PER_PTE)));
#endif
        /* Pre-check access permissions (will be re-checked atomically
         * in __hash_page_XX but this pre-check is a fast path)
         */
        if (access & ~pte_val(*ptep)) {
                DBG_LOW(" no access !\n");
                return 1;
        }

        /* Do actual hashing */
#ifdef CONFIG_PPC_64K_PAGES
        /* If _PAGE_4K_PFN is set, make sure this is a 4k segment */
        if (pte_val(*ptep) & _PAGE_4K_PFN) {
                demote_segment_4k(mm, ea);
                psize = MMU_PAGE_4K;
        }

        /* If this PTE is non-cacheable and we have restrictions on
         * using non-cacheable large pages, then we switch to 4k
         */
        if (mmu_ci_restrictions && psize == MMU_PAGE_64K &&
            (pte_val(*ptep) & _PAGE_NO_CACHE)) {
                if (user_region) {
                        demote_segment_4k(mm, ea);
                        psize = MMU_PAGE_4K;
                } else if (ea < VMALLOC_END) {
                        /*
                         * some driver did a non-cacheable mapping
                         * in vmalloc space, so switch vmalloc
                         * to 4k pages
                         */
                        printk(KERN_ALERT "Reducing vmalloc segment "
                               "to 4kB pages because of "
                               "non-cacheable mapping\n");
                        psize = mmu_vmalloc_psize = MMU_PAGE_4K;
#ifdef CONFIG_SPU_BASE
                        spu_flush_all_slbs(mm);
#endif
                }
        }
        if (user_region) {
                if (psize != get_paca()->context.user_psize) {
                        get_paca()->context.user_psize =
                                mm->context.user_psize;
                        slb_flush_and_rebolt();
                }
        } else if (get_paca()->vmalloc_sllp !=
                   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
                get_paca()->vmalloc_sllp =
                        mmu_psize_defs[mmu_vmalloc_psize].sllp;
                slb_vmalloc_update();
        }
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_PPC_HAS_HASH_64K
        if (psize == MMU_PAGE_64K)
                rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
        else
#endif /* CONFIG_PPC_HAS_HASH_64K */
                rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);

#ifndef CONFIG_PPC_64K_PAGES
        DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else
        DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
                pte_val(*(ptep + PTRS_PER_PTE)));
#endif
        DBG_LOW(" -> rc=%d\n", rc);
        return rc;
}
EXPORT_SYMBOL_GPL(hash_page);

void hash_preload(struct mm_struct *mm, unsigned long ea,
                  unsigned long access, unsigned long trap)
{
        unsigned long vsid;
        void *pgdir;
        pte_t *ptep;
        cpumask_t mask;
        unsigned long flags;
        int local = 0;

        BUG_ON(REGION_ID(ea) != USER_REGION_ID);

#ifdef CONFIG_PPC_MM_SLICES
        /* We only prefault standard pages for now */
        if (unlikely(get_slice_psize(mm, ea) != mm->context.user_psize))
                return;
#endif

        DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
                " trap=%lx\n", mm, mm->pgd, ea, access, trap);

        /* Get Linux PTE if available */
        pgdir = mm->pgd;
        if (pgdir == NULL)
                return;
        ptep = find_linux_pte(pgdir, ea);
        if (!ptep)
                return;

#ifdef CONFIG_PPC_64K_PAGES
        /* If either _PAGE_4K_PFN or _PAGE_NO_CACHE is set (and we are on
         * a 64K kernel), then we don't preload, hash_page() will take
         * care of it once we actually try to access the page.
         * That way we don't have to duplicate all of the logic for segment
         * page size demotion here
         */
        if (pte_val(*ptep) & (_PAGE_4K_PFN | _PAGE_NO_CACHE))
                return;
#endif /* CONFIG_PPC_64K_PAGES */

        /* Get VSID */
        vsid = get_vsid(mm->context.id, ea);

        /* Hash doesn't like irqs */
        local_irq_save(flags);

        /* Is that local to this CPU ? */
        mask = cpumask_of_cpu(smp_processor_id());
        if (cpus_equal(mm->cpu_vm_mask, mask))
                local = 1;

        /* Hash it in */
#ifdef CONFIG_PPC_HAS_HASH_64K
        if (mm->context.user_psize == MMU_PAGE_64K)
                __hash_page_64K(ea, access, vsid, ptep, trap, local);
        else
#endif /* CONFIG_PPC_HAS_HASH_64K */
                __hash_page_4K(ea, access, vsid, ptep, trap, local);

        local_irq_restore(flags);
}

void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int local)
{
        unsigned long hash, index, shift, hidx, slot;

        DBG_LOW("flush_hash_page(va=%016lx)\n", va);
        pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
                hash = hpt_hash(va, shift);
                hidx = __rpte_to_hidx(pte, index);
                if (hidx & _PTEIDX_SECONDARY)
                        hash = ~hash;
                slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                slot += hidx & _PTEIDX_GROUP_IX;
                DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx);
                ppc_md.hpte_invalidate(slot, va, psize, local);
        } pte_iterate_hashed_end();
}
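
/*
 * Note on the lookup above: if _PTEIDX_SECONDARY is set in the saved
 * hidx, the HPTE was inserted in its secondary hash group, which is
 * indexed by ~hash; the low _PTEIDX_GROUP_IX bits then pick the slot
 * within the HPTES_PER_GROUP-entry group.
 */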

void flush_hash_range(unsigned long number, int local)
{
        if (ppc_md.flush_hash_range)
                ppc_md.flush_hash_range(number, local);
        else {
                int i;
                struct ppc64_tlb_batch *batch =
                        &__get_cpu_var(ppc64_tlb_batch);

                for (i = 0; i < number; i++)
                        flush_hash_page(batch->vaddr[i], batch->pte[i],
                                        batch->psize, local);
        }
}

/*
 * low_hash_fault is called when the low-level hash code failed
 * to insert a PTE due to a hypervisor error
 */
void low_hash_fault(struct pt_regs *regs, unsigned long address)
{
        if (user_mode(regs)) {
                siginfo_t info;

                info.si_signo = SIGBUS;
                info.si_errno = 0;
                info.si_code = BUS_ADRERR;
                info.si_addr = (void __user *)address;
                force_sig_info(SIGBUS, &info, current);
                return;
        }
        bad_page_fault(regs, address, SIGBUS);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
{
        unsigned long hash, hpteg, vsid = get_kernel_vsid(vaddr);
        unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);
        unsigned long mode = _PAGE_ACCESSED | _PAGE_DIRTY |
                _PAGE_COHERENT | PP_RWXX | HPTE_R_N;
        int ret;

        hash = hpt_hash(va, PAGE_SHIFT);
        hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

        ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr),
                                 mode, HPTE_V_BOLTED, mmu_linear_psize);
        BUG_ON(ret < 0);
        spin_lock(&linear_map_hash_lock);
        BUG_ON(linear_map_hash_slots[lmi] & 0x80);
        linear_map_hash_slots[lmi] = ret | 0x80;
        spin_unlock(&linear_map_hash_lock);
}

static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
{
        unsigned long hash, hidx, slot, vsid = get_kernel_vsid(vaddr);
        unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);

        hash = hpt_hash(va, PAGE_SHIFT);
        spin_lock(&linear_map_hash_lock);
        BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
        hidx = linear_map_hash_slots[lmi] & 0x7f;
        linear_map_hash_slots[lmi] = 0;
        spin_unlock(&linear_map_hash_lock);
        if (hidx & _PTEIDX_SECONDARY)
                hash = ~hash;
        slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
        slot += hidx & _PTEIDX_GROUP_IX;
        ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, 0);
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
        unsigned long flags, vaddr, lmi;
        int i;

        local_irq_save(flags);
        for (i = 0; i < numpages; i++, page++) {
                vaddr = (unsigned long)page_address(page);
                lmi = __pa(vaddr) >> PAGE_SHIFT;
                if (lmi >= linear_map_hash_count)
                        continue;
                if (enable)
                        kernel_map_linear_page(vaddr, lmi);
                else
                        kernel_unmap_linear_page(vaddr, lmi);
        }
        local_irq_restore(flags);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */