[deliverable/linux.git] / arch/powerpc/mm/hugetlbpage.c
/*
 * PPC64 (POWER4) Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/spu.h>

#define PAGE_SHIFT_64K  16
#define PAGE_SHIFT_16M  24
#define PAGE_SHIFT_16G  34

#define NUM_LOW_AREAS   (0x100000000UL >> SID_SHIFT)
#define NUM_HIGH_AREAS  (PGTABLE_RANGE >> HTLB_AREA_SHIFT)

#define MAX_NUMBER_GPAGES       1024

/* Tracks the 16G pages after the device tree is scanned and before the
 * huge_boot_pages list is ready. */
static unsigned long gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;

/* Array of valid huge page sizes - a non-zero value (the hugepte shift) is
 * stored for each huge page size that is valid.
 */
unsigned int mmu_huge_psizes[MMU_PAGE_COUNT] = { }; /* initialize all to 0 */

#define hugepte_shift                   mmu_huge_psizes
#define PTRS_PER_HUGEPTE(psize)         (1 << hugepte_shift[psize])
#define HUGEPTE_TABLE_SIZE(psize)       (sizeof(pte_t) << hugepte_shift[psize])

#define HUGEPD_SHIFT(psize)             (mmu_psize_to_shift(psize) \
                                                + hugepte_shift[psize])
#define HUGEPD_SIZE(psize)              (1UL << HUGEPD_SHIFT(psize))
#define HUGEPD_MASK(psize)              (~(HUGEPD_SIZE(psize)-1))
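
/* Geometry of the macros above: hugepte_shift[psize] holds the number of
 * index bits in a hugepte table for that page size, so PTRS_PER_HUGEPTE()
 * is the number of huge PTEs per table and HUGEPD_SIZE() is the virtual
 * range mapped by one such table.  E.g. with a 4K base page size,
 * set_huge_psize() below sets hugepte_shift for 16M pages to
 * PUD_SHIFT - 24, making HUGEPD_SHIFT() equal to PUD_SHIFT, so a single
 * hugepte table covers exactly one PUD-sized region.
 */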

/* Subtract one from array size because we don't need a cache for 4K since
 * it is not a huge page size */
#define huge_pgtable_cache(psize)       (pgtable_cache[HUGEPTE_CACHE_NUM \
                                                        + psize-1])
#define HUGEPTE_CACHE_NAME(psize)       (huge_pgtable_cache_name[psize])

static const char *huge_pgtable_cache_name[MMU_PAGE_COUNT] = {
        "unused_4K", "hugepte_cache_64K", "unused_64K_AP",
        "hugepte_cache_1M", "hugepte_cache_16M", "hugepte_cache_16G"
};

/* Flag to mark huge PD pointers.  This means pmd_bad() and pud_bad()
 * will choke on pointers to hugepte tables, which is handy for
 * catching screwups early. */
#define HUGEPD_OK       0x1

typedef struct { unsigned long pd; } hugepd_t;

#define hugepd_none(hpd)        ((hpd).pd == 0)

static inline int shift_to_mmu_psize(unsigned int shift)
{
        switch (shift) {
#ifndef CONFIG_PPC_64K_PAGES
        case PAGE_SHIFT_64K:
                return MMU_PAGE_64K;
#endif
        case PAGE_SHIFT_16M:
                return MMU_PAGE_16M;
        case PAGE_SHIFT_16G:
                return MMU_PAGE_16G;
        }
        return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
        if (mmu_psize_defs[mmu_psize].shift)
                return mmu_psize_defs[mmu_psize].shift;
        BUG();
}

static inline pte_t *hugepd_page(hugepd_t hpd)
{
        BUG_ON(!(hpd.pd & HUGEPD_OK));
        return (pte_t *)(hpd.pd & ~HUGEPD_OK);
}

static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
                                    struct hstate *hstate)
{
        unsigned int shift = huge_page_shift(hstate);
        int psize = shift_to_mmu_psize(shift);
        unsigned long idx = ((addr >> shift) & (PTRS_PER_HUGEPTE(psize)-1));
        pte_t *dir = hugepd_page(*hpdp);

        return dir + idx;
}

static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
                           unsigned long address, unsigned int psize)
{
        pte_t *new = kmem_cache_zalloc(huge_pgtable_cache(psize),
                                       GFP_KERNEL|__GFP_REPEAT);

        if (!new)
                return -ENOMEM;

        spin_lock(&mm->page_table_lock);
        if (!hugepd_none(*hpdp))
                kmem_cache_free(huge_pgtable_cache(psize), new);
        else
                hpdp->pd = (unsigned long)new | HUGEPD_OK;
        spin_unlock(&mm->page_table_lock);
        return 0;
}

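/* Note that __hugepte_alloc() allocates the hugepte table outside of
 * mm->page_table_lock and only takes the lock to publish it.  If another
 * thread raced in and installed a table first, the freshly allocated one
 * is simply freed; either way *hpdp is populated when 0 is returned.
 */
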
/* Base page size affects how we walk hugetlb page tables */
#ifdef CONFIG_PPC_64K_PAGES
#define hpmd_offset(pud, addr, h)       pmd_offset(pud, addr)
#define hpmd_alloc(mm, pud, addr, h)    pmd_alloc(mm, pud, addr)
#else
static inline
pmd_t *hpmd_offset(pud_t *pud, unsigned long addr, struct hstate *hstate)
{
        if (huge_page_shift(hstate) == PAGE_SHIFT_64K)
                return pmd_offset(pud, addr);
        else
                return (pmd_t *) pud;
}
static inline
pmd_t *hpmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr,
                  struct hstate *hstate)
{
        if (huge_page_shift(hstate) == PAGE_SHIFT_64K)
                return pmd_alloc(mm, pud, addr);
        else
                return (pmd_t *) pud;
}
#endif

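/* With a 64K base page size the helpers above are plain
 * pmd_offset()/pmd_alloc(), so the walk always descends through a real
 * PMD level.  With a 4K base page size only 64K huge pages get a PMD
 * level; for larger huge pages the pud entry itself holds the hugepd,
 * so the helpers just return the pud cast to a pmd pointer.
 */
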
/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy or bootmem allocator is set up.
 */
void add_gpage(unsigned long addr, unsigned long page_size,
               unsigned long number_of_pages)
{
        if (!addr)
                return;
        while (number_of_pages > 0) {
                gpage_freearray[nr_gpages] = addr;
                nr_gpages++;
                number_of_pages--;
                addr += page_size;
        }
}

/* Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
        struct huge_bootmem_page *m;
        if (nr_gpages == 0)
                return 0;
        m = phys_to_virt(gpage_freearray[--nr_gpages]);
        gpage_freearray[nr_gpages] = 0;
        list_add(&m->list, &huge_boot_pages);
        m->hstate = hstate;
        return 1;
}


/* Modelled after find_linux_pte() */
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pg;
        pud_t *pu;
        pmd_t *pm;

        unsigned int psize;
        unsigned int shift;
        unsigned long sz;
        struct hstate *hstate;
        psize = get_slice_psize(mm, addr);
        shift = mmu_psize_to_shift(psize);
        sz = ((1UL) << shift);
        hstate = size_to_hstate(sz);

        addr &= hstate->mask;

        pg = pgd_offset(mm, addr);
        if (!pgd_none(*pg)) {
                pu = pud_offset(pg, addr);
                if (!pud_none(*pu)) {
                        pm = hpmd_offset(pu, addr, hstate);
                        if (!pmd_none(*pm))
                                return hugepte_offset((hugepd_t *)pm, addr,
                                                      hstate);
                }
        }

        return NULL;
}

pte_t *huge_pte_alloc(struct mm_struct *mm,
                      unsigned long addr, unsigned long sz)
{
        pgd_t *pg;
        pud_t *pu;
        pmd_t *pm;
        hugepd_t *hpdp = NULL;
        struct hstate *hstate;
        unsigned int psize;
        hstate = size_to_hstate(sz);

        psize = get_slice_psize(mm, addr);
        BUG_ON(!mmu_huge_psizes[psize]);

        addr &= hstate->mask;

        pg = pgd_offset(mm, addr);
        pu = pud_alloc(mm, pg, addr);

        if (pu) {
                pm = hpmd_alloc(mm, pu, addr, hstate);
                if (pm)
                        hpdp = (hugepd_t *)pm;
        }

        if (!hpdp)
                return NULL;

        if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, psize))
                return NULL;

        return hugepte_offset(hpdp, addr, hstate);
}

int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
        return 0;
}

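/* Returning 0 tells the generic hugetlb code that nothing was unshared;
 * huge page PMD sharing is not implemented here, so callers always take
 * the ordinary unmap path for this pte.
 */
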
static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp,
                               unsigned int psize)
{
        pte_t *hugepte = hugepd_page(*hpdp);

        hpdp->pd = 0;
        tlb->need_flush = 1;
        pgtable_free_tlb(tlb, pgtable_free_cache(hugepte,
                                                 HUGEPTE_CACHE_NUM+psize-1,
                                                 PGF_CACHENUM_MASK));
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling,
                                   unsigned int psize)
{
        pmd_t *pmd;
        unsigned long next;
        unsigned long start;

        start = addr;
        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none(*pmd))
                        continue;
                free_hugepte_range(tlb, (hugepd_t *)pmd, psize);
        } while (pmd++, addr = next, addr != end);

        start &= PUD_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PUD_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pmd = pmd_offset(pud, start);
        pud_clear(pud);
        pmd_free_tlb(tlb, pmd);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pud_t *pud;
        unsigned long next;
        unsigned long start;
        unsigned int shift;
        unsigned int psize = get_slice_psize(tlb->mm, addr);
        shift = mmu_psize_to_shift(psize);

        start = addr;
        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
#ifdef CONFIG_PPC_64K_PAGES
                if (pud_none_or_clear_bad(pud))
                        continue;
                hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling,
                                       psize);
#else
                if (shift == PAGE_SHIFT_64K) {
                        if (pud_none_or_clear_bad(pud))
                                continue;
                        hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
                                               ceiling, psize);
                } else {
                        if (pud_none(*pud))
                                continue;
                        free_hugepte_range(tlb, (hugepd_t *)pud, psize);
                }
#endif
        } while (pud++, addr = next, addr != end);

        start &= PGDIR_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PGDIR_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pud = pud_offset(pgd, start);
        pgd_clear(pgd);
        pud_free_tlb(tlb, pud);
}

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                            unsigned long addr, unsigned long end,
                            unsigned long floor, unsigned long ceiling)
{
        pgd_t *pgd;
        unsigned long next;
        unsigned long start;

        /*
         * Comments below taken from the normal free_pgd_range().  They
         * apply here too.  The tests against HUGEPD_MASK below are
         * essential, because we *don't* test for this at the bottom
         * level.  Without them we'll attempt to free a hugepte table
         * when we unmap just part of it, even if there are other
         * active mappings using it.
         *
         * The next few lines have given us lots of grief...
         *
         * Why are we testing HUGEPD* at this top level?  Because
         * often there will be no work to do at all, and we'd prefer
         * not to go all the way down to the bottom just to discover
         * that.
         *
         * Why all these "- 1"s?  Because 0 represents both the bottom
         * of the address space and the top of it (using -1 for the
         * top wouldn't help much: the masks would do the wrong thing).
         * The rule is that addr 0 and floor 0 refer to the bottom of
         * the address space, but end 0 and ceiling 0 refer to the top.
         * Comparisons need to use "end - 1" and "ceiling - 1" (though
         * that end 0 case should be mythical).
         *
         * Wherever addr is brought up or ceiling brought down, we
         * must be careful to reject "the opposite 0" before it
         * confuses the subsequent tests.  But what about where end is
         * brought down by HUGEPD_SIZE below?  No, end can't go down to
         * 0 there.
         *
         * Whereas we round start (addr) and ceiling down, by different
         * masks at different levels, in order to test whether a table
         * now has no other vmas using it, so can be freed, we don't
         * bother to round floor or end up - the tests don't need that.
         */
        unsigned int psize = get_slice_psize(tlb->mm, addr);

        addr &= HUGEPD_MASK(psize);
        if (addr < floor) {
                addr += HUGEPD_SIZE(psize);
                if (!addr)
                        return;
        }
        if (ceiling) {
                ceiling &= HUGEPD_MASK(psize);
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                end -= HUGEPD_SIZE(psize);
        if (addr > end - 1)
                return;

        start = addr;
        pgd = pgd_offset(tlb->mm, addr);
        do {
                psize = get_slice_psize(tlb->mm, addr);
                BUG_ON(!mmu_huge_psizes[psize]);
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
        } while (pgd++, addr = next, addr != end);
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t pte)
{
        if (pte_present(*ptep)) {
                /* We open-code pte_clear because we need to pass the right
                 * argument to hpte_need_flush (huge / !huge).  Might not be
                 * necessary anymore if we make hpte_need_flush() get the
                 * page size from the slices.
                 */
                unsigned int psize = get_slice_psize(mm, addr);
                unsigned int shift = mmu_psize_to_shift(psize);
                unsigned long sz = ((1UL) << shift);
                struct hstate *hstate = size_to_hstate(sz);
                pte_update(mm, addr & hstate->mask, ptep, ~0UL, 1);
        }
        *ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        unsigned long old = pte_update(mm, addr, ptep, ~0UL, 1);
        return __pte(old);
}

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        pte_t *ptep;
        struct page *page;
        unsigned int mmu_psize = get_slice_psize(mm, address);

        /* Verify it is a huge page else bail. */
        if (!mmu_huge_psizes[mmu_psize])
                return ERR_PTR(-EINVAL);

        ptep = huge_pte_offset(mm, address);
        page = pte_page(*ptep);
        if (page) {
                unsigned int shift = mmu_psize_to_shift(mmu_psize);
                unsigned long sz = ((1UL) << shift);
                page += (address % sz) / PAGE_SIZE;
        }

        return page;
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

int pud_huge(pud_t pud)
{
        return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        BUG();
        return NULL;
}


unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags)
{
        struct hstate *hstate = hstate_file(file);
        int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
        return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0);
}

/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
                                                  pte_t pte, int trap,
                                                  unsigned long sz)
{
        struct page *page;
        int i;

        if (!pfn_valid(pte_pfn(pte)))
                return rflags;

        page = pte_page(pte);

        /* page is dirty */
        if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
                if (trap == 0x400) {
                        for (i = 0; i < (sz / PAGE_SIZE); i++)
                                __flush_dcache_icache(page_address(page+i));
                        set_bit(PG_arch_1, &page->flags);
                } else {
                        rflags |= HPTE_R_N;
                }
        }
        return rflags;
}

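/* Trap 0x400 is the instruction storage interrupt, i.e. an execute fault,
 * so the icache is only flushed when the page is about to be executed;
 * data faults on a not-yet-flushed page just get HPTE_R_N (no execute)
 * set instead.
 */
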
int hash_huge_page(struct mm_struct *mm, unsigned long access,
                   unsigned long ea, unsigned long vsid, int local,
                   unsigned long trap)
{
        pte_t *ptep;
        unsigned long old_pte, new_pte;
        unsigned long va, rflags, pa, sz;
        long slot;
        int err = 1;
        int ssize = user_segment_size(ea);
        unsigned int mmu_psize;
        int shift;
        mmu_psize = get_slice_psize(mm, ea);

        if (!mmu_huge_psizes[mmu_psize])
                goto out;
        ptep = huge_pte_offset(mm, ea);

        /* Search the Linux page table for a match with va */
        va = hpt_va(ea, vsid, ssize);

        /*
         * If no pte found or not present, send the problem up to
         * do_page_fault
         */
        if (unlikely(!ptep || pte_none(*ptep)))
                goto out;

        /*
         * Check the user's access rights to the page.  If access should be
         * prevented then send the problem up to do_page_fault.
         */
        if (unlikely(access & ~pte_val(*ptep)))
                goto out;
        /*
         * At this point, we have a pte (old_pte) which can be used to build
         * or update an HPTE.  There are 2 cases:
         *
         * 1. There is a valid (present) pte with no associated HPTE (this is
         *      the most common case)
         * 2. There is a valid (present) pte with an associated HPTE.  The
         *      current values of the pp bits in the HPTE prevent access
         *      because we are doing software DIRTY bit management and the
         *      page is currently not DIRTY.
         */

        do {
                old_pte = pte_val(*ptep);
                if (old_pte & _PAGE_BUSY)
                        goto out;
                new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
        } while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
                                          old_pte, new_pte));

        rflags = 0x2 | (!(new_pte & _PAGE_RW));
        /* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
        rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
        shift = mmu_psize_to_shift(mmu_psize);
        sz = ((1UL) << shift);
        if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                /* No CPU has hugepages but lacks no execute, so we
                 * don't need to worry about that case */
                rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
                                                       trap, sz);

        /* Check if pte already has an hpte (case 2) */
        if (unlikely(old_pte & _PAGE_HASHPTE)) {
                /* There MIGHT be an HPTE for this pte */
                unsigned long hash, slot;

                hash = hpt_hash(va, shift, ssize);
                if (old_pte & _PAGE_F_SECOND)
                        hash = ~hash;
                slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                slot += (old_pte & _PAGE_F_GIX) >> 12;

                if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_psize,
                                         ssize, local) == -1)
                        old_pte &= ~_PAGE_HPTEFLAGS;
        }

        if (likely(!(old_pte & _PAGE_HASHPTE))) {
                unsigned long hash = hpt_hash(va, shift, ssize);
                unsigned long hpte_group;

                pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;

repeat:
                hpte_group = ((hash & htab_hash_mask) *
                              HPTES_PER_GROUP) & ~0x7UL;

                /* clear HPTE slot information in new PTE */
#ifdef CONFIG_PPC_64K_PAGES
                new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HPTE_SUB0;
#else
                new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
#endif
                /* Add in WIMG bits */
                rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
                                      _PAGE_COHERENT | _PAGE_GUARDED));

                /* Insert into the hash table, primary slot */
                slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
                                          mmu_psize, ssize);

                /* Primary is full, try the secondary */
                if (unlikely(slot == -1)) {
                        hpte_group = ((~hash & htab_hash_mask) *
                                      HPTES_PER_GROUP) & ~0x7UL;
                        slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
                                                  HPTE_V_SECONDARY,
                                                  mmu_psize, ssize);
                        if (slot == -1) {
                                if (mftb() & 0x1)
                                        hpte_group = ((hash & htab_hash_mask) *
                                                      HPTES_PER_GROUP) & ~0x7UL;

                                ppc_md.hpte_remove(hpte_group);
                                goto repeat;
                        }
                }

                if (unlikely(slot == -2))
                        panic("hash_huge_page: pte_insert failed\n");

                new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
        }

        /*
         * No need to use ldarx/stdcx here
         */
        *ptep = __pte(new_pte & ~_PAGE_BUSY);

        err = 0;

 out:
        return err;
}
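
/* A non-zero return from hash_huge_page() (err stays 1 on the "goto out"
 * paths above) tells the low-level hash fault code to hand the fault to
 * do_page_fault(), matching the "send the problem up" comments inside
 * the function.
 */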

void set_huge_psize(int psize)
{
        /* Check that it is a page size supported by the hardware and
         * that it fits within pagetable limits. */
        if (mmu_psize_defs[psize].shift &&
            mmu_psize_defs[psize].shift < SID_SHIFT_1T &&
            (mmu_psize_defs[psize].shift > MIN_HUGEPTE_SHIFT ||
             mmu_psize_defs[psize].shift == PAGE_SHIFT_64K ||
             mmu_psize_defs[psize].shift == PAGE_SHIFT_16G)) {
                /* Return if huge page size has already been set up or is the
                 * same as the base page size. */
                if (mmu_huge_psizes[psize] ||
                    mmu_psize_defs[psize].shift == PAGE_SHIFT)
                        return;
                hugetlb_add_hstate(mmu_psize_defs[psize].shift - PAGE_SHIFT);

                switch (mmu_psize_defs[psize].shift) {
                case PAGE_SHIFT_64K:
                        /* We only allow 64k hpages with 4k base page,
                         * which was checked above, and always put them
                         * at the PMD */
                        hugepte_shift[psize] = PMD_SHIFT;
                        break;
                case PAGE_SHIFT_16M:
                        /* 16M pages can be at two different levels
                         * of page tables based on base page size */
                        if (PAGE_SHIFT == PAGE_SHIFT_64K)
                                hugepte_shift[psize] = PMD_SHIFT;
                        else /* 4k base page */
                                hugepte_shift[psize] = PUD_SHIFT;
                        break;
                case PAGE_SHIFT_16G:
                        /* 16G pages are always at PGD level */
                        hugepte_shift[psize] = PGDIR_SHIFT;
                        break;
                }
                hugepte_shift[psize] -= mmu_psize_defs[psize].shift;
        } else
                hugepte_shift[psize] = 0;
}

static int __init hugepage_setup_sz(char *str)
{
        unsigned long long size;
        int mmu_psize;
        int shift;

        size = memparse(str, &str);

        shift = __ffs(size);
        mmu_psize = shift_to_mmu_psize(shift);
        if (mmu_psize >= 0 && mmu_psize_defs[mmu_psize].shift)
                set_huge_psize(mmu_psize);
        else
                printk(KERN_WARNING "Invalid huge page size specified (%llu)\n", size);

        return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);

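/* The __setup() hook above parses a boot parameter such as
 * "hugepagesz=16M" or "hugepagesz=16G" (memparse accepts the usual
 * K/M/G suffixes) and registers the corresponding MMU page size via
 * set_huge_psize().
 */
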
static int __init hugetlbpage_init(void)
{
        unsigned int psize;

        if (!cpu_has_feature(CPU_FTR_16M_PAGE))
                return -ENODEV;

        /* Add supported huge page sizes.  Need to change HUGE_MAX_HSTATE
         * and adjust PTE_NONCACHE_NUM if the number of supported huge page
         * sizes changes.
         */
        set_huge_psize(MMU_PAGE_16M);
        set_huge_psize(MMU_PAGE_16G);

        /* Temporarily disable support for 64K huge pages when 64K SPU local
         * store support is enabled as the current implementation conflicts.
         */
#ifndef CONFIG_SPU_FS_64K_LS
        set_huge_psize(MMU_PAGE_64K);
#endif

        for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
                if (mmu_huge_psizes[psize]) {
                        huge_pgtable_cache(psize) = kmem_cache_create(
                                                HUGEPTE_CACHE_NAME(psize),
                                                HUGEPTE_TABLE_SIZE(psize),
                                                HUGEPTE_TABLE_SIZE(psize),
                                                0,
                                                NULL);
                        if (!huge_pgtable_cache(psize))
                                panic("hugetlbpage_init(): could not create %s"
                                      "\n", HUGEPTE_CACHE_NAME(psize));
                }
        }

        return 0;
}

module_init(hugetlbpage_init);