2 #include <linux/hugetlb.h>
3 #include <asm/pgtable.h>
4 #include <asm/pgalloc.h>
5 #include <asm/cacheflush.h>
6 #include <asm/machdep.h>
9 void radix__flush_hugetlb_page(struct vm_area_struct
*vma
, unsigned long vmaddr
)
11 unsigned long ap
, shift
;
12 struct hstate
*hstate
= hstate_file(vma
->vm_file
);
14 shift
= huge_page_shift(hstate
);
15 if (shift
== mmu_psize_defs
[MMU_PAGE_2M
].shift
)
16 ap
= mmu_get_ap(MMU_PAGE_2M
);
17 else if (shift
== mmu_psize_defs
[MMU_PAGE_1G
].shift
)
18 ap
= mmu_get_ap(MMU_PAGE_1G
);
20 WARN(1, "Wrong huge page shift\n");
23 radix___flush_tlb_page(vma
->vm_mm
, vmaddr
, ap
, 0);
26 void radix__local_flush_hugetlb_page(struct vm_area_struct
*vma
, unsigned long vmaddr
)
28 unsigned long ap
, shift
;
29 struct hstate
*hstate
= hstate_file(vma
->vm_file
);
31 shift
= huge_page_shift(hstate
);
32 if (shift
== mmu_psize_defs
[MMU_PAGE_2M
].shift
)
33 ap
= mmu_get_ap(MMU_PAGE_2M
);
34 else if (shift
== mmu_psize_defs
[MMU_PAGE_1G
].shift
)
35 ap
= mmu_get_ap(MMU_PAGE_1G
);
37 WARN(1, "Wrong huge page shift\n");
40 radix___local_flush_tlb_page(vma
->vm_mm
, vmaddr
, ap
, 0);
 * A variant of hugetlb_get_unmapped_area doing a topdown search.
 * FIXME!! should we do as x86 does, or as the non-hugetlb area code does?
 * ie, use topdown or not based on a mmap_is_legacy() check?
49 radix__hugetlb_get_unmapped_area(struct file
*file
, unsigned long addr
,
50 unsigned long len
, unsigned long pgoff
,
53 struct mm_struct
*mm
= current
->mm
;
54 struct vm_area_struct
*vma
;
55 struct hstate
*h
= hstate_file(file
);
56 struct vm_unmapped_area_info info
;
58 if (len
& ~huge_page_mask(h
))
63 if (flags
& MAP_FIXED
) {
64 if (prepare_hugepage_range(file
, addr
, len
))
70 addr
= ALIGN(addr
, huge_page_size(h
));
71 vma
= find_vma(mm
, addr
);
72 if (TASK_SIZE
- len
>= addr
&&
73 (!vma
|| addr
+ len
<= vma
->vm_start
))
77 * We are always doing an topdown search here. Slice code
80 info
.flags
= VM_UNMAPPED_AREA_TOPDOWN
;
82 info
.low_limit
= PAGE_SIZE
;
83 info
.high_limit
= current
->mm
->mmap_base
;
84 info
.align_mask
= PAGE_MASK
& ~huge_page_mask(h
);
85 info
.align_offset
= 0;
86 return vm_unmapped_area(&info
);