[NETFILTER]: Replace sk_buff ** with sk_buff *
[deliverable/linux.git] / include / linux / hugetlb.h
CommitLineData
1da177e4
LT
1#ifndef _LINUX_HUGETLB_H
2#define _LINUX_HUGETLB_H
3
4e950f6f
AD
4#include <linux/fs.h>
5
1da177e4
LT
6#ifdef CONFIG_HUGETLB_PAGE
7
8#include <linux/mempolicy.h>
516dffdc 9#include <linux/shm.h>
63551ae0 10#include <asm/tlbflush.h>
1da177e4
LT
11
12struct ctl_table;
13
14static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
15{
16 return vma->vm_flags & VM_HUGETLB;
17}
18
19int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
396faf03 20int hugetlb_treat_movable_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
1da177e4
LT
21int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
22int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int);
1da177e4 23void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
502717f4 24void __unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
1da177e4
LT
25int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
26int hugetlb_report_meminfo(char *);
27int hugetlb_report_node_meminfo(int, char *);
1da177e4 28unsigned long hugetlb_total_pages(void);
ac9b9c66
HD
29int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
30 unsigned long address, int write_access);
a43a8c39
CK
31int hugetlb_reserve_pages(struct inode *inode, long from, long to);
32void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
1da177e4
LT
33
34extern unsigned long max_huge_pages;
396faf03 35extern unsigned long hugepages_treat_as_movable;
1da177e4
LT
36extern const unsigned long hugetlb_zero, hugetlb_infinity;
37extern int sysctl_hugetlb_shm_group;
38
63551ae0
DG
39/* arch callbacks */
40
41pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr);
42pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
39dde65c 43int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
63551ae0
DG
44struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
45 int write);
46struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
47 pmd_t *pmd, int write);
63551ae0 48int pmd_huge(pmd_t pmd);
8f860591
ZY
49void hugetlb_change_protection(struct vm_area_struct *vma,
50 unsigned long address, unsigned long end, pgprot_t newprot);
63551ae0 51
1da177e4
LT
52#ifndef ARCH_HAS_HUGEPAGE_ONLY_RANGE
53#define is_hugepage_only_range(mm, addr, len) 0
9da61aef
DG
54#endif
55
56#ifndef ARCH_HAS_HUGETLB_FREE_PGD_RANGE
57#define hugetlb_free_pgd_range free_pgd_range
3915bcf3
DG
58#else
59void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
60 unsigned long end, unsigned long floor,
61 unsigned long ceiling);
1da177e4
LT
62#endif
63
64#ifndef ARCH_HAS_PREPARE_HUGEPAGE_RANGE
42b88bef
DG
65/*
66 * If the arch doesn't supply something else, assume that hugepage
67 * size aligned regions are ok without further preparation.
68 */
dec4ad86 69static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
42b88bef
DG
70{
71 if (len & ~HPAGE_MASK)
72 return -EINVAL;
73 if (addr & ~HPAGE_MASK)
74 return -EINVAL;
75 return 0;
76}
1da177e4 77#else
dec4ad86 78int prepare_hugepage_range(unsigned long addr, unsigned long len);
1da177e4
LT
79#endif
80
63551ae0
DG
81#ifndef ARCH_HAS_SETCLEAR_HUGE_PTE
82#define set_huge_pte_at(mm, addr, ptep, pte) set_pte_at(mm, addr, ptep, pte)
83#define huge_ptep_get_and_clear(mm, addr, ptep) ptep_get_and_clear(mm, addr, ptep)
84#else
85void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
86 pte_t *ptep, pte_t pte);
87pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
88 pte_t *ptep);
89#endif
90
91#ifndef ARCH_HAS_HUGETLB_PREFAULT_HOOK
92#define hugetlb_prefault_arch_hook(mm) do { } while (0)
93#else
94void hugetlb_prefault_arch_hook(struct mm_struct *mm);
95#endif
96
1da177e4
LT
97#else /* !CONFIG_HUGETLB_PAGE */
98
/* Stub for !CONFIG_HUGETLB_PAGE: no VMA can ever be a hugetlb mapping. */
static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
{
	return 0;
}
/*
 * Stub for !CONFIG_HUGETLB_PAGE: with hugetlb support compiled out
 * there are never any huge pages in the system.
 */
static inline unsigned long hugetlb_total_pages(void)
{
	return 0UL;
}
107
108#define follow_hugetlb_page(m,v,p,vs,a,b,i) ({ BUG(); 0; })
109#define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL)
110#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
111#define hugetlb_prefault(mapping, vma) ({ BUG(); 0; })
1da177e4 112#define unmap_hugepage_range(vma, start, end) BUG()
1da177e4
LT
113#define hugetlb_report_meminfo(buf) 0
114#define hugetlb_report_node_meminfo(n, buf) 0
115#define follow_huge_pmd(mm, addr, pmd, write) NULL
dec4ad86 116#define prepare_hugepage_range(addr,len) (-EINVAL)
1da177e4
LT
117#define pmd_huge(x) 0
118#define is_hugepage_only_range(mm, addr, len) 0
9da61aef 119#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
ac9b9c66 120#define hugetlb_fault(mm, vma, addr, write) ({ BUG(); 0; })
1da177e4 121
8f860591
ZY
122#define hugetlb_change_protection(vma, address, end, newprot)
123
1da177e4 124#ifndef HPAGE_MASK
51c6f666
RH
125#define HPAGE_MASK PAGE_MASK /* Keep the compiler happy */
126#define HPAGE_SIZE PAGE_SIZE
1da177e4
LT
127#endif
128
129#endif /* !CONFIG_HUGETLB_PAGE */
130
131#ifdef CONFIG_HUGETLBFS
/*
 * Per-mount configuration for hugetlbfs — presumably parsed from
 * mount options (TODO: confirm against the hugetlbfs mount code).
 */
struct hugetlbfs_config {
	uid_t	uid;		/* owning uid for the filesystem */
	gid_t	gid;		/* owning gid for the filesystem */
	umode_t	mode;		/* permission bits */
	long	nr_blocks;	/* limit on huge-page-sized blocks */
	long	nr_inodes;	/* limit on inodes */
};
139
/*
 * Per-superblock accounting for a hugetlbfs mount; stat_lock
 * serialises updates to the block/inode counters below.
 */
struct hugetlbfs_sb_info {
	long	max_blocks;	/* blocks allowed */
	long	free_blocks;	/* blocks free */
	long	max_inodes;	/* inodes allowed */
	long	free_inodes;	/* inodes free */
	spinlock_t	stat_lock;	/* protects the counters above */
};
147
148
/*
 * hugetlbfs per-inode data.  The VFS inode is embedded so that
 * HUGETLBFS_I() can recover this structure via container_of().
 */
struct hugetlbfs_inode_info {
	struct shared_policy policy;	/* shared NUMA policy (see mempolicy.h) */
	struct inode vfs_inode;		/* embedded VFS inode; see HUGETLBFS_I() */
};
153
/* Map a VFS inode back to its enclosing hugetlbfs_inode_info. */
static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}
158
/* Fetch the hugetlbfs-private info stored in a superblock's s_fs_info. */
static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}
163
4b6f5d20 164extern const struct file_operations hugetlbfs_file_operations;
1da177e4 165extern struct vm_operations_struct hugetlb_vm_ops;
9d66586f 166struct file *hugetlb_file_setup(const char *name, size_t);
1da177e4
LT
167int hugetlb_get_quota(struct address_space *mapping);
168void hugetlb_put_quota(struct address_space *mapping);
169
170static inline int is_file_hugepages(struct file *file)
171{
516dffdc
AL
172 if (file->f_op == &hugetlbfs_file_operations)
173 return 1;
174 if (is_file_shm_hugepages(file))
175 return 1;
176
177 return 0;
1da177e4
LT
178}
179
/* Switch @file over to the hugetlbfs file operations. */
static inline void set_file_hugepages(struct file *file)
{
	file->f_op = &hugetlbfs_file_operations;
}
184#else /* !CONFIG_HUGETLBFS */
185
186#define is_file_hugepages(file) 0
187#define set_file_hugepages(file) BUG()
9d66586f 188#define hugetlb_file_setup(name,size) ERR_PTR(-ENOSYS)
1da177e4
LT
189
190#endif /* !CONFIG_HUGETLBFS */
191
d2ba27e8
AB
192#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
193unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
194 unsigned long len, unsigned long pgoff,
195 unsigned long flags);
196#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
197
1da177e4 198#endif /* _LINUX_HUGETLB_H */
This page took 0.436229 seconds and 5 git commands to generate.