#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
        spinlock_t lock;
        long count;
        long max_hpages;        /* Maximum huge pages or -1 if no maximum. */
        long used_hpages;       /* Used count against maximum, includes
                                 * both allocated and reserved pages. */
        struct hstate *hstate;
        long min_hpages;        /* Minimum huge pages or -1 if no minimum. */
        long rsv_hpages;        /* Pages reserved against global pool to
                                 * satisfy minimum size. */
};
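
/*
 * Illustrative sketch (not part of this header; the limit values are made
 * up for the example): a subpool is typically created per hugetlbfs mount
 * and pages are charged against its max/min limits.
 *
 *      struct hugepage_subpool *spool;
 *
 *      spool = hugepage_new_subpool(&default_hstate, 128, 16);
 *      if (spool)
 *              hugepage_put_subpool(spool);    -- drop the creator's reference
 */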

struct resv_map {
        struct kref refs;
        spinlock_t lock;
        struct list_head regions;
        long adds_in_progress;
        struct list_head region_cache;
        long region_cache_count;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);
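
/*
 * Lifetime sketch (an assumption drawn from how resv_map is used, not a
 * guarantee of this header): the map is refcounted via kref, and the final
 * kref_put() calls resv_map_release() to free the region lists and the map.
 *
 *      struct resv_map *resv = resv_map_alloc();
 *
 *      if (resv) {
 *              kref_get(&resv->refs);                   -- second user
 *              kref_put(&resv->refs, resv_map_release); -- still alive
 *              kref_put(&resv->refs, resv_map_release); -- last put frees it
 *      }
 */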

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
        for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
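
/*
 * Example usage of for_each_hstate() (illustrative sketch): iterate every
 * registered huge page size, e.g. to total up allocated pages in base-page
 * units.
 *
 *      struct hstate *h;
 *      unsigned long pages = 0;
 *
 *      for_each_hstate(h)
 *              pages += h->nr_huge_pages << huge_page_order(h);
 */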

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
                                              long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
                                     void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
                         struct page **, struct vm_area_struct **,
                         unsigned long *, unsigned long *, long, unsigned int);
void unmap_hugepage_range(struct vm_area_struct *,
                          unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                                  struct vm_area_struct *vma,
                                  unsigned long start, unsigned long end,
                                  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                            unsigned long start, unsigned long end,
                            struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                  unsigned long address, unsigned int flags);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
                          struct vm_area_struct *vma,
                          vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
                             long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
                             struct vm_area_struct *vma,
                             struct address_space *mapping,
                             pgoff_t idx, unsigned long address);
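
/*
 * Usage sketch (mirrors callers in mm/hugetlb.c, shown here only as an
 * illustration): hash the faulting context to one mutex so that faults on
 * the same file region serialize without a single global lock.
 *
 *      u32 hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
 *
 *      mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *      ... handle the fault ...
 *      mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */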

pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

extern int hugepages_treat_as_movable;
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
                      unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
                             pud_t *pud, int flags);
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
        return 0;
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w)   ({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)       ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)  ({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)     0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pmd(mm, addr, pmd, flags)   NULL
#define follow_huge_pud(mm, addr, pud, flags)   NULL
#define prepare_hugepage_range(file, addr, len) (-EINVAL)
#define pmd_huge(x)     0
#define pud_huge(x)     0
#define is_hugepage_only_range(mm, addr, len)   0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({ BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, flags)     ({ BUG(); 0; })
#define huge_pte_offset(mm, address)    0
static inline int dequeue_hwpoisoned_huge_page(struct page *page)
{
        return 0;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
        return false;
}
#define putback_active_hugepage(p)      do {} while (0)

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot)
{
        return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page)
{
        BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page)
{
        BUG();
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at page global directory. If an arch supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)     0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
        BUG();
        return 0;
}
#endif

#ifndef pud_write
static inline int pud_write(pud_t pud)
{
        BUG();
        return 0;
}
#endif

#ifndef is_hugepd
/*
 * Some architectures require a hugepage directory format to support
 * multiple hugepage sizes. For example, commit a4fe3ce76 ("powerpc/mm:
 * Allow more flexible layouts for hugepage pagetables") introduced this
 * on powerpc, allowing a more flexible hugepage pagetable layout.
 */
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
                              unsigned pdshift, unsigned long end,
                              int write, struct page **pages, int *nr)
{
        return 0;
}
#else
extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
                       unsigned pdshift, unsigned long end,
                       int write, struct page **pages, int *nr);
#endif

#define HUGETLB_ANON_FILE       "anon_hugepage"

enum {
        /*
         * The file will be used as a shm file so shmfs accounting rules
         * apply.
         */
        HUGETLB_SHMFS_INODE = 1,
        /*
         * The file is being created on the internal vfs mount and shmfs
         * accounting rules do not apply.
         */
        HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
        long    max_inodes;     /* inodes allowed */
        long    free_inodes;    /* inodes free */
        spinlock_t stat_lock;
        struct hstate *hstate;
        struct hugepage_subpool *spool;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
                                struct user_struct **user, int creat_flags,
                                int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
        if (file->f_op == &hugetlbfs_file_operations)
                return true;

        return is_file_shm_hugepages(file);
}
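
/*
 * Illustrative sketch (an assumption modelled on mmap-path callers, not a
 * rule stated by this header): mappings of huge page files must be sized
 * in huge page units.
 *
 *      if (is_file_hugepages(file))
 *              len = ALIGN(len, huge_page_size(hstate_file(file)));
 */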

#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)                 false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
                struct user_struct **user, int creat_flags,
                int page_size_log)
{
        return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
        int next_nid_to_alloc;
        int next_nid_to_free;
        unsigned int order;
        unsigned long mask;
        unsigned long max_huge_pages;
        unsigned long nr_huge_pages;
        unsigned long free_huge_pages;
        unsigned long resv_huge_pages;
        unsigned long surplus_huge_pages;
        unsigned long nr_overcommit_huge_pages;
        struct list_head hugepage_activelist;
        struct list_head hugepage_freelists[MAX_NUMNODES];
        unsigned int nr_huge_pages_node[MAX_NUMNODES];
        unsigned int free_huge_pages_node[MAX_NUMNODES];
        unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
        /* cgroup control files */
        struct cftype cgroup_files[5];
#endif
        char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
        struct list_head list;
        struct hstate *hstate;
#ifdef CONFIG_HIGHMEM
        phys_addr_t phys;
#endif
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
                             unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
                                   unsigned long addr, int avoid_reserve);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
                           pgoff_t idx);

/* arch callback */
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_inode(struct inode *i)
{
        struct hugetlbfs_sb_info *hsb;

        hsb = HUGETLBFS_SB(i->i_sb);
        return hsb->hstate;
}

static inline struct hstate *hstate_file(struct file *f)
{
        return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
        if (!page_size_log)
                return &default_hstate;

        return size_to_hstate(1UL << page_size_log);
}
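
/*
 * Illustrative note (assumption): page_size_log is the log2 page size that
 * mmap(MAP_HUGETLB) and shmget(SHM_HUGETLB) pack into their high flag bits,
 * with 0 meaning "use the default size". Requesting 2 MB pages:
 *
 *      struct hstate *h = hstate_sizelog(21);  -- 1UL << 21 == 2 MB
 *
 * which is NULL if no 2 MB hstate was registered at boot.
 */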

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
        return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
        return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
        return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
        return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
        return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
        return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
        return huge_page_size(h) / 512;
}
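
/*
 * Worked example (illustrative, assuming 4 KB base pages and a 2 MB hstate,
 * i.e. h->order == 9):
 *
 *      huge_page_size(h)       == 4096UL << 9  == 2 MB
 *      huge_page_shift(h)      == 9 + 12       == 21
 *      pages_per_huge_page(h)  == 1 << 9       == 512
 *      blocks_per_huge_page(h) == 2 MB / 512   == 4096 512-byte sectors
 */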

#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
                                       struct page *page, int writable)
{
        return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
        VM_BUG_ON_PAGE(!PageHuge(page), page);
        return size_to_hstate(PAGE_SIZE << compound_order(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
        return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
        return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
        if (!PageCompound(page))
                return page->index;

        return __basepage_index(page);
}
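
/*
 * Illustrative note (an assumption about the out-of-line helper): for a
 * huge page, the head's page->index is kept in huge-page units, so
 * __basepage_index() rescales it, roughly
 *
 *      head->index * pages_per_huge_page(h) + (page - head)
 *
 * to yield an index in PAGE_SIZE units.
 */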

extern void dissolve_free_huge_pages(unsigned long start_pfn,
                                     unsigned long end_pfn);
static inline int hugepage_migration_supported(struct hstate *h)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
        return huge_page_shift(h) == PMD_SHIFT;
#else
        return 0;
#endif
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
{
        if (huge_page_size(h) == PMD_SIZE)
                return pmd_lockptr(mm, (pmd_t *) pte);
        VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
        return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
        atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
        atomic_long_sub(l, &mm->hugetlb_usage);
}
#else   /* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page(v, a, r) NULL
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_noerr(v, a, r) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define page_hstate(page) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1;
}
#define hstate_index_to_shift(index) 0
#define hstate_index(h) 0

static inline pgoff_t basepage_index(struct page *page)
{
        return page->index;
}
#define dissolve_free_huge_pages(s, e)  do {} while (0)
#define hugepage_migration_supported(h) 0

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
{
        return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}
#endif  /* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
                                        struct mm_struct *mm, pte_t *pte)
{
        spinlock_t *ptl;

        ptl = huge_pte_lockptr(h, mm, pte);
        spin_lock(ptl);
        return ptl;
}
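
/*
 * Example usage (illustrative sketch): take the page-table lock covering a
 * huge PTE, operate under it, then unlock the spinlock that was returned.
 *
 *      spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *
 *      ... inspect or update the entry at ptep ...
 *      spin_unlock(ptl);
 */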

#endif /* _LINUX_HUGETLB_H */