hugetlbfs: handle pages higher order than MAX_ORDER
[deliverable/linux.git] / mm / internal.h
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/mm.h>

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);

extern void prep_compound_page(struct page *page, unsigned long order);

static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_count, v);
}

/*
 * Turn a non-refcounted page (->_count == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON(PageTail(page));
	VM_BUG_ON(atomic_read(&page->_count));
	set_page_count(page, 1);
}

static inline void __put_page(struct page *page)
{
	atomic_dec(&page->_count);
}

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);

/*
 * in mm/page_alloc.c
 */
extern void __free_pages_bootmem(struct page *page, unsigned int order);

/*
 * Function for dealing with a page's order in the buddy system.
 * zone->lock is already acquired when we use these, so we don't
 * need atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page)
{
	VM_BUG_ON(!PageBuddy(page));
	return page_private(page);
}
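
/*
 * Illustrative sketch, not part of the original file: with zone->lock
 * held, buddy-allocator code can test a neighbouring free block with
 * this helper, e.g. when deciding whether two buddies of the same order
 * can be coalesced ('buddy' and 'order' are the caller's values):
 *
 *	if (PageBuddy(buddy) && page_order(buddy) == order) {
 *		// 'buddy' is free and of matching order; safe to merge.
 *	}
 */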

extern long mlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}
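
/*
 * Illustrative sketch, not part of this header: address-space teardown
 * (an exit_mmap()-style path) is expected to drop mlocked pages before
 * unmapping, roughly:
 *
 *	if (mm->locked_vm) {
 *		struct vm_area_struct *vma = mm->mmap;
 *
 *		while (vma) {
 *			if (vma->vm_flags & VM_LOCKED)
 *				munlock_vma_pages_all(vma);
 *			vma = vma->vm_next;
 *		}
 *	}
 */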

#ifdef CONFIG_UNEVICTABLE_LRU
/*
 * unevictable_migrate_page() is called only from migrate_page_copy() to
 * migrate the Unevictable flag to the new page.
 * Note that the old page has been isolated from the LRU lists at this
 * point, so we don't need to worry about LRU statistics.
 */
static inline void unevictable_migrate_page(struct page *new, struct page *old)
{
	if (TestClearPageUnevictable(old))
		SetPageUnevictable(new);
}
#else
static inline void unevictable_migrate_page(struct page *new, struct page *old)
{
}
#endif

#ifdef CONFIG_UNEVICTABLE_LRU
/*
 * Called only in the fault path, via page_evictable(), for a new page
 * to determine if it's being mapped into a LOCKED vma.
 * If so, mark the page as mlocked.
 */
static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
{
	VM_BUG_ON(PageLRU(page));

	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
		return 0;

	if (!TestSetPageMlocked(page)) {
		inc_zone_page_state(page, NR_MLOCK);
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
	return 1;
}
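
/*
 * Illustrative sketch, not part of this header: the expected caller is
 * page_evictable() in mm/vmscan.c, roughly of the form:
 *
 *	int page_evictable(struct page *page, struct vm_area_struct *vma)
 *	{
 *		if (mapping_unevictable(page_mapping(page)))
 *			return 0;
 *		if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
 *			return 0;
 *		return 1;
 *	}
 */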

/*
 * must be called with vma's mmap_sem held for read, and page locked.
 */
extern void mlock_vma_page(struct page *page);

/*
 * Clear the page's PageMlocked().  This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void __clear_page_mlock(struct page *page);
static inline void clear_page_mlock(struct page *page)
{
	if (unlikely(TestClearPageMlocked(page)))
		__clear_page_mlock(page);
}
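
/*
 * Illustrative sketch, not part of this header: a truncation path that
 * unconditionally removes a page from the pagecache might look roughly
 * like the following (helper names are for illustration only):
 *
 *	clear_page_mlock(page);		// safe whether or not it was mlocked
 *	remove_from_page_cache(page);
 *	page_cache_release(page);
 */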

/*
 * mlock_migrate_page - called only from migrate_page_copy() to
 * migrate the Mlocked page flag; update statistics.
 */
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
	if (TestClearPageMlocked(page)) {
		unsigned long flags;

		local_irq_save(flags);
		__dec_zone_page_state(page, NR_MLOCK);
		SetPageMlocked(newpage);
		__inc_zone_page_state(newpage, NR_MLOCK);
		local_irq_restore(flags);
	}
}

/*
 * free_page_mlock() -- clean up attempts to free an mlocked() page.
 * The page should not be on the LRU, so no need to fix that up;
 * free_pages_check() will verify...
 */
static inline void free_page_mlock(struct page *page)
{
	if (unlikely(TestClearPageMlocked(page))) {
		unsigned long flags;

		local_irq_save(flags);
		__dec_zone_page_state(page, NR_MLOCK);
		__count_vm_event(UNEVICTABLE_MLOCKFREED);
		local_irq_restore(flags);
	}
}

#else /* CONFIG_UNEVICTABLE_LRU */
static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
{
	return 0;
}
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }
static inline void free_page_mlock(struct page *page) { }

#endif /* CONFIG_UNEVICTABLE_LRU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'.  Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return pfn_to_page(page_to_pfn(base) + offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'.  Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
						struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}
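
/*
 * Illustrative sketch, not part of this header: for a gigantic page
 * (order higher than MAX_ORDER), 'page + i' is not guaranteed to be valid
 * past a MAX_ORDER_NR_PAGES boundary, so a caller clearing such a page
 * walks the subpages with the helpers above, roughly ('pages_per_huge_page'
 * and 'addr' stand for the caller's own values):
 *
 *	struct page *p = page;
 *	int i;
 *
 *	for (i = 0; i < pages_per_huge_page; ) {
 *		cond_resched();
 *		clear_user_highpage(p, addr + i * PAGE_SIZE);
 *		i++;
 *		p = mem_map_next(p, page, i);
 *	}
 */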

/*
 * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
 * so all functions starting at paging_init should be marked __init
 * in those cases. SPARSEMEM, however, allows for memory hotplug,
 * and alloc_bootmem_node is not used.
 */
#ifdef CONFIG_SPARSEMEM
#define __paginginit __meminit
#else
#define __paginginit __init
#endif
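
/*
 * Illustrative sketch, not part of this header: node/zone setup routines
 * that run at paging_init() time carry this annotation, e.g.:
 *
 *	void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
 *			unsigned long node_start_pfn, unsigned long *zholes_size);
 */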

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		printk(level <= MMINIT_WARNING ? KERN_WARNING : KERN_DEBUG); \
		printk(KERN_CONT "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)
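
/*
 * Illustrative sketch, not part of this header: callers pass a severity,
 * a subsystem prefix and a printf-style format, as memmap initialisation
 * code might do (the argument names are placeholders for the caller's
 * own values):
 *
 *	mminit_dprintk(MMINIT_TRACE, "memmap_init",
 *		"Initialising map node %d zone %lu pfns %lu -> %lu\n",
 *		nid, zone, start_pfn, start_pfn + size);
 */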

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn);
extern void mminit_verify_zonelist(void);

#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

#define GUP_FLAGS_WRITE                  0x1
#define GUP_FLAGS_FORCE                  0x2
#define GUP_FLAGS_IGNORE_VMA_PERMISSIONS 0x4

int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long start, int len, int flags,
		     struct page **pages, struct vm_area_struct **vmas);
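
/*
 * Illustrative sketch, not part of this header: a get_user_pages()-style
 * wrapper would translate its boolean arguments into these flags before
 * calling __get_user_pages(), roughly:
 *
 *	int flags = 0;
 *
 *	if (write)
 *		flags |= GUP_FLAGS_WRITE;
 *	if (force)
 *		flags |= GUP_FLAGS_FORCE;
 *
 *	return __get_user_pages(tsk, mm, start, len, flags, pages, vmas);
 */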

#endif