#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/prio_tree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>

struct mempolicy;
struct anon_vma;
struct file_ra_state;
struct user_struct;
struct writeback_control;

#ifndef CONFIG_DISCONTIGMEM          /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
#endif

extern unsigned long num_physpages;
extern unsigned long totalram_pages;
extern void * high_memory;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

extern struct kmem_cache *vm_area_cachep;

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;

extern unsigned int kobjsize(const void *objp);
#endif

/*
 * vm_flags in vm_area_struct, see mm_types.h.
 */
#define VM_NONE		0x00000000

#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */

#define VM_EXECUTABLE	0x00001000
#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000	/* Memory mapped I/O or similar */

					/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000	/* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_RESERVED	0x00080000	/* Count as reserved_vm like IO */
#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
#define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_NODUMP	0x04000000	/* Do not include in the core dump */

#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
#define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
#define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */

#if defined(CONFIG_X86)
# define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
#elif defined(CONFIG_PPC)
# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_IA64)
# define VM_GROWSUP	VM_ARCH_1
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
#endif

#ifndef VM_GROWSUP
# define VM_GROWSUP	VM_NONE
#endif

/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)

#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_FLAGS	(VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#else
#define VM_STACK_FLAGS	(VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#endif

#define VM_READHINTMASK			(VM_SEQ_READ | VM_RAND_READ)
#define VM_ClearReadHint(v)		(v)->vm_flags &= ~VM_READHINTMASK
#define VM_NormalReadHint(v)		(!((v)->vm_flags & VM_READHINTMASK))
#define VM_SequentialReadHint(v)	((v)->vm_flags & VM_SEQ_READ)
#define VM_RandomReadHint(v)		((v)->vm_flags & VM_RAND_READ)

/*
 * Special vmas that are non-mergeable, non-mlock()able.
 * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask..
 */
extern pgprot_t protection_map[16];

#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
#define FAULT_FLAG_NONLINEAR	0x02	/* Fault was via a nonlinear mapping */
#define FAULT_FLAG_MKWRITE	0x04	/* Fault was mkwrite of existing pte */
#define FAULT_FLAG_ALLOW_RETRY	0x08	/* Retry fault if blocking */
#define FAULT_FLAG_RETRY_NOWAIT	0x10	/* Don't drop mmap_sem and wait when retrying */
#define FAULT_FLAG_KILLABLE	0x20	/* The fault task is in SIGKILL killable region */

/*
 * vm_fault is filled by the pagefault handler and passed to the vma's
 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 *
 * pgoff should be used in favour of virtual_address, if possible. If pgoff
 * is used, one may implement ->remap_pages to get nonlinear mapping support.
 */
struct vm_fault {
	unsigned int flags;		/* FAULT_FLAG_xxx flags */
	pgoff_t pgoff;			/* Logical page offset based on vma */
	void __user *virtual_address;	/* Faulting virtual address */

	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_ERROR).
					 */
};
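
/*
 * Example (illustrative sketch): a minimal ->fault handler of the shape
 * described above.  It resolves vmf->pgoff to a page, takes a reference
 * that the core VM drops later, and stores the page in vmf->page.  The
 * names my_dev and my_dev_lookup_page are hypothetical:
 *
 *	static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 *	{
 *		struct my_dev *dev = vma->vm_private_data;
 *		struct page *page = my_dev_lookup_page(dev, vmf->pgoff);
 *
 *		if (!page)
 *			return VM_FAULT_SIGBUS;
 *		get_page(page);
 *		vmf->page = page;
 *		return 0;
 *	}
 */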

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* notification that a previously read-only page is about to become
	 * writable, if an error is returned it will cause a SIGBUS */
	int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs that can switch between memory and hardware
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);
#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
	int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
		const nodemask_t *to, unsigned long flags);
#endif
	/* called by sys_remap_file_pages() to populate non-linear mapping */
	int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
			   unsigned long size, pgoff_t pgoff);
};

struct mmu_gather;
struct inode;

#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

/*
 * FIXME: take this include out, include page-flags.h in
 * files which need it (119 of them)
 */
#include <linux/page-flags.h>
#include <linux/huge_mm.h>

/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */

/*
 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 */
static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON(atomic_read(&page->_count) == 0);
	return atomic_dec_and_test(&page->_count);
}

/*
 * Try to grab a ref unless the page has a refcount of zero, return false if
 * that is the case.
 */
static inline int get_page_unless_zero(struct page *page)
{
	return atomic_inc_not_zero(&page->_count);
}
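
/*
 * Example (illustrative sketch): the speculative-reference pattern these
 * helpers enable.  A lockless lookup may race with the page being freed,
 * so a reference is only taken if the count is still non-zero, and it is
 * dropped again with put_page() (declared further down in this header):
 *
 *	rcu_read_lock();
 *	page = radix_tree_lookup(&mapping->page_tree, index);
 *	if (page && !get_page_unless_zero(page))
 *		page = NULL;
 *	rcu_read_unlock();
 *	if (page) {
 *		...use the pinned page, then...
 *		put_page(page);
 *	}
 */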

extern int page_is_ram(unsigned long pfn);

/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

/*
 * Determine if an address is within the vmalloc range
 *
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */
static inline int is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
	return 0;
#endif
}
#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline int is_vmalloc_or_module_addr(const void *x)
{
	return 0;
}
#endif

static inline void compound_lock(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON(PageSlab(page));
	bit_spin_lock(PG_compound_lock, &page->flags);
#endif
}

static inline void compound_unlock(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON(PageSlab(page));
	bit_spin_unlock(PG_compound_lock, &page->flags);
#endif
}

static inline unsigned long compound_lock_irqsave(struct page *page)
{
	unsigned long uninitialized_var(flags);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	local_irq_save(flags);
	compound_lock(page);
#endif
	return flags;
}

static inline void compound_unlock_irqrestore(struct page *page,
					      unsigned long flags)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	compound_unlock(page);
	local_irq_restore(flags);
#endif
}

static inline struct page *compound_head(struct page *page)
{
	if (unlikely(PageTail(page)))
		return page->first_page;
	return page;
}

/*
 * The atomic page->_mapcount starts from -1: so that transitions
 * both from it and to it can be tracked, using atomic_inc_and_test
 * and atomic_add_negative(-1).
 */
static inline void reset_page_mapcount(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

static inline int page_mapcount(struct page *page)
{
	return atomic_read(&(page)->_mapcount) + 1;
}

static inline int page_count(struct page *page)
{
	return atomic_read(&compound_head(page)->_count);
}

static inline void get_huge_page_tail(struct page *page)
{
	/*
	 * __split_huge_page_refcount() cannot run
	 * from under us.
	 */
	VM_BUG_ON(page_mapcount(page) < 0);
	VM_BUG_ON(atomic_read(&page->_count) != 0);
	atomic_inc(&page->_mapcount);
}

extern bool __get_page_tail(struct page *page);

static inline void get_page(struct page *page)
{
	if (unlikely(PageTail(page)))
		if (likely(__get_page_tail(page)))
			return;
	/*
	 * Getting a normal page or the head of a compound page
	 * requires an already-elevated page->_count.
	 */
	VM_BUG_ON(atomic_read(&page->_count) <= 0);
	atomic_inc(&page->_count);
}

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);
	return compound_head(page);
}

/*
 * Set up the page count before the page is freed into the page allocator
 * for the first time (boot or memory hotplug)
 */
static inline void init_page_count(struct page *page)
{
	atomic_set(&page->_count, 1);
}

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 *
 * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
 * -2 so that an underflow of the page_mapcount() won't be mistaken
 * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
 * efficiently by most CPU architectures.
 */
#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)

static inline int PageBuddy(struct page *page)
{
	return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
}

static inline void __SetPageBuddy(struct page *page)
{
	VM_BUG_ON(atomic_read(&page->_mapcount) != -1);
	atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
}

static inline void __ClearPageBuddy(struct page *page)
{
	VM_BUG_ON(!PageBuddy(page));
	atomic_set(&page->_mapcount, -1);
}

void put_page(struct page *page);
void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);
int split_free_page(struct page *page);

/*
 * Compound pages have a destructor function.  Provide a
 * prototype for that function and accessor functions.
 * These are _only_ valid on the head of a PG_compound page.
 */
typedef void compound_page_dtor(struct page *);

static inline void set_compound_page_dtor(struct page *page,
						compound_page_dtor *dtor)
{
	page[1].lru.next = (void *)dtor;
}

static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
	return (compound_page_dtor *)page[1].lru.next;
}

static inline int compound_order(struct page *page)
{
	if (!PageHead(page))
		return 0;
	return (unsigned long)page[1].lru.prev;
}

static inline int compound_trans_order(struct page *page)
{
	int order;
	unsigned long flags;

	if (!PageHead(page))
		return 0;

	flags = compound_lock_irqsave(page);
	order = compound_order(page);
	compound_unlock_irqrestore(page, flags);
	return order;
}

static inline void set_compound_order(struct page *page, unsigned long order)
{
	page[1].lru.prev = (void *)order;
}

#ifdef CONFIG_MMU
/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case we always want
 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm.
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}
#endif

/*
 * Multiple processes may "see" the same page. E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page_count(page) denotes a reference count.
 * page_count() == 0 means the page is free. page->lru is then used for
 * freelist management in the buddy allocator.
 * page_count() > 0 means the page has been allocated.
 *
 * Pages are allocated by the slab allocator in order to provide memory
 * to kmalloc and kmem_cache_alloc. In this case, the management of the
 * page, and the fields in 'struct page' are the responsibility of mm/slab.c
 * unless a particular usage is carefully commented. (the responsibility of
 * freeing the kmalloc memory is the caller's, of course).
 *
 * A page may be used by anyone else who does a __get_free_page().
 * In this case, page_count still tracks the references, and should only
 * be used through the normal accessor functions. The top bits of page->flags
 * and page->virtual store page management information, but all other fields
 * are unused and could be used privately, carefully. The management of this
 * page is the responsibility of the one who allocated it, and those who have
 * subsequently been given references to it.
 *
 * The other pages (we may call them "pagecache pages") are completely
 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 * The following discussion applies only to them.
 *
 * A pagecache page contains an opaque `private' member, which belongs to the
 * page's address_space. Usually, this is the address of a circular list of
 * the page's disk buffers. PG_private must be set to tell the VM to call
 * into the filesystem to release these pages.
 *
 * A page may belong to an inode's memory mapping. In this case, page->mapping
 * is the pointer to the inode, and page->index is the file offset of the page,
 * in units of PAGE_CACHE_SIZE.
 *
 * If pagecache pages are not associated with an inode, they are said to be
 * anonymous pages. These may become associated with the swapcache, and in that
 * case PG_swapcache is set, and page->private is an offset into the swapcache.
 *
 * In either case (swapcache or inode backed), the pagecache itself holds one
 * reference to the page. Setting PG_private should also increment the
 * refcount. Each user mapping also has a reference to the page.
 *
 * The pagecache pages are stored in a per-mapping radix tree, which is
 * rooted at mapping->page_tree, and indexed by offset.
 * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
 * lists, we instead now tag pages as dirty/writeback in the radix tree.
 *
 * All pagecache pages may be subject to I/O:
 * - inode pages may need to be read from disk,
 * - inode pages which have been modified and are MAP_SHARED may need
 *   to be written back to the inode on disk,
 * - anonymous pages (including MAP_PRIVATE file mappings) which have been
 *   modified may need to be swapped out to swap space and (later) to be read
 *   back into memory.
 */

/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 */


/*
 * page->flags layout:
 *
 * There are three possibilities for how page->flags get
 * laid out.  The first is for the normal case, without
 * sparsemem.  The second is for sparsemem when there is
 * plenty of space for node and section.  The last is when
 * we have run out of space and have to fall back to an
 * alternate (slower) way of determining the node.
 *
 * No sparsemem or sparsemem vmemmap: |       NODE     | ZONE | ... | FLAGS |
 * classic sparse with space for node:| SECTION | NODE | ZONE | ... | FLAGS |
 * classic sparse no space for node:  | SECTION |     ZONE    | ... | FLAGS |
 */
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTIONS_WIDTH		SECTIONS_SHIFT
#else
#define SECTIONS_WIDTH		0
#endif

#define ZONES_WIDTH		ZONES_SHIFT

#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
#define NODES_WIDTH		NODES_SHIFT
#else
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#error "Vmemmap: No space for nodes field in page flags"
#endif
#define NODES_WIDTH		0
#endif

/* Page flags: | [SECTION] | [NODE] | ZONE | ... | FLAGS | */
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)

/*
 * We are going to use the flags for the page to node mapping if it's in
 * there.  This includes the case where there is no node, so it is implicit.
 */
#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0)
#define NODE_NOT_IN_PAGE_FLAGS
#endif

/*
 * Define the bit shifts to access each section.  For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away reference to them.
 */
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))

/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
						SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF)? \
						NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(const struct page *page)
{
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

/*
 * The identification function is only used by the buddy allocator for
 * determining if two pages could be buddies. We are not really
 * identifying a zone since we could be using the section number
 * id if no node id is available in page flags.
 * We guarantee only that it will return the same value for two
 * combinable pages in a zone.
 */
static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

static inline int zone_to_nid(struct zone *zone)
{
#ifdef CONFIG_NUMA
	return zone->node;
#else
	return 0;
#endif
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

static inline struct zone *page_zone(const struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}
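
/*
 * Worked example (illustrative, assuming a 64-bit FLATMEM kernel with
 * NODES_SHIFT == 6 and ZONES_SHIFT == 2): SECTIONS_WIDTH is 0, so
 * SECTIONS_PGOFF == 64, NODES_PGOFF == 64 - 6 == 58 and
 * ZONES_PGOFF == 58 - 2 == 56.  page_to_nid() then reads bits 58..63
 * and page_zonenum() bits 56..57, i.e. effectively:
 *
 *	int nid = (page->flags >> 58) & 0x3f;
 *	enum zone_type zone = (page->flags >> 56) & 0x3;
 *
 * set_page_links() below stores these fields at initialisation time.
 */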

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline unsigned long page_to_section(const struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
	unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
	set_page_section(page, pfn_to_section_nr(pfn));
#endif
}

/*
 * Some inline functions in vmstat.h depend on page_zone()
 */
#include <linux/vmstat.h>

static __always_inline void *lowmem_page_address(const struct page *page)
{
	return __va(PFN_PHYS(page_to_pfn(page)));
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
#define page_address(page) ((page)->virtual)
#define set_page_address(page, address)			\
	do {						\
		(page)->virtual = (address);		\
	} while(0)
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(const struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
 * and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	1
#define PAGE_MAPPING_KSM	2
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)

extern struct address_space swapper_space;
static inline struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping = page->mapping;

	VM_BUG_ON(PageSlab(page));
	if (unlikely(PageSwapCache(page)))
		mapping = &swapper_space;
	else if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		mapping = NULL;
	return mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
static inline void *page_rmapping(struct page *page)
{
	return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
}

extern struct address_space *__page_file_mapping(struct page *);

static inline
struct address_space *page_file_mapping(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_mapping(page);

	return page->mapping;
}

static inline int PageAnon(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

/*
 * Return the pagecache index of the passed page.  Regular pagecache pages
 * use ->index whereas swapcache pages use ->private
 */
static inline pgoff_t page_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return page_private(page);
	return page->index;
}

extern pgoff_t __page_file_index(struct page *page);

/*
 * Return the file index of the page. Regular pagecache pages use ->index
 * whereas swapcache pages use swp_offset(->private)
 */
static inline pgoff_t page_file_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_index(page);

	return page->index;
}

/*
 * Return true if this page is mapped into pagetables.
 */
static inline int page_mapped(struct page *page)
{
	return atomic_read(&(page)->_mapcount) >= 0;
}

/*
 * Different kinds of faults, as returned by handle_mm_fault().
 * Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 */

#define VM_FAULT_MINOR	0 /* For backwards compat. Remove me quickly. */

#define VM_FAULT_OOM	0x0001
#define VM_FAULT_SIGBUS	0x0002
#define VM_FAULT_MAJOR	0x0004
#define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
#define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned small page */
#define VM_FAULT_HWPOISON_LARGE 0x0020  /* Hit poisoned large page. Index encoded in upper bits */

#define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, did not return a page */
#define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
#define VM_FAULT_RETRY	0x0400	/* ->fault blocked, must retry */

#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */

#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
			 VM_FAULT_HWPOISON_LARGE)

/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)

/*
 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
 */
extern void pagefault_out_of_memory(void);

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

/*
 * Flags passed to show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES	(0x0001u)	/* filter disallowed nodes */

extern void show_free_areas(unsigned int flags);
extern bool skip_free_areas_node(unsigned int flags, int nid);

int shmem_zero_setup(struct vm_area_struct *);

extern int can_do_mlock(void);
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);

/*
 * Parameter block passed down to zap_pte_range in exceptional cases.
 */
struct zap_details {
	struct vm_area_struct *nonlinear_vma;	/* Check page->index if set */
	struct address_space *check_mapping;	/* Check page->mapping if set */
	pgoff_t	first_index;			/* Lowest page->index to unmap */
	pgoff_t last_index;			/* Highest page->index to unmap */
};

struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
		pte_t pte);

int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *);
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long start, unsigned long end);

/**
 * mm_walk - callbacks for walk_page_range
 * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
 * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
 *	       this handler is required to be able to handle
 *	       pmd_trans_huge() pmds.  The handler may simply choose to
 *	       split_huge_page() instead of handling it explicitly.
 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
 * @pte_hole: if set, called for each hole at all levels
 * @hugetlb_entry: if set, called for each hugetlb entry
 *		   *Caution*: The caller must hold mmap_sem if @hugetlb_entry
 *		   is used.
 *
 * (see walk_page_range for more details)
 */
struct mm_walk {
	int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pud_entry)(pud_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
	int (*hugetlb_entry)(pte_t *, unsigned long,
			     unsigned long, unsigned long, struct mm_walk *);
	struct mm_struct *mm;
	void *private;
};
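
/*
 * Example (illustrative sketch): counting present ptes in a range with
 * walk_page_range(), declared just below.  Callbacks that are not needed
 * stay NULL; the walker hands the mm_walk itself to every callback, so
 * ->private can carry caller state.  The caller must hold mmap_sem:
 *
 *	static int count_pte(pte_t *pte, unsigned long addr,
 *			     unsigned long next, struct mm_walk *walk)
 *	{
 *		unsigned long *count = walk->private;
 *
 *		if (pte_present(*pte))
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	unsigned long count = 0;
 *	struct mm_walk walk = {
 *		.pte_entry	= count_pte,
 *		.mm		= mm,
 *		.private	= &count,
 *	};
 *
 *	down_read(&mm->mmap_sem);
 *	walk_page_range(start, end, &walk);
 *	up_read(&mm->mmap_sem);
 */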

int walk_page_range(unsigned long addr, unsigned long end,
		struct mm_walk *walk);
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
		unsigned long end, unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma);
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
			void *buf, int len, int write);

static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
{
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}

extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
extern int vmtruncate(struct inode *inode, loff_t offset);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
int invalidate_inode_page(struct page *page);

#ifdef CONFIG_MMU
extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long address, unsigned int fault_flags);
#else
static inline int handle_mm_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	/* should never happen if there's no MMU */
	BUG();
	return VM_FAULT_SIGBUS;
}
static inline int fixup_user_fault(struct task_struct *tsk,
		struct mm_struct *mm, unsigned long address,
		unsigned int fault_flags)
{
	/* should never happen if there's no MMU */
	BUG();
	return -EFAULT;
}
#endif

extern int make_pages_present(unsigned long addr, unsigned long end);
extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, int write);

int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long start, int len, unsigned int foll_flags,
		     struct page **pages, struct vm_area_struct **vmas,
		     int *nonblocking);
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
			unsigned long start, int nr_pages, int write, int force,
			struct page **pages, struct vm_area_struct **vmas);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages);
struct kvec;
int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
			struct page **pages);
int get_kernel_page(unsigned long start, int write, struct page **pages);
struct page *get_dump_page(unsigned long addr);
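
/*
 * Example (illustrative sketch): pinning a single page of the current
 * task's address space for writing with get_user_pages(), then marking
 * it dirty and dropping the pin.  mmap_sem must be held across the call:
 *
 *	struct page *page;
 *	int ret;
 *
 *	down_read(&current->mm->mmap_sem);
 *	ret = get_user_pages(current, current->mm, addr & PAGE_MASK,
 *			     1, 1, 0, &page, NULL);
 *	up_read(&current->mm->mmap_sem);
 *	if (ret == 1) {
 *		...read or write the page contents, e.g. via kmap()...
 *		set_page_dirty_lock(page);
 *		put_page(page);
 *	}
 */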

extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned long offset);

int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
				struct page *page);
void account_page_dirtied(struct page *page, struct address_space *mapping);
void account_page_writeback(struct page *page);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);

/* Is the vma a continuation of the stack vma above it? */
static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
{
	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
}

static inline int stack_guard_page_start(struct vm_area_struct *vma,
					     unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSDOWN) &&
		(vma->vm_start == addr) &&
		!vma_growsdown(vma->vm_prev, addr);
}

/* Is the vma a continuation of the stack vma below it? */
static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
{
	return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
}

static inline int stack_guard_page_end(struct vm_area_struct *vma,
					   unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSUP) &&
		(vma->vm_end == addr) &&
		!vma_growsup(vma->vm_next, addr);
}

extern pid_t
vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);

extern unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len);
extern unsigned long do_mremap(unsigned long addr,
			       unsigned long old_len, unsigned long new_len,
			       unsigned long flags, unsigned long new_addr);
extern int mprotect_fixup(struct vm_area_struct *vma,
			  struct vm_area_struct **pprev, unsigned long start,
			  unsigned long end, unsigned long newflags);

/*
 * Doesn't attempt to fault and will return short.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages);

/*
 * per-process (per-mm_struct) statistics.
 */
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	long val = atomic_long_read(&mm->rss_stat.count[member]);

#ifdef SPLIT_RSS_COUNTING
	/*
	 * The counter is updated asynchronously and can temporarily go
	 * negative.  That is never a value users should see.
	 */
	if (val < 0)
		val = 0;
#endif
	return (unsigned long)val;
}

static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
	atomic_long_add(value, &mm->rss_stat.count[member]);
}

static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_inc(&mm->rss_stat.count[member]);
}

static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_dec(&mm->rss_stat.count[member]);
}

static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
	return get_mm_counter(mm, MM_FILEPAGES) +
		get_mm_counter(mm, MM_ANONPAGES);
}

static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
	return max(mm->hiwater_rss, get_mm_rss(mm));
}

static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
	return max(mm->hiwater_vm, mm->total_vm);
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
	unsigned long _rss = get_mm_rss(mm);

	if ((mm)->hiwater_rss < _rss)
		(mm)->hiwater_rss = _rss;
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
	if (mm->hiwater_vm < mm->total_vm)
		mm->hiwater_vm = mm->total_vm;
}

static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
					 struct mm_struct *mm)
{
	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);

	if (*maxrss < hiwater_rss)
		*maxrss = hiwater_rss;
}

#if defined(SPLIT_RSS_COUNTING)
void sync_mm_rss(struct mm_struct *mm);
#else
static inline void sync_mm_rss(struct mm_struct *mm)
{
}
#endif

int vma_wants_writenotify(struct vm_area_struct *vma);

extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
			       spinlock_t **ptl);
static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
				    spinlock_t **ptl)
{
	pte_t *ptep;
	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
	return ptep;
}

#ifdef __PAGETABLE_PUD_FOLDED
static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
						unsigned long address)
{
	return 0;
}
#else
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif

#ifdef __PAGETABLE_PMD_FOLDED
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
						unsigned long address)
{
	return 0;
}
#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
#endif

int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		pmd_t *pmd, unsigned long address);
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);

/*
 * The following ifdef needed to get the 4level-fixup.h header to work.
 * Remove it when 4level-fixup.h has been removed.
 */
#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
		NULL: pud_offset(pgd, address);
}

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
		NULL: pmd_offset(pud, address);
}
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */

#if USE_SPLIT_PTLOCKS
/*
 * We tuck a spinlock to guard each pagetable page into its struct page,
 * at page->private, with BUILD_BUG_ON to make sure that this will not
 * overflow into the next struct page (as it might with DEBUG_SPINLOCK).
 * When freeing, reset page->mapping so free_pages_check won't complain.
 */
#define __pte_lockptr(page)	&((page)->ptl)
#define pte_lock_init(_page)	do {					\
	spin_lock_init(__pte_lockptr(_page));				\
} while (0)
#define pte_lock_deinit(page)	((page)->mapping = NULL)
#define pte_lockptr(mm, pmd)	({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
#else	/* !USE_SPLIT_PTLOCKS */
/*
 * We use mm->page_table_lock to guard all pagetable pages of the mm.
 */
#define pte_lock_init(page)	do {} while (0)
#define pte_lock_deinit(page)	do {} while (0)
#define pte_lockptr(mm, pmd)	({(void)(pmd); &(mm)->page_table_lock;})
#endif /* USE_SPLIT_PTLOCKS */

static inline void pgtable_page_ctor(struct page *page)
{
	pte_lock_init(page);
	inc_zone_page_state(page, NR_PAGETABLE);
}

static inline void pgtable_page_dtor(struct page *page)
{
	pte_lock_deinit(page);
	dec_zone_page_state(page, NR_PAGETABLE);
}

#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})

#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)
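
/*
 * Example (illustrative sketch): the canonical pattern for walking the
 * ptes of one page table page under its lock, using the pair of macros
 * above.  Note that the final pte_unmap_unlock() is handed the last
 * mapped pte, i.e. pte - 1 after the loop:
 *
 *	pte_t *pte;
 *	spinlock_t *ptl;
 *
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	do {
 *		...inspect or modify *pte under ptl...
 *	} while (pte++, addr += PAGE_SIZE, addr != end);
 *	pte_unmap_unlock(pte - 1, ptl);
 */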

#define pte_alloc_map(mm, vma, pmd, address)				\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma,		\
							pmd, address))?	\
	 NULL: pte_offset_map(pmd, address))

#define pte_alloc_map_lock(mm, pmd, address, ptlp)		\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL,	\
							pmd, address))?	\
		NULL: pte_offset_map_lock(mm, pmd, address, ptlp))

#define pte_alloc_kernel(pmd, address)			\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
		NULL: pte_offset_kernel(pmd, address))

extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, unsigned long * zones_size,
		unsigned long zone_start_pfn, unsigned long *zholes_size);
extern void free_initmem(void);

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
 * zones, allocate the backing mem_map and account for memory holes in a more
 * architecture-independent manner. This is a substitute for creating the
 * zone_sizes[] and zholes_size[] arrays and passing them to
 * free_area_init_node()
 *
 * An architecture is expected to register the ranges of page frames backed by
 * physical memory with memblock_add[_node]() before calling
 * free_area_init_nodes() passing in the PFN each zone ends at. In its most
 * basic usage, an architecture is expected to do something like
 *
 * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
 * 							 max_highmem_pfn};
 * for_each_valid_physical_page_range()
 * 	memblock_add_node(base, size, nid)
 * free_area_init_nodes(max_zone_pfns);
 *
 * free_bootmem_with_active_regions() calls free_bootmem_node() for each
 * registered physical page range.  Similarly
 * sparse_memory_present_with_active_regions() calls memory_present() for
 * each range when SPARSEMEM is enabled.
 *
 * See mm/page_alloc.c for more information on each function exposed by
 * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
 */
extern void free_area_init_nodes(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
						unsigned long end_pfn);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
						unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn);
extern unsigned long find_min_pfn_with_active_regions(void);
extern void free_bootmem_with_active_regions(int nid,
						unsigned long max_low_pfn);
extern void sparse_memory_present_with_active_regions(int nid);

#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
    !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
static inline int __early_pfn_to_nid(unsigned long pfn)
{
	return 0;
}
#else
/* please see mm/page_alloc.c */
extern int __meminit early_pfn_to_nid(unsigned long pfn);
#ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
/* there is a per-arch backend function. */
extern int __meminit __early_pfn_to_nid(unsigned long pfn);
#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
#endif

extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_zone(unsigned long, int, unsigned long,
				unsigned long, enum memmap_context);
extern void setup_per_zone_wmarks(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __init mmap_init(void);
extern void show_mem(unsigned int flags);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
extern int after_bootmem;

extern __printf(3, 4)
void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);

extern void setup_per_cpu_pageset(void);

extern void zone_pcp_update(struct zone *zone);
extern void zone_pcp_reset(struct zone *zone);

/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);

/* prio_tree.c */
void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
void vma_prio_tree_insert(struct vm_area_struct *, struct prio_tree_root *);
void vma_prio_tree_remove(struct vm_area_struct *, struct prio_tree_root *);
struct vm_area_struct *vma_prio_tree_next(struct vm_area_struct *vma,
	struct prio_tree_iter *iter);

#define vma_prio_tree_foreach(vma, iter, root, begin, end)	\
	for (prio_tree_iter_init(iter, root, begin, end), vma = NULL;	\
		(vma = vma_prio_tree_next(vma, iter)); )
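
/*
 * Example (illustrative sketch): how rmap-style code visits every vma
 * that maps a given file page, using the iterator macro above.  The
 * caller must hold the i_mmap lock of the mapping:
 *
 *	struct vm_area_struct *vma;
 *	struct prio_tree_iter iter;
 *
 *	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
 *		...vma maps the page at index pgoff...
 *	}
 */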
1369
1370static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
1371 struct list_head *list)
1372{
1373 vma->shared.vm_set.parent = NULL;
1374 list_add_tail(&vma->shared.vm_set.list, list);
1375}
1376
1377/* mmap.c */
34b4e4aa 1378extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
5beb4930 1379extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
1da177e4
LT
1380 unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
1381extern struct vm_area_struct *vma_merge(struct mm_struct *,
1382 struct vm_area_struct *prev, unsigned long addr, unsigned long end,
1383 unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
1384 struct mempolicy *);
1385extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
1386extern int split_vma(struct mm_struct *,
1387 struct vm_area_struct *, unsigned long addr, int new_below);
1388extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
1389extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
1390 struct rb_node **, struct rb_node *);
a8fb5618 1391extern void unlink_file_vma(struct vm_area_struct *);
1da177e4
LT
1392extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
1393 unsigned long addr, unsigned long len, pgoff_t pgoff);
1394extern void exit_mmap(struct mm_struct *);
925d1c40 1395
7906d00c
AA
1396extern int mm_take_all_locks(struct mm_struct *mm);
1397extern void mm_drop_all_locks(struct mm_struct *mm);
1398
925d1c40
MH
1399/* From fs/proc/base.c. callers must _not_ hold the mm's exe_file_lock */
1400extern void added_exe_file_vma(struct mm_struct *mm);
1401extern void removed_exe_file_vma(struct mm_struct *mm);
38646013
JS
1402extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
1403extern struct file *get_mm_exe_file(struct mm_struct *mm);
925d1c40 1404
119f657c 1405extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
fa5dc22f
RM
1406extern int install_special_mapping(struct mm_struct *mm,
1407 unsigned long addr, unsigned long len,
1408 unsigned long flags, struct page **pages);
1da177e4
LT
1409
1410extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
1411
extern unsigned long mmap_region(struct file *file, unsigned long addr,
	unsigned long len, unsigned long flags,
	vm_flags_t vm_flags, unsigned long pgoff);
extern unsigned long do_mmap_pgoff(struct file *, unsigned long,
	unsigned long, unsigned long,
	unsigned long, unsigned long);
extern int do_munmap(struct mm_struct *, unsigned long, size_t);

/* These take the mm semaphore themselves */
extern unsigned long vm_brk(unsigned long, unsigned long);
extern int vm_munmap(unsigned long, size_t);
extern unsigned long vm_mmap(struct file *, unsigned long,
	unsigned long, unsigned long,
	unsigned long, unsigned long);

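/*
 * Illustrative sketch, assuming the caller does NOT already hold
 * mmap_sem (vm_mmap() and vm_munmap() take it internally): map the
 * first page of a file read-only, then tear the mapping down.
 *
 *	unsigned long addr;
 *
 *	addr = vm_mmap(file, 0, PAGE_SIZE, PROT_READ, MAP_PRIVATE, 0);
 *	if (IS_ERR_VALUE(addr))
 *		return addr;
 *	...
 *	vm_munmap(addr, PAGE_SIZE);
 */
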
/* truncate.c */
extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
				       loff_t lstart, loff_t lend);

/* generic vm_area_ops exported for stackable file systems */
extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);

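/*
 * Illustrative sketch: a filesystem that simply reuses the generic page
 * cache fault handlers in its vm_operations ("myfs" is hypothetical):
 *
 *	static const struct vm_operations_struct myfs_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.page_mkwrite	= filemap_page_mkwrite,
 *	};
 */
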
/* mm/page-writeback.c */
int write_one_page(struct page *page, int wait);
void task_dirty_inc(struct task_struct *tsk);

/* readahead.c */
#define VM_MAX_READAHEAD	128	/* kbytes */
#define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */

int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read);

void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra,
			       struct file *filp,
			       pgoff_t offset,
			       unsigned long size);

void page_cache_async_readahead(struct address_space *mapping,
				struct file_ra_state *ra,
				struct file *filp,
				struct page *pg,
				pgoff_t offset,
				unsigned long size);

unsigned long max_sane_readahead(unsigned long nr);
unsigned long ra_submit(struct file_ra_state *ra,
			struct address_space *mapping,
			struct file *filp);

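/*
 * Illustrative sketch: the pattern generic file read uses -- on a page
 * cache miss, request synchronous readahead around the missing index.
 * "index" and "req_size" are hypothetical.
 *
 *	page = find_get_page(mapping, index);
 *	if (!page)
 *		page_cache_sync_readahead(mapping, &filp->f_ra, filp,
 *					  index, req_size);
 */
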
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);

/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
extern int expand_downwards(struct vm_area_struct *vma,
		unsigned long address);
#if VM_GROWSUP
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#else
#define expand_upwards(vma, address) do { } while (0)
#endif

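/*
 * Illustrative sketch: the architecture page fault handler pattern for
 * growing a VM_GROWSDOWN stack to cover a faulting address ("vma" comes
 * from find_vma(); mmap_sem is held; the labels are hypothetical):
 *
 *	if (vma->vm_start > address) {
 *		if (!(vma->vm_flags & VM_GROWSDOWN))
 *			goto bad_area;
 *		if (expand_stack(vma, address))
 *			goto bad_area;
 *	}
 */
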
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr);
extern struct vm_area_struct *find_vma_prev(struct mm_struct *mm, unsigned long addr,
					     struct vm_area_struct **pprev);

/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
   NULL if none.  Assume start_addr < end_addr. */
static inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
		unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct *vma = find_vma(mm, start_addr);

	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}

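/*
 * Illustrative sketch: reject a request when anything already maps part
 * of [start, end).  "start" and "end" are hypothetical; mmap_sem must be
 * held across the check.
 *
 *	if (find_vma_intersection(mm, start, end))
 *		return -EEXIST;
 */
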
static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

/* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
				unsigned long vm_start, unsigned long vm_end)
{
	struct vm_area_struct *vma = find_vma(mm, vm_start);

	if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
		vma = NULL;

	return vma;
}

#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
#else
static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(0);
}
#endif

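/*
 * Illustrative sketch: derive a VMA's page protections from its flags,
 * as mmap_region() does when setting up a new mapping:
 *
 *	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 */
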
struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
			unsigned long pfn, unsigned long size, pgprot_t);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);

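/*
 * Illustrative sketch: a character driver's ->mmap() method exposing a
 * physical region to userspace ("my_dev_phys" is hypothetical):
 *
 *	static int my_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		return remap_pfn_range(vma, vma->vm_start,
 *				       my_dev_phys >> PAGE_SHIFT,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 */
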
struct page *follow_page(struct vm_area_struct *, unsigned long address,
			unsigned int foll_flags);
#define FOLL_WRITE	0x01	/* check pte is writable */
#define FOLL_TOUCH	0x02	/* mark page accessed */
#define FOLL_GET	0x04	/* do get_page on page */
#define FOLL_DUMP	0x08	/* give error on hole if it would be zero */
#define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
#define FOLL_NOWAIT	0x20	/* if a disk transfer is needed, start the IO
				 * and return without waiting upon it */
#define FOLL_MLOCK	0x40	/* mark page as mlocked */
#define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
#define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */

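/*
 * Illustrative sketch, assuming the caller holds mmap_sem: take a
 * reference on the page backing "addr", treating unmapped holes as an
 * error (the error handling here is only indicative):
 *
 *	page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
 *	if (IS_ERR_OR_NULL(page))
 *		return page ? PTR_ERR(page) : -ENOMEM;
 */
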
typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
			void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
			       unsigned long size, pte_fn_t fn, void *data);

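/*
 * Illustrative sketch: apply_to_page_range() invokes a pte_fn_t on every
 * pte slot in the range; this hypothetical callback counts present ptes:
 *
 *	static int count_present(pte_t *pte, pgtable_t token,
 *				 unsigned long addr, void *data)
 *	{
 *		unsigned long *count = data;
 *
 *		if (pte_present(*pte))
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	err = apply_to_page_range(mm, start, size, count_present, &count);
 */
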
#ifdef CONFIG_PROC_FS
void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
#else
static inline void vm_stat_account(struct mm_struct *mm,
			unsigned long flags, struct file *file, long pages)
{
	mm->total_vm += pages;
}
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_DEBUG_PAGEALLOC
extern void kernel_map_pages(struct page *page, int numpages, int enable);
#ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page);
#endif /* CONFIG_HIBERNATION */
#else
static inline void
kernel_map_pages(struct page *page, int numpages, int enable) {}
#ifdef CONFIG_HIBERNATION
static inline bool kernel_page_present(struct page *page) { return true; }
#endif /* CONFIG_HIBERNATION */
#endif

extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
#ifdef __HAVE_ARCH_GATE_AREA
int in_gate_area_no_mm(unsigned long addr);
int in_gate_area(struct mm_struct *mm, unsigned long addr);
#else
int in_gate_area_no_mm(unsigned long addr);
#define in_gate_area(mm, addr) ({(void)mm; in_gate_area_no_mm(addr);})
#endif	/* __HAVE_ARCH_GATE_AREA */

int drop_caches_sysctl_handler(struct ctl_table *, int,
			       void __user *, size_t *, loff_t *);
unsigned long shrink_slab(struct shrink_control *shrink,
			  unsigned long nr_pages_scanned,
			  unsigned long lru_pages);

#ifndef CONFIG_MMU
#define randomize_va_space 0
#else
extern int randomize_va_space;
#endif

const char *arch_vma_name(struct vm_area_struct *vma);
void print_vma_addr(char *prefix, unsigned long rip);

void sparse_mem_maps_populate_node(struct page **map_map,
				   unsigned long pnum_begin,
				   unsigned long pnum_end,
				   unsigned long map_count,
				   int nodeid);

struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
void *vmemmap_alloc_block(unsigned long size, int node);
void *vmemmap_alloc_block_buf(unsigned long size, int node);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(struct page *start_page,
			       unsigned long pages, int node);
int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
void vmemmap_populate_print_last(void);

enum mf_flags {
	MF_COUNT_INCREASED = 1 << 0,
	MF_ACTION_REQUIRED = 1 << 1,
	MF_MUST_KILL = 1 << 2,
};
extern int memory_failure(unsigned long pfn, int trapno, int flags);
extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
extern int unpoison_memory(unsigned long pfn);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
extern atomic_long_t mce_bad_pages;
extern int soft_offline_page(struct page *page, int flags);

extern void dump_page(struct page *page);

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
extern void clear_huge_page(struct page *page,
			    unsigned long addr,
			    unsigned int pages_per_huge_page);
extern void copy_user_huge_page(struct page *dst, struct page *src,
				unsigned long addr, struct vm_area_struct *vma,
				unsigned int pages_per_huge_page);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;

static inline unsigned int debug_guardpage_minorder(void)
{
	return _debug_guardpage_minorder;
}

static inline bool page_is_guard(struct page *page)
{
	return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
}
#else
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool page_is_guard(struct page *page) { return false; }
#endif /* CONFIG_DEBUG_PAGEALLOC */

#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */