/*
 * include/linux/mm.h
 * (snapshot from the "thp: lazy huge zero page allocation" commit)
 */
#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>

struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct file_ra_state;
struct user_struct;
struct writeback_control;

#ifndef CONFIG_DISCONTIGMEM	/* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
#endif

extern unsigned long num_physpages;
extern unsigned long totalram_pages;
extern void *high_memory;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

extern struct kmem_cache *vm_area_cachep;

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;

extern unsigned int kobjsize(const void *objp);
#endif

/*
 * vm_flags in vm_area_struct, see mm_types.h.
 */
#define VM_NONE		0x00000000

#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */

#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000	/* Memory mapped I/O or similar */

/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000	/* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
#define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */

#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
#define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
#define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */

#if defined(CONFIG_X86)
# define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
#elif defined(CONFIG_PPC)
# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_IA64)
# define VM_GROWSUP	VM_ARCH_1
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
#endif

#ifndef VM_GROWSUP
# define VM_GROWSUP	VM_NONE
#endif

/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)

#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_FLAGS	(VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#else
#define VM_STACK_FLAGS	(VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#endif

#define VM_READHINTMASK			(VM_SEQ_READ | VM_RAND_READ)
#define VM_ClearReadHint(v)		(v)->vm_flags &= ~VM_READHINTMASK
#define VM_NormalReadHint(v)		(!((v)->vm_flags & VM_READHINTMASK))
#define VM_SequentialReadHint(v)	((v)->vm_flags & VM_SEQ_READ)
#define VM_RandomReadHint(v)		((v)->vm_flags & VM_RAND_READ)

/*
 * Special vmas that are non-mergeable, non-mlock()able.
 * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP)

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask.
 */
extern pgprot_t protection_map[16];

#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
#define FAULT_FLAG_NONLINEAR	0x02	/* Fault was via a nonlinear mapping */
#define FAULT_FLAG_MKWRITE	0x04	/* Fault was mkwrite of existing pte */
#define FAULT_FLAG_ALLOW_RETRY	0x08	/* Retry fault if blocking */
#define FAULT_FLAG_RETRY_NOWAIT	0x10	/* Don't drop mmap_sem and wait when retrying */
#define FAULT_FLAG_KILLABLE	0x20	/* The fault task is in SIGKILL killable region */
#define FAULT_FLAG_TRIED	0x40	/* second try */

/*
 * vm_fault is filled by the pagefault handler and passed to the vma's
 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 *
 * pgoff should be used in favour of virtual_address, if possible. If pgoff
 * is used, one may implement ->remap_pages to get nonlinear mapping support.
 */
struct vm_fault {
	unsigned int flags;		/* FAULT_FLAG_xxx flags */
	pgoff_t pgoff;			/* Logical page offset based on vma */
	void __user *virtual_address;	/* Faulting virtual address */

	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_ERROR).
					 */
};
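
/*
 * Illustrative sketch (not part of this header): a minimal ->fault
 * handler fills vmf->page from its own backing store and returns 0, or
 * returns a VM_FAULT_xxx error. my_lookup_page() is hypothetical.
 *
 *	static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 *	{
 *		struct page *page;
 *
 *		page = my_lookup_page(vma->vm_private_data, vmf->pgoff);
 *		if (!page)
 *			return VM_FAULT_SIGBUS;
 *		get_page(page);
 *		vmf->page = page;
 *		return 0;
 *	}
 */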

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* notification that a previously read-only page is about to become
	 * writable; if an error is returned it will cause a SIGBUS */
	int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs that can switch between memory and hardware
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);
#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return. Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
	int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
		       const nodemask_t *to, unsigned long flags);
#endif
	/* called by sys_remap_file_pages() to populate non-linear mapping */
	int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
			   unsigned long size, pgoff_t pgoff);
};
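
/*
 * Illustrative sketch (not part of this header): a driver typically
 * defines a static instance and wires it up at mmap time; unused hooks
 * are simply left NULL. my_fault/my_mmap are hypothetical names.
 *
 *	static const struct vm_operations_struct my_vm_ops = {
 *		.fault = my_fault,
 *	};
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		vma->vm_ops = &my_vm_ops;
 *		return 0;
 *	}
 */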

struct mmu_gather;
struct inode;

#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

/* Valid only while the page is on the free path or on a buddy free_list */
static inline void set_freepage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}

/* Valid only while the page is on the free path or on a buddy free_list */
static inline int get_freepage_migratetype(struct page *page)
{
	return page->index;
}

/*
 * FIXME: take this include out, include page-flags.h in
 * files which need it (119 of them)
 */
#include <linux/page-flags.h>
#include <linux/huge_mm.h>

/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping (page->mapping)
 * - private data (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */

/*
 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 */
static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON(atomic_read(&page->_count) == 0);
	return atomic_dec_and_test(&page->_count);
}

/*
 * Try to grab a ref unless the page has a refcount of zero, return false if
 * that is the case.
 */
static inline int get_page_unless_zero(struct page *page)
{
	return atomic_inc_not_zero(&page->_count);
}
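
/*
 * Illustrative sketch (not part of this header): get_page_unless_zero()
 * is what makes speculative, lockless page lookups safe - grab the
 * reference first, then recheck that the page is still the one wanted:
 *
 *	page = radix_tree_lookup(&mapping->page_tree, index);
 *	if (page && !get_page_unless_zero(page))
 *		page = NULL;	-- it was being freed under us
 *	-- ...recheck page->mapping/index once the ref is held...
 */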

extern int page_is_ram(unsigned long pfn);

/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

/*
 * Determine if an address is within the vmalloc range
 *
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */
static inline int is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
	return 0;
#endif
}
#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline int is_vmalloc_or_module_addr(const void *x)
{
	return 0;
}
#endif

static inline void compound_lock(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON(PageSlab(page));
	bit_spin_lock(PG_compound_lock, &page->flags);
#endif
}

static inline void compound_unlock(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON(PageSlab(page));
	bit_spin_unlock(PG_compound_lock, &page->flags);
#endif
}

static inline unsigned long compound_lock_irqsave(struct page *page)
{
	unsigned long uninitialized_var(flags);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	local_irq_save(flags);
	compound_lock(page);
#endif
	return flags;
}

static inline void compound_unlock_irqrestore(struct page *page,
					      unsigned long flags)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	compound_unlock(page);
	local_irq_restore(flags);
#endif
}

static inline struct page *compound_head(struct page *page)
{
	if (unlikely(PageTail(page)))
		return page->first_page;
	return page;
}

/*
 * The atomic page->_mapcount starts from -1, so that transitions
 * both from it and to it can be tracked, using atomic_inc_and_test
 * and atomic_add_negative(-1).
 */
static inline void reset_page_mapcount(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

static inline int page_mapcount(struct page *page)
{
	return atomic_read(&(page)->_mapcount) + 1;
}

static inline int page_count(struct page *page)
{
	return atomic_read(&compound_head(page)->_count);
}

static inline void get_huge_page_tail(struct page *page)
{
	/*
	 * __split_huge_page_refcount() cannot run
	 * from under us.
	 */
	VM_BUG_ON(page_mapcount(page) < 0);
	VM_BUG_ON(atomic_read(&page->_count) != 0);
	atomic_inc(&page->_mapcount);
}

extern bool __get_page_tail(struct page *page);

static inline void get_page(struct page *page)
{
	if (unlikely(PageTail(page)))
		if (likely(__get_page_tail(page)))
			return;
	/*
	 * Getting a normal page or the head of a compound page
	 * requires to already have an elevated page->_count.
	 */
	VM_BUG_ON(atomic_read(&page->_count) <= 0);
	atomic_inc(&page->_count);
}

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);
	return compound_head(page);
}

/*
 * Setup the page count before being freed into the page allocator for
 * the first time (boot or memory hotplug)
 */
static inline void init_page_count(struct page *page)
{
	atomic_set(&page->_count, 1);
}

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 *
 * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
 * -2 so that an underflow of the page_mapcount() won't be mistaken
 * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
 * efficiently by most CPU architectures.
 */
#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)

static inline int PageBuddy(struct page *page)
{
	return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
}

static inline void __SetPageBuddy(struct page *page)
{
	VM_BUG_ON(atomic_read(&page->_mapcount) != -1);
	atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
}

static inline void __ClearPageBuddy(struct page *page)
{
	VM_BUG_ON(!PageBuddy(page));
	atomic_set(&page->_mapcount, -1);
}

void put_page(struct page *page);
void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);
int split_free_page(struct page *page);
int capture_free_page(struct page *page, int alloc_order, int migratetype);

/*
 * Compound pages have a destructor function. Provide a
 * prototype for that function and accessor functions.
 * These are _only_ valid on the head of a PG_compound page.
 */
typedef void compound_page_dtor(struct page *);

static inline void set_compound_page_dtor(struct page *page,
					  compound_page_dtor *dtor)
{
	page[1].lru.next = (void *)dtor;
}

static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
	return (compound_page_dtor *)page[1].lru.next;
}

static inline int compound_order(struct page *page)
{
	if (!PageHead(page))
		return 0;
	return (unsigned long)page[1].lru.prev;
}

static inline int compound_trans_order(struct page *page)
{
	int order;
	unsigned long flags;

	if (!PageHead(page))
		return 0;

	flags = compound_lock_irqsave(page);
	order = compound_order(page);
	compound_unlock_irqrestore(page, flags);
	return order;
}

static inline void set_compound_order(struct page *page, unsigned long order)
{
	page[1].lru.prev = (void *)order;
}

#ifdef CONFIG_MMU
/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when
 * servicing faults for write access. In the normal case, we always want
 * pte_mkwrite. But get_user_pages can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm.
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}
#endif
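
/*
 * Illustrative sketch (not part of this header): the COW fault path uses
 * maybe_mkwrite() roughly like this when installing a new anonymous page
 * (simplified from the pattern in mm/memory.c):
 *
 *	entry = mk_pte(new_page, vma->vm_page_prot);
 *	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 *	set_pte_at(mm, address, page_table, entry);
 */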

#ifndef my_zero_pfn
static inline unsigned long my_zero_pfn(unsigned long addr)
{
	extern unsigned long zero_pfn;
	return zero_pfn;
}
#endif

/*
 * Multiple processes may "see" the same page. E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page_count(page) denotes a reference count.
 * page_count() == 0 means the page is free. page->lru is then used for
 * freelist management in the buddy allocator.
 * page_count() > 0 means the page has been allocated.
 *
 * Pages are allocated by the slab allocator in order to provide memory
 * to kmalloc and kmem_cache_alloc. In this case, the management of the
 * page, and the fields in 'struct page' are the responsibility of mm/slab.c
 * unless a particular usage is carefully commented. (the responsibility of
 * freeing the kmalloc memory is the caller's, of course).
 *
 * A page may be used by anyone else who does a __get_free_page().
 * In this case, page_count still tracks the references, and should only
 * be used through the normal accessor functions. The top bits of page->flags
 * and page->virtual store page management information, but all other fields
 * are unused and could be used privately, carefully. The management of this
 * page is the responsibility of the one who allocated it, and those who have
 * subsequently been given references to it.
 *
 * The other pages (we may call them "pagecache pages") are completely
 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 * The following discussion applies only to them.
 *
 * A pagecache page contains an opaque `private' member, which belongs to the
 * page's address_space. Usually, this is the address of a circular list of
 * the page's disk buffers. PG_private must be set to tell the VM to call
 * into the filesystem to release these pages.
 *
 * A page may belong to an inode's memory mapping. In this case, page->mapping
 * is the pointer to the inode, and page->index is the file offset of the page,
 * in units of PAGE_CACHE_SIZE.
 *
 * If pagecache pages are not associated with an inode, they are said to be
 * anonymous pages. These may become associated with the swapcache, and in that
 * case PG_swapcache is set, and page->private is an offset into the swapcache.
 *
 * In either case (swapcache or inode backed), the pagecache itself holds one
 * reference to the page. Setting PG_private should also increment the
 * refcount. Each user mapping also holds a reference to the page.
 *
 * The pagecache pages are stored in a per-mapping radix tree, which is
 * rooted at mapping->page_tree, and indexed by offset.
 * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
 * lists, we instead now tag pages as dirty/writeback in the radix tree.
 *
 * All pagecache pages may be subject to I/O:
 * - inode pages may need to be read from disk,
 * - inode pages which have been modified and are MAP_SHARED may need
 *   to be written back to the inode on disk,
 * - anonymous pages (including MAP_PRIVATE file mappings) which have been
 *   modified may need to be swapped out to swap space and (later) to be read
 *   back into memory.
 */

/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 */


/*
 * page->flags layout:
 *
 * There are three possibilities for how page->flags get
 * laid out. The first is for the normal case, without
 * sparsemem. The second is for sparsemem when there is
 * plenty of space for node and section. The last is when
 * we have run out of space and have to fall back to an
 * alternate (slower) way of determining the node.
 *
 * No sparsemem or sparsemem vmemmap: |       NODE     | ZONE | ... | FLAGS |
 * classic sparse with space for node:| SECTION | NODE | ZONE | ... | FLAGS |
 * classic sparse no space for node:  | SECTION |     ZONE    | ... | FLAGS |
 */
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTIONS_WIDTH		SECTIONS_SHIFT
#else
#define SECTIONS_WIDTH		0
#endif

#define ZONES_WIDTH		ZONES_SHIFT

#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
#define NODES_WIDTH		NODES_SHIFT
#else
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#error "Vmemmap: No space for nodes field in page flags"
#endif
#define NODES_WIDTH		0
#endif

/* Page flags: | [SECTION] | [NODE] | ZONE | ... | FLAGS | */
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)

/*
 * We are going to use the flags for the page-to-node mapping if it's in
 * there. This includes the case where there is no node, so it is implicit.
 */
#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0)
#define NODE_NOT_IN_PAGE_FLAGS
#endif

/*
 * Define the bit shifts to access each section. For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away reference to them.
 */
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))

/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
						SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF)? \
						NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(const struct page *page)
{
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

/*
 * The identification function is only used by the buddy allocator for
 * determining if two pages could be buddies. We are not really
 * identifying a zone since we could be using the section number
 * id if no node id is available in page flags.
 * We guarantee only that it will return the same value for two
 * combinable pages in a zone.
 */
static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

static inline int zone_to_nid(struct zone *zone)
{
#ifdef CONFIG_NUMA
	return zone->node;
#else
	return 0;
#endif
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

static inline struct zone *page_zone(const struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline unsigned long page_to_section(const struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
				  unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
	set_page_section(page, pfn_to_section_nr(pfn));
#endif
}

/*
 * Some inline functions in vmstat.h depend on page_zone()
 */
#include <linux/vmstat.h>

static __always_inline void *lowmem_page_address(const struct page *page)
{
	return __va(PFN_PHYS(page_to_pfn(page)));
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
#define page_address(page) ((page)->virtual)
#define set_page_address(page, address)		\
	do {					\
		(page)->virtual = (address);	\
	} while(0)
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(const struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
 * and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page. See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	1
#define PAGE_MAPPING_KSM	2
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)

extern struct address_space swapper_space;
static inline struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping = page->mapping;

	VM_BUG_ON(PageSlab(page));
	if (unlikely(PageSwapCache(page)))
		mapping = &swapper_space;
	else if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		mapping = NULL;
	return mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
static inline void *page_rmapping(struct page *page)
{
	return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
}

extern struct address_space *__page_file_mapping(struct page *);

static inline
struct address_space *page_file_mapping(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_mapping(page);

	return page->mapping;
}

static inline int PageAnon(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

/*
 * Return the pagecache index of the passed page. Regular pagecache pages
 * use ->index whereas swapcache pages use ->private
 */
static inline pgoff_t page_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return page_private(page);
	return page->index;
}

extern pgoff_t __page_file_index(struct page *page);

/*
 * Return the file index of the page. Regular pagecache pages use ->index
 * whereas swapcache pages use swp_offset(->private)
 */
static inline pgoff_t page_file_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_index(page);

	return page->index;
}

/*
 * Return true if this page is mapped into pagetables.
 */
static inline int page_mapped(struct page *page)
{
	return atomic_read(&(page)->_mapcount) >= 0;
}

/*
 * Different kinds of faults, as returned by handle_mm_fault().
 * Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 */

#define VM_FAULT_MINOR	0 /* For backwards compat. Remove me quickly. */

#define VM_FAULT_OOM	0x0001
#define VM_FAULT_SIGBUS	0x0002
#define VM_FAULT_MAJOR	0x0004
#define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
#define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned small page */
#define VM_FAULT_HWPOISON_LARGE 0x0020  /* Hit poisoned large page. Index encoded in upper bits */

#define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
#define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
#define VM_FAULT_RETRY	0x0400	/* ->fault blocked, must retry */

#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */

#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
			 VM_FAULT_HWPOISON_LARGE)

/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)
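
/*
 * Worked example: for a poisoned huge page of hstate index 2, the fault
 * return value is built as
 *	VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(2)
 * i.e. 0x0020 | (2 << 12) = 0x2020, and the handler recovers the index
 * with VM_FAULT_GET_HINDEX(0x2020) = (0x2020 >> 12) & 0xf = 2.
 */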

/*
 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
 */
extern void pagefault_out_of_memory(void);

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

/*
 * Flags passed to show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES	(0x0001u)	/* filter disallowed nodes */

extern void show_free_areas(unsigned int flags);
extern bool skip_free_areas_node(unsigned int flags, int nid);

int shmem_zero_setup(struct vm_area_struct *);

extern int can_do_mlock(void);
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);

/*
 * Parameter block passed down to zap_pte_range in exceptional cases.
 */
struct zap_details {
	struct vm_area_struct *nonlinear_vma;	/* Check page->index if set */
	struct address_space *check_mapping;	/* Check page->mapping if set */
	pgoff_t first_index;			/* Lowest page->index to unmap */
	pgoff_t last_index;			/* Highest page->index to unmap */
};

struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
		pte_t pte);

int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *);
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long start, unsigned long end);

/**
 * mm_walk - callbacks for walk_page_range
 * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
 * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
 *	       this handler is required to be able to handle
 *	       pmd_trans_huge() pmds.  They may simply choose to
 *	       split_huge_page() instead of handling it explicitly.
 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
 * @pte_hole: if set, called for each hole at all levels
 * @hugetlb_entry: if set, called for each hugetlb entry
 *		   *Caution*: The caller must hold mmap_sem if @hugetlb_entry
 *		   is used.
 *
 * (see walk_page_range for more details)
 */
struct mm_walk {
	int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pud_entry)(pud_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
	int (*hugetlb_entry)(pte_t *, unsigned long,
			     unsigned long, unsigned long, struct mm_walk *);
	struct mm_struct *mm;
	void *private;
};

int walk_page_range(unsigned long addr, unsigned long end,
		    struct mm_walk *walk);
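
/*
 * Illustrative sketch (not part of this header): counting present ptes
 * in a range with walk_page_range(); count_pte is a hypothetical name.
 *
 *	static int count_pte(pte_t *pte, unsigned long addr,
 *			     unsigned long next, struct mm_walk *walk)
 *	{
 *		if (pte_present(*pte))
 *			(*(unsigned long *)walk->private)++;
 *		return 0;
 *	}
 *
 *	unsigned long count = 0;
 *	struct mm_walk walk = { .pte_entry = count_pte, .mm = mm,
 *				.private = &count };
 *	walk_page_range(start, end, &walk);
 */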
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
		unsigned long end, unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
		struct vm_area_struct *vma);
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
		unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
		void *buf, int len, int write);

static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
{
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}

extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
extern int vmtruncate(struct inode *inode, loff_t offset);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
int invalidate_inode_page(struct page *page);

#ifdef CONFIG_MMU
extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
			unsigned long address, unsigned int fault_flags);
#else
static inline int handle_mm_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	/* should never happen if there's no MMU */
	BUG();
	return VM_FAULT_SIGBUS;
}
static inline int fixup_user_fault(struct task_struct *tsk,
			struct mm_struct *mm, unsigned long address,
			unsigned int fault_flags)
{
	/* should never happen if there's no MMU */
	BUG();
	return -EFAULT;
}
#endif

extern int make_pages_present(unsigned long addr, unsigned long end);
extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, int write);

int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, int len, unsigned int foll_flags,
		struct page **pages, struct vm_area_struct **vmas,
		int *nonblocking);
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, int nr_pages, int write, int force,
		struct page **pages, struct vm_area_struct **vmas);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
		struct page **pages);
struct kvec;
int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
		struct page **pages);
int get_kernel_page(unsigned long start, int write, struct page **pages);
struct page *get_dump_page(unsigned long addr);

extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned long offset);

int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
		struct page *page);
void account_page_dirtied(struct page *page, struct address_space *mapping);
void account_page_writeback(struct page *page);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);

/* Is the vma a continuation of the stack vma above it? */
static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
{
	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
}

static inline int stack_guard_page_start(struct vm_area_struct *vma,
					 unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSDOWN) &&
		(vma->vm_start == addr) &&
		!vma_growsdown(vma->vm_prev, addr);
}

/* Is the vma a continuation of the stack vma below it? */
static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
{
	return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
}

static inline int stack_guard_page_end(struct vm_area_struct *vma,
				       unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSUP) &&
		(vma->vm_end == addr) &&
		!vma_growsup(vma->vm_next, addr);
}

extern pid_t
vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);

extern unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks);
extern unsigned long do_mremap(unsigned long addr,
		unsigned long old_len, unsigned long new_len,
		unsigned long flags, unsigned long new_addr);
extern int mprotect_fixup(struct vm_area_struct *vma,
		struct vm_area_struct **pprev, unsigned long start,
		unsigned long end, unsigned long newflags);

/*
 * doesn't attempt to fault and will return short.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
		struct page **pages);
/*
 * per-process(per-mm_struct) statistics.
 */
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	long val = atomic_long_read(&mm->rss_stat.count[member]);

#ifdef SPLIT_RSS_COUNTING
	/*
	 * The counter is updated asynchronously and can temporarily go
	 * negative; a negative value is never meaningful to users, so
	 * clamp it to zero.
	 */
	if (val < 0)
		val = 0;
#endif
	return (unsigned long)val;
}

static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
	atomic_long_add(value, &mm->rss_stat.count[member]);
}

static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_inc(&mm->rss_stat.count[member]);
}

static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_dec(&mm->rss_stat.count[member]);
}

static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
	return get_mm_counter(mm, MM_FILEPAGES) +
		get_mm_counter(mm, MM_ANONPAGES);
}

static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
	return max(mm->hiwater_rss, get_mm_rss(mm));
}

static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
	return max(mm->hiwater_vm, mm->total_vm);
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
	unsigned long _rss = get_mm_rss(mm);

	if ((mm)->hiwater_rss < _rss)
		(mm)->hiwater_rss = _rss;
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
	if (mm->hiwater_vm < mm->total_vm)
		mm->hiwater_vm = mm->total_vm;
}

static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
					 struct mm_struct *mm)
{
	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);

	if (*maxrss < hiwater_rss)
		*maxrss = hiwater_rss;
}

#if defined(SPLIT_RSS_COUNTING)
void sync_mm_rss(struct mm_struct *mm);
#else
static inline void sync_mm_rss(struct mm_struct *mm)
{
}
#endif

int vma_wants_writenotify(struct vm_area_struct *vma);

extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
			       spinlock_t **ptl);
static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
				    spinlock_t **ptl)
{
	pte_t *ptep;
	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
	return ptep;
}
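
/*
 * Illustrative sketch (not part of this header): typical use of
 * get_locked_pte() - look up (or allocate) the pte, inspect or modify it
 * under the lock, then release with pte_unmap_unlock() (defined below):
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = get_locked_pte(mm, addr, &ptl);
 *	if (!pte)
 *		return -ENOMEM;
 *	-- ...examine or update *pte under ptl...
 *	pte_unmap_unlock(pte, ptl);
 */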

#ifdef __PAGETABLE_PUD_FOLDED
static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
						unsigned long address)
{
	return 0;
}
#else
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif

#ifdef __PAGETABLE_PMD_FOLDED
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
						unsigned long address)
{
	return 0;
}
#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
#endif

int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		pmd_t *pmd, unsigned long address);
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);

/*
 * The following ifdef is needed to get the 4level-fixup.h header to work.
 * Remove it when 4level-fixup.h has been removed.
 */
#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
		NULL: pud_offset(pgd, address);
}

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
		NULL: pmd_offset(pud, address);
}
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */

#if USE_SPLIT_PTLOCKS
/*
 * We tuck a spinlock to guard each pagetable page into its struct page,
 * at page->private, with BUILD_BUG_ON to make sure that this will not
 * overflow into the next struct page (as it might with DEBUG_SPINLOCK).
 * When freeing, reset page->mapping so free_pages_check won't complain.
 */
#define __pte_lockptr(page)	&((page)->ptl)
#define pte_lock_init(_page)	do {				\
	spin_lock_init(__pte_lockptr(_page));			\
} while (0)
#define pte_lock_deinit(page)	((page)->mapping = NULL)
#define pte_lockptr(mm, pmd)	({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
#else	/* !USE_SPLIT_PTLOCKS */
/*
 * We use mm->page_table_lock to guard all pagetable pages of the mm.
 */
#define pte_lock_init(page)	do {} while (0)
#define pte_lock_deinit(page)	do {} while (0)
#define pte_lockptr(mm, pmd)	({(void)(pmd); &(mm)->page_table_lock;})
#endif /* USE_SPLIT_PTLOCKS */

static inline void pgtable_page_ctor(struct page *page)
{
	pte_lock_init(page);
	inc_zone_page_state(page, NR_PAGETABLE);
}

static inline void pgtable_page_dtor(struct page *page)
{
	pte_lock_deinit(page);
	dec_zone_page_state(page, NR_PAGETABLE);
}

#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})

#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)
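
/*
 * Illustrative sketch (not part of this header): walking all ptes of one
 * pmd under the page-table lock, in the style used throughout mm/:
 *
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	do {
 *		-- ...handle *pte...
 *	} while (pte++, addr += PAGE_SIZE, addr != end);
 *	pte_unmap_unlock(pte - 1, ptl);
 */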

#define pte_alloc_map(mm, vma, pmd, address)				\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma,		\
							pmd, address))?	\
	 NULL: pte_offset_map(pmd, address))

#define pte_alloc_map_lock(mm, pmd, address, ptlp)			\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL,		\
							pmd, address))?	\
		NULL: pte_offset_map_lock(mm, pmd, address, ptlp))

#define pte_alloc_kernel(pmd, address)					\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
		NULL: pte_offset_kernel(pmd, address))

extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, unsigned long * zones_size,
		unsigned long zone_start_pfn, unsigned long *zholes_size);
extern void free_initmem(void);

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
 * zones, allocate the backing mem_map and account for memory holes in a more
 * architecture independent manner. This is a substitute for creating the
 * zone_sizes[] and zholes_size[] arrays and passing them to
 * free_area_init_node().
 *
 * An architecture is expected to register ranges of page frames backed by
 * physical memory with memblock_add[_node]() before calling
 * free_area_init_nodes() passing in the PFN each zone ends at. For basic
 * usage, an architecture is expected to do something like
 *
 * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
 * 							 max_highmem_pfn};
 * for_each_valid_physical_page_range()
 * 	memblock_add_node(base, size, nid)
 * free_area_init_nodes(max_zone_pfns);
 *
 * free_bootmem_with_active_regions() calls free_bootmem_node() for each
 * registered physical page range.  Similarly
 * sparse_memory_present_with_active_regions() calls memory_present() for
 * each range when SPARSEMEM is enabled.
 *
 * See mm/page_alloc.c for more information on each function exposed by
 * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
 */
extern void free_area_init_nodes(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
						unsigned long end_pfn);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
						unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn);
extern unsigned long find_min_pfn_with_active_regions(void);
extern void free_bootmem_with_active_regions(int nid,
						unsigned long max_low_pfn);
extern void sparse_memory_present_with_active_regions(int nid);

#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
    !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
static inline int __early_pfn_to_nid(unsigned long pfn)
{
	return 0;
}
#else
/* please see mm/page_alloc.c */
extern int __meminit early_pfn_to_nid(unsigned long pfn);
#ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
/* there is a per-arch backend function. */
extern int __meminit __early_pfn_to_nid(unsigned long pfn);
#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
#endif

extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_zone(unsigned long, int, unsigned long,
				unsigned long, enum memmap_context);
extern void setup_per_zone_wmarks(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __init mmap_init(void);
extern void show_mem(unsigned int flags);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
extern int after_bootmem;

extern __printf(3, 4)
void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);

extern void setup_per_cpu_pageset(void);

extern void zone_pcp_update(struct zone *zone);
extern void zone_pcp_reset(struct zone *zone);

/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);

/* interval_tree.c */
void vma_interval_tree_insert(struct vm_area_struct *node,
			      struct rb_root *root);
void vma_interval_tree_insert_after(struct vm_area_struct *node,
				    struct vm_area_struct *prev,
				    struct rb_root *root);
void vma_interval_tree_remove(struct vm_area_struct *node,
			      struct rb_root *root);
struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root *root,
				unsigned long start, unsigned long last);
struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
				unsigned long start, unsigned long last);

#define vma_interval_tree_foreach(vma, root, start, last)		\
	for (vma = vma_interval_tree_iter_first(root, start, last);	\
	     vma; vma = vma_interval_tree_iter_next(vma, start, last))

static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
					struct list_head *list)
{
	list_add_tail(&vma->shared.nonlinear, list);
}

void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
				   struct rb_root *root);
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
				   struct rb_root *root);
struct anon_vma_chain *anon_vma_interval_tree_iter_first(
	struct rb_root *root, unsigned long start, unsigned long last);
struct anon_vma_chain *anon_vma_interval_tree_iter_next(
	struct anon_vma_chain *node, unsigned long start, unsigned long last);
#ifdef CONFIG_DEBUG_VM_RB
void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
#endif

#define anon_vma_interval_tree_foreach(avc, root, start, last)		\
	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
	     avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))

/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
extern struct vm_area_struct *vma_merge(struct mm_struct *,
	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
	struct mempolicy *);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int split_vma(struct mm_struct *,
	struct vm_area_struct *, unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
	struct rb_node **, struct rb_node *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);

extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);

extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);

extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
extern int install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags, struct page **pages);

extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);

extern unsigned long mmap_region(struct file *file, unsigned long addr,
	unsigned long len, unsigned long flags,
	vm_flags_t vm_flags, unsigned long pgoff);
extern unsigned long do_mmap_pgoff(struct file *, unsigned long,
	unsigned long, unsigned long,
	unsigned long, unsigned long);
extern int do_munmap(struct mm_struct *, unsigned long, size_t);

/* These take the mm semaphore themselves */
extern unsigned long vm_brk(unsigned long, unsigned long);
extern int vm_munmap(unsigned long, size_t);
extern unsigned long vm_mmap(struct file *, unsigned long,
	unsigned long, unsigned long,
	unsigned long, unsigned long);

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
	unsigned long flags;
	unsigned long length;
	unsigned long low_limit;
	unsigned long high_limit;
	unsigned long align_mask;
	unsigned long align_offset;
};

extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);

/*
 * Search for an unmapped address range.
 *
 * We are looking for a range that:
 * - does not intersect with any VMA;
 * - is contained within the [low_limit, high_limit) interval;
 * - is at least the desired size;
 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask).
 */
static inline unsigned long
vm_unmapped_area(struct vm_unmapped_area_info *info)
{
	if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
		return unmapped_area(info);
	else
		return unmapped_area_topdown(info);
}
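
/*
 * Illustrative sketch (not part of this header): how an architecture's
 * arch_get_unmapped_area() might fill in the parameter block, roughly
 * following the pattern in mm/mmap.c:
 *
 *	struct vm_unmapped_area_info info;
 *
 *	info.flags = 0;			-- or VM_UNMAPPED_AREA_TOPDOWN
 *	info.length = len;
 *	info.low_limit = TASK_UNMAPPED_BASE;
 *	info.high_limit = TASK_SIZE;
 *	info.align_mask = 0;
 *	info.align_offset = 0;
 *	return vm_unmapped_area(&info);
 */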

/* truncate.c */
extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
				       loff_t lstart, loff_t lend);

/* generic vm_area_ops exported for stackable file systems */
extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);

/* mm/page-writeback.c */
int write_one_page(struct page *page, int wait);
void task_dirty_inc(struct task_struct *tsk);

/* readahead.c */
#define VM_MAX_READAHEAD	128	/* kbytes */
#define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */

int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read);

void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra,
			       struct file *filp,
			       pgoff_t offset,
			       unsigned long size);

void page_cache_async_readahead(struct address_space *mapping,
				struct file_ra_state *ra,
				struct file *filp,
				struct page *pg,
				pgoff_t offset,
				unsigned long size);

unsigned long max_sane_readahead(unsigned long nr);
unsigned long ra_submit(struct file_ra_state *ra,
			struct address_space *mapping,
			struct file *filp);

/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
1538
/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
extern int expand_downwards(struct vm_area_struct *vma,
		unsigned long address);
#if VM_GROWSUP
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#else
#define expand_upwards(vma, address) do { } while (0)
#endif
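
/*
 * Example (illustrative sketch): an arch page-fault handler that faults
 * below the start of a VM_GROWSDOWN vma tries to grow the stack before
 * declaring a bad access. 'vma' and 'address' are assumed to come from
 * the fault, with mmap_sem already held for read:
 *
 *	if (vma->vm_start > address) {
 *		if (!(vma->vm_flags & VM_GROWSDOWN))
 *			goto bad_area;
 *		if (expand_stack(vma, address))
 *			goto bad_area;
 *	}
 */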

/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
					     struct vm_area_struct **pprev);

/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
   NULL if none.  Assume start_addr < end_addr. */
static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct * vma = find_vma(mm, start_addr);

	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}
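
/*
 * Example (illustrative sketch): callers commonly use
 * find_vma_intersection() to check that no part of a range is already
 * mapped before claiming it. 'mm', 'start' and 'len' are assumed locals,
 * and the error code is a placeholder choice:
 *
 *	if (find_vma_intersection(mm, start, start + len))
 *		return -EEXIST;
 */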

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

/* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
				unsigned long vm_start, unsigned long vm_end)
{
	struct vm_area_struct *vma = find_vma(mm, vm_start);

	if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
		vma = NULL;

	return vma;
}

#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
#else
static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(0);
}
#endif

struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
			unsigned long pfn, unsigned long size, pgprot_t);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);
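
/*
 * Example (illustrative sketch): a character driver's mmap method can
 * expose device memory by mapping raw pfns into the vma. 'mydrv_mmap' is
 * a hypothetical method, and interpreting vm_pgoff as the target pfn is
 * a common convention rather than a rule:
 *
 *	static int mydrv_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		unsigned long size = vma->vm_end - vma->vm_start;
 *
 *		return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 *				       size, vma->vm_page_prot);
 *	}
 */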

struct page *follow_page(struct vm_area_struct *, unsigned long address,
			unsigned int foll_flags);
#define FOLL_WRITE	0x01	/* check pte is writable */
#define FOLL_TOUCH	0x02	/* mark page accessed */
#define FOLL_GET	0x04	/* do get_page on page */
#define FOLL_DUMP	0x08	/* give error on hole if it would be zero */
#define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
#define FOLL_NOWAIT	0x20	/* if a disk transfer is needed, start the IO
				 * and return without waiting upon it */
#define FOLL_MLOCK	0x40	/* mark page as mlocked */
#define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
#define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
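
/*
 * Example (illustrative sketch): follow_page() must be called with
 * mmap_sem held; combining FOLL_GET with FOLL_TOUCH pins the page and
 * marks it accessed, and the caller drops the reference when done.
 * 'mm', 'vma' and 'address' are assumed locals:
 *
 *	down_read(&mm->mmap_sem);
 *	page = follow_page(vma, address, FOLL_GET | FOLL_TOUCH);
 *	if (page && !IS_ERR(page)) {
 *		... use the page ...
 *		put_page(page);
 *	}
 *	up_read(&mm->mmap_sem);
 */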

typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
			void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
			       unsigned long size, pte_fn_t fn, void *data);
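
/*
 * Example (illustrative sketch): apply_to_page_range() walks every pte
 * covering [address, address + size), creating intermediate page tables
 * as needed, and calls 'fn' on each pte; a non-zero return aborts the
 * walk and is propagated to the caller. 'count_present' is a
 * hypothetical callback:
 *
 *	static int count_present(pte_t *pte, pgtable_t token,
 *				 unsigned long addr, void *data)
 *	{
 *		unsigned long *count = data;
 *
 *		if (pte_present(*pte))
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	err = apply_to_page_range(mm, start, len, count_present, &count);
 */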

#ifdef CONFIG_PROC_FS
void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
#else
static inline void vm_stat_account(struct mm_struct *mm,
			unsigned long flags, struct file *file, long pages)
{
	mm->total_vm += pages;
}
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_DEBUG_PAGEALLOC
extern void kernel_map_pages(struct page *page, int numpages, int enable);
#ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page);
#endif /* CONFIG_HIBERNATION */
#else
static inline void
kernel_map_pages(struct page *page, int numpages, int enable) {}
#ifdef CONFIG_HIBERNATION
static inline bool kernel_page_present(struct page *page) { return true; }
#endif /* CONFIG_HIBERNATION */
#endif

extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
#ifdef __HAVE_ARCH_GATE_AREA
int in_gate_area_no_mm(unsigned long addr);
int in_gate_area(struct mm_struct *mm, unsigned long addr);
#else
int in_gate_area_no_mm(unsigned long addr);
#define in_gate_area(mm, addr) ({(void)mm; in_gate_area_no_mm(addr);})
#endif	/* __HAVE_ARCH_GATE_AREA */

int drop_caches_sysctl_handler(struct ctl_table *, int,
			       void __user *, size_t *, loff_t *);
unsigned long shrink_slab(struct shrink_control *shrink,
			  unsigned long nr_pages_scanned,
			  unsigned long lru_pages);

#ifndef CONFIG_MMU
#define randomize_va_space 0
#else
extern int randomize_va_space;
#endif

const char * arch_vma_name(struct vm_area_struct *vma);
void print_vma_addr(char *prefix, unsigned long rip);

void sparse_mem_maps_populate_node(struct page **map_map,
				   unsigned long pnum_begin,
				   unsigned long pnum_end,
				   unsigned long map_count,
				   int nodeid);

struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
void *vmemmap_alloc_block(unsigned long size, int node);
void *vmemmap_alloc_block_buf(unsigned long size, int node);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(struct page *start_page,
			       unsigned long pages, int node);
int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
void vmemmap_populate_print_last(void);

enum mf_flags {
	MF_COUNT_INCREASED = 1 << 0,
	MF_ACTION_REQUIRED = 1 << 1,
	MF_MUST_KILL = 1 << 2,
};
extern int memory_failure(unsigned long pfn, int trapno, int flags);
extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
extern int unpoison_memory(unsigned long pfn);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
extern atomic_long_t mce_bad_pages;
extern int soft_offline_page(struct page *page, int flags);
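
/*
 * Example (illustrative sketch): a machine-check handler that has decoded
 * a failing physical address hands it to the recovery code. From atomic
 * context the deferred queue must be used; process context may call
 * memory_failure() directly. 'pfn' and 'trapno' are assumed inputs, and
 * the context test is a simplification:
 *
 *	if (in_interrupt())
 *		memory_failure_queue(pfn, trapno, MF_ACTION_REQUIRED);
 *	else
 *		memory_failure(pfn, trapno, MF_ACTION_REQUIRED);
 */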

extern void dump_page(struct page *page);

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
extern void clear_huge_page(struct page *page,
			    unsigned long addr,
			    unsigned int pages_per_huge_page);
extern void copy_user_huge_page(struct page *dst, struct page *src,
				unsigned long addr, struct vm_area_struct *vma,
				unsigned int pages_per_huge_page);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;

static inline unsigned int debug_guardpage_minorder(void)
{
	return _debug_guardpage_minorder;
}

static inline bool page_is_guard(struct page *page)
{
	return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
}
#else
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool page_is_guard(struct page *page) { return false; }
#endif /* CONFIG_DEBUG_PAGEALLOC */

#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */