#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/memcontrol.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
	spinlock_t lock;	/* Serialize access to vma list */
#ifdef CONFIG_KSM
	atomic_t ksm_refcount;
#endif
	/*
	 * NOTE: the LSB of the head.next is set by
	 * mm_take_all_locks() _after_ taking the above lock. So the
	 * head must only be read/written after taking the above lock
	 * to be sure to see a valid next pointer. The LSB bit itself
	 * is serialized by a system wide lock only visible to
	 * mm_take_all_locks() (mm_all_locks_mutex).
	 */
	struct list_head head;	/* List of private "related" vmas */
};
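
/*
 * Illustrative sketch (not part of this header): roughly how rmap code in
 * mm/rmap.c consumes the structure above. The list must be walked under
 * anon_vma->lock; the linkage field name (vma->anon_vma_node) and the
 * vma_address() helper are internal to mm/rmap.c in this kernel version
 * and are shown here only for orientation.
 *
 *	spin_lock(&anon_vma->lock);
 *	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
 *		unsigned long address = vma_address(page, vma);
 *		... examine or tear down the pte mapping page at address ...
 *	}
 *	spin_unlock(&anon_vma->lock);
 */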

#ifdef CONFIG_MMU
#ifdef CONFIG_KSM
static inline void ksm_refcount_init(struct anon_vma *anon_vma)
{
	atomic_set(&anon_vma->ksm_refcount, 0);
}

static inline int ksm_refcount(struct anon_vma *anon_vma)
{
	return atomic_read(&anon_vma->ksm_refcount);
}
#else
static inline void ksm_refcount_init(struct anon_vma *anon_vma)
{
}

static inline int ksm_refcount(struct anon_vma *anon_vma)
{
	return 0;
}
#endif /* CONFIG_KSM */

static inline struct anon_vma *page_anon_vma(struct page *page)
{
	if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
					    PAGE_MAPPING_ANON)
		return NULL;
	return page_rmapping(page);
}

static inline void anon_vma_lock(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		spin_lock(&anon_vma->lock);
}

static inline void anon_vma_unlock(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		spin_unlock(&anon_vma->lock);
}
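
/*
 * Illustrative sketch (hypothetical caller): these helpers exist because
 * anyone moving a vma's vm_start/vm_pgoff must exclude concurrent rmap
 * walks, which would otherwise compute stale addresses for the pages
 * mapped by this vma:
 *
 *	anon_vma_lock(vma);
 *	vma->vm_start = start;
 *	vma->vm_pgoff = pgoff;
 *	anon_vma_unlock(vma);
 */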

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);	/* create anon_vma_cachep */
int  anon_vma_prepare(struct vm_area_struct *);
void __anon_vma_merge(struct vm_area_struct *, struct vm_area_struct *);
void anon_vma_unlink(struct vm_area_struct *);
void anon_vma_link(struct vm_area_struct *);
void __anon_vma_link(struct vm_area_struct *);
void anon_vma_free(struct anon_vma *);

/*
 * rmap interfaces called when adding or removing pte of page
 */
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *);
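
/*
 * Illustrative sketch of an anonymous-fault caller, modelled loosely on
 * do_anonymous_page() in mm/memory.c (details elided, not a verbatim
 * excerpt): the vma's anon_vma is prepared before the first anonymous
 * page is mapped, and rmap is told about the brand-new pte.
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	...
 *	page_add_new_anon_rmap(page, vma, address);
 *	set_pte_at(mm, address, page_table, entry);
 */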

/*
 * Used when duplicating a pte that already maps this page (fork's
 * copy_page_range() path): the page gains one more mapping.
 */
static inline void page_dup_rmap(struct page *page)
{
	atomic_inc(&page->_mapcount);
}

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked,
			struct mem_cgroup *cnt, unsigned long *vm_flags);
int page_referenced_one(struct page *, struct vm_area_struct *,
	unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);

enum ttu_flags {
	TTU_UNMAP = 0,			/* unmap mode */
	TTU_MIGRATION = 1,		/* migration mode */
	TTU_MUNLOCK = 2,		/* munlock mode */
	TTU_ACTION_MASK = 0xff,

	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
};
#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)

int try_to_unmap(struct page *, enum ttu_flags flags);
int try_to_unmap_one(struct page *, struct vm_area_struct *,
			unsigned long address, enum ttu_flags flags);
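
/*
 * Illustrative: an action from the low byte is combined with modifier
 * bits above it, e.g. the hwpoison path in mm/memory-failure.c unmaps
 * while ignoring mlock and reference bits (sketch):
 *
 *	ret = try_to_unmap(page, TTU_UNMAP | TTU_IGNORE_MLOCK |
 *					TTU_IGNORE_ACCESS);
 *
 * TTU_ACTION(flags) then recovers just the action part for mode checks.
 */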

/*
 * Called from mm/filemap_xip.c to unmap empty zero page
 */
pte_t *page_check_address(struct page *, struct mm_struct *,
				unsigned long, spinlock_t **, int);
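
/*
 * Illustrative sketch of the usual page_check_address() calling pattern
 * (as in page_referenced_one() in mm/rmap.c): on success the pte is
 * mapped and its page-table lock is held, and the caller must release
 * both via pte_unmap_unlock().
 *
 *	pte = page_check_address(page, mm, address, &ptl, 0);
 *	if (!pte)
 *		return;			(page not mapped at this address)
 *	... inspect or modify *pte ...
 *	pte_unmap_unlock(pte, ptl);
 */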

/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);
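
/*
 * Illustrative sketch (the pattern used by clear_page_dirty_for_io() in
 * mm/page-writeback.c): if write-protecting the ptes found any that were
 * still dirty, that dirtiness is handed back to the struct page.
 *
 *	if (page_mkclean(page))
 *		set_page_dirty(page);
 */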

/*
 * called in munlock()/munmap() path to check for other vmas holding
 * the page mlocked.
 */
int try_to_munlock(struct page *);

/*
 * Called by memory-failure.c to kill processes.
 */
struct anon_vma *page_lock_anon_vma(struct page *page);
void page_unlock_anon_vma(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
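
/*
 * Illustrative sketch of the lock/unlock pairing (modelled on
 * collect_procs_anon() in mm/memory-failure.c): page_lock_anon_vma()
 * returns NULL if the page is no longer anonymously mapped.
 *
 *	anon_vma = page_lock_anon_vma(page);
 *	if (!anon_vma)
 *		return;
 *	... walk anon_vma->head to find the tasks mapping the page ...
 *	page_unlock_anon_vma(anon_vma);
 */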

/*
 * Called by migrate.c to remove migration ptes, but might be used more later.
 */
int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg);
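
/*
 * Illustrative sketch: rmap_walk() calls rmap_one for each vma that may
 * map the page, stopping early if the callback returns anything other
 * than SWAP_AGAIN. A migrate.c-style caller (the callback name here is
 * hypothetical; the real one is remove_migration_pte):
 *
 *	static int fixup_one_pte(struct page *page, struct vm_area_struct *vma,
 *				 unsigned long addr, void *arg)
 *	{
 *		... rewrite the migration pte at addr ...
 *		return SWAP_AGAIN;
 *	}
 *
 *	rmap_walk(newpage, fixup_one_pte, oldpage);
 */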

#else	/* !CONFIG_MMU */

#define anon_vma_init()		do {} while (0)
#define anon_vma_prepare(vma)	(0)
#define anon_vma_link(vma)	do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
				  struct mem_cgroup *cnt,
				  unsigned long *vm_flags)
{
	*vm_flags = 0;
	return TestClearPageReferenced(page);
}

#define try_to_unmap(page, refs) SWAP_FAIL

static inline int page_mkclean(struct page *page)
{
	return 0;
}

#endif	/* CONFIG_MMU */

/*
 * Return values of try_to_unmap
 */
#define SWAP_SUCCESS	0
#define SWAP_AGAIN	1
#define SWAP_FAIL	2
#define SWAP_MLOCK	3
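
/*
 * Illustrative sketch of how a shrink_page_list()-style consumer in
 * mm/vmscan.c dispatches on these values (labels are hypothetical):
 *
 *	switch (try_to_unmap(page, TTU_UNMAP)) {
 *	case SWAP_FAIL:
 *		goto activate_locked;
 *	case SWAP_AGAIN:
 *		goto keep_locked;
 *	case SWAP_MLOCK:
 *		goto cull_mlocked;
 *	case SWAP_SUCCESS:
 *		break;		(the page is now unmapped; try to free it)
 *	}
 */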

#endif	/* _LINUX_RMAP_H */