Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef _LINUX_PAGEMAP_H |
2 | #define _LINUX_PAGEMAP_H | |
3 | ||
4 | /* | |
5 | * Copyright 1995 Linus Torvalds | |
6 | */ | |
7 | #include <linux/mm.h> | |
8 | #include <linux/fs.h> | |
9 | #include <linux/list.h> | |
10 | #include <linux/highmem.h> | |
11 | #include <linux/compiler.h> | |
12 | #include <asm/uaccess.h> | |
13 | #include <linux/gfp.h> | |
3e9f45bd | 14 | #include <linux/bitops.h> |
e286781d | 15 | #include <linux/hardirq.h> /* for in_interrupt() */ |
8edf344c | 16 | #include <linux/hugetlb_inline.h> |
1da177e4 LT |
17 | |
18 | /* | |
19 | * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page | |
20 | * allocation mode flags. | |
21 | */ | |
9a896c9a LS |
22 | enum mapping_flags { |
23 | AS_EIO = __GFP_BITS_SHIFT + 0, /* IO error on async write */ | |
24 | AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */ | |
25 | AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */ | |
9a896c9a | 26 | AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */ |
9d1ba805 | 27 | AS_EXITING = __GFP_BITS_SHIFT + 4, /* final truncate in progress */ |
9a896c9a | 28 | }; |
1da177e4 | 29 | |
3e9f45bd GC |
30 | static inline void mapping_set_error(struct address_space *mapping, int error) |
31 | { | |
2185e69f | 32 | if (unlikely(error)) { |
3e9f45bd GC |
33 | if (error == -ENOSPC) |
34 | set_bit(AS_ENOSPC, &mapping->flags); | |
35 | else | |
36 | set_bit(AS_EIO, &mapping->flags); | |
37 | } | |
38 | } | |
39 | ||
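/*
 * Illustrative sketch (editor's addition, not part of this header): how a
 * writeback/fsync path might consume the sticky AS_EIO/AS_ENOSPC bits set by
 * mapping_set_error() above.  The helper name is hypothetical; mainline
 * implements this pattern as filemap_check_errors() in mm/filemap.c.
 */
static inline int example_check_mapping_errors(struct address_space *mapping)
{
	int ret = 0;

	/* test_and_clear_bit() both reports and clears the sticky error bit */
	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}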
ba9ddf49 LS |
40 | static inline void mapping_set_unevictable(struct address_space *mapping) |
41 | { | |
42 | set_bit(AS_UNEVICTABLE, &mapping->flags); | |
43 | } | |
44 | ||
89e004ea LS |
45 | static inline void mapping_clear_unevictable(struct address_space *mapping) |
46 | { | |
47 | clear_bit(AS_UNEVICTABLE, &mapping->flags); | |
48 | } | |
49 | ||
ba9ddf49 LS |
50 | static inline int mapping_unevictable(struct address_space *mapping) |
51 | { | |
088e5465 | 52 | if (mapping) |
89e004ea LS |
53 | return test_bit(AS_UNEVICTABLE, &mapping->flags); |
54 | return !!mapping; | |
ba9ddf49 | 55 | } |
ba9ddf49 | 56 | |
91b0abe3 JW |
57 | static inline void mapping_set_exiting(struct address_space *mapping) |
58 | { | |
59 | set_bit(AS_EXITING, &mapping->flags); | |
60 | } | |
61 | ||
62 | static inline int mapping_exiting(struct address_space *mapping) | |
63 | { | |
64 | return test_bit(AS_EXITING, &mapping->flags); | |
65 | } | |
66 | ||
dd0fc66f | 67 | static inline gfp_t mapping_gfp_mask(struct address_space * mapping) |
1da177e4 | 68 | { |
260b2367 | 69 | return (__force gfp_t)mapping->flags & __GFP_BITS_MASK; |
1da177e4 LT |
70 | } |
71 | ||
c62d2555 MH |
72 | /* Restricts the given gfp_mask to what the mapping allows. */ |
73 | static inline gfp_t mapping_gfp_constraint(struct address_space *mapping, | |
74 | gfp_t gfp_mask) | |
75 | { | |
76 | return mapping_gfp_mask(mapping) & gfp_mask; | |
77 | } | |
78 | ||
1da177e4 LT |
79 | /* |
80 | * This is non-atomic. Only to be used before the mapping is activated. | |
81 | * Probably needs a barrier... | |
82 | */ | |
260b2367 | 83 | static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask) |
1da177e4 | 84 | { |
260b2367 AV |
85 | m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) | |
86 | (__force unsigned long)mask; | |
1da177e4 LT |
87 | } |
88 | ||
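/*
 * Illustrative sketch (editor's addition, not part of this header): a
 * filesystem usually restricts a mapping's allocation mode while initialising
 * the inode, i.e. before other threads can reach the mapping, which is why
 * the non-atomic mapping_set_gfp_mask() above is sufficient.  The particular
 * gfp restriction here is only an example.
 */
static inline void example_restrict_mapping_gfp(struct inode *inode)
{
	/* Clear __GFP_FS so page cache allocations cannot recurse into the fs */
	mapping_set_gfp_mask(inode->i_mapping,
			mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
}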
89 | /* | |
50d8a189 | 90 | * The page cache can be done in larger chunks than |
1da177e4 LT |
91 | * one page, because it allows for more efficient | |
92 | * throughput (it can then be mapped into user | |
93 | * space in smaller chunks for the same flexibility). | |
94 | * | |
95 | * Or rather, it _will_ be done in larger chunks. | |
96 | */ | |
97 | #define PAGE_CACHE_SHIFT PAGE_SHIFT | |
98 | #define PAGE_CACHE_SIZE PAGE_SIZE | |
99 | #define PAGE_CACHE_MASK PAGE_MASK | |
100 | #define PAGE_CACHE_ALIGN(addr) (((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK) | |
101 | ||
102 | #define page_cache_get(page) get_page(page) | |
103 | #define page_cache_release(page) put_page(page) | |
b745bc85 | 104 | void release_pages(struct page **pages, int nr, bool cold); |
1da177e4 | 105 | |
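/*
 * Illustrative sketch (editor's addition, not part of this header): the usual
 * way the PAGE_CACHE_* macros are used in write paths - splitting a file
 * position into a page cache index and an offset within that page.  Variable
 * names are only examples.
 */
static inline void example_split_pos(loff_t pos, pgoff_t *index,
				     unsigned long *offset)
{
	*index = pos >> PAGE_CACHE_SHIFT;		/* which page */
	*offset = pos & (PAGE_CACHE_SIZE - 1);		/* where inside that page */
}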
e286781d NP |
106 | /* |
107 | * speculatively take a reference to a page. | |
108 | * If the page is free (_count == 0), then _count is untouched, and 0 | |
109 | * is returned. Otherwise, _count is incremented by 1 and 1 is returned. | |
110 | * | |
111 | * This function must be called inside the same rcu_read_lock() section as has | |
112 | * been used to lookup the page in the pagecache radix-tree (or page table): | |
113 | * this allows allocators to use a synchronize_rcu() to stabilize _count. | |
114 | * | |
115 | * Unless an RCU grace period has passed, the count of all pages coming out | |
116 | * of the allocator must be considered unstable. page_count may return higher | |
117 | * than expected, and put_page must be able to do the right thing when the | |
118 | * page has been finished with, no matter what it is subsequently allocated | |
119 | * for (because put_page is what is used here to drop an invalid speculative | |
120 | * reference). | |
121 | * | |
122 | * This is the interesting part of the lockless pagecache (and lockless | |
123 | * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page) | |
124 | * has the following pattern: | |
125 | * 1. find page in radix tree | |
126 | * 2. conditionally increment refcount | |
127 | * 3. check the page is still in pagecache (if no, goto 1) | |
128 | * | |
129 | * Remove-side that cares about stability of _count (eg. reclaim) has the | |
130 | * following (with tree_lock held for write): | |
131 | * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg) | |
132 | * B. remove page from pagecache | |
133 | * C. free the page | |
134 | * | |
135 | * There are 2 critical interleavings that matter: | |
136 | * - 2 runs before A: in this case, A sees elevated refcount and bails out | |
137 | * - A runs before 2: in this case, 2 sees zero refcount and retries; | |
138 | *   subsequently, B will complete and 1 will find no page, causing the | |
139 | *   lookup to return NULL. | |
140 | * | |
141 | * It is possible that between 1 and 2, the page is removed then the exact same | |
142 | * page is inserted into the same position in pagecache. That's OK: the | |
143 | * old find_get_page using tree_lock could equally have run before or after | |
144 | * such a re-insertion, depending on order that locks are granted. | |
145 | * | |
146 | * Lookups racing against pagecache insertion isn't a big problem: either 1 | |
147 | * will find the page or it will not. Likewise, the old find_get_page could run | |
148 | * either before the insertion or afterwards, depending on timing. | |
149 | */ | |
150 | static inline int page_cache_get_speculative(struct page *page) | |
151 | { | |
152 | VM_BUG_ON(in_interrupt()); | |
153 | ||
8375ad98 | 154 | #ifdef CONFIG_TINY_RCU |
bdd4e85d | 155 | # ifdef CONFIG_PREEMPT_COUNT |
e286781d NP |
156 | VM_BUG_ON(!in_atomic()); |
157 | # endif | |
158 | /* | |
159 | * Preempt must be disabled here - we rely on rcu_read_lock doing | |
160 | * this for us. | |
161 | * | |
162 | * Pagecache won't be truncated from interrupt context, so if we have | |
163 | * found a page in the radix tree here, we have pinned its refcount by | |
164 | * disabling preempt, and hence no need for the "speculative get" that | |
165 | * SMP requires. | |
166 | */ | |
309381fe | 167 | VM_BUG_ON_PAGE(page_count(page) == 0, page); |
e286781d NP |
168 | atomic_inc(&page->_count); |
169 | ||
170 | #else | |
171 | if (unlikely(!get_page_unless_zero(page))) { | |
172 | /* | |
173 | * Either the page has been freed, or will be freed. | |
174 | * In either case, retry here and the caller should | |
175 | * do the right thing (see comments above). | |
176 | */ | |
177 | return 0; | |
178 | } | |
179 | #endif | |
309381fe | 180 | VM_BUG_ON_PAGE(PageTail(page), page); |
e286781d NP |
181 | |
182 | return 1; | |
183 | } | |
184 | ||
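/*
 * Illustrative sketch (editor's addition, not part of this header): the
 * lookup-side protocol described above (steps 1-3), roughly as mm/filemap.c's
 * find_get_entry() implements it.  Exceptional (shadow) entries and the
 * radix-tree slot dereference subtleties are omitted for brevity.
 */
static inline struct page *example_lockless_lookup(struct address_space *mapping,
						   pgoff_t offset)
{
	struct page *page;

	rcu_read_lock();
repeat:
	page = radix_tree_lookup(&mapping->page_tree, offset);	/* 1. find */
	if (page) {
		if (!page_cache_get_speculative(page))		/* 2. try to pin */
			goto repeat;
		/* 3. recheck: the page may have been truncated/reused meanwhile */
		if (unlikely(page != radix_tree_lookup(&mapping->page_tree, offset))) {
			page_cache_release(page);
			goto repeat;
		}
	}
	rcu_read_unlock();
	return page;
}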
ce0ad7f0 NP |
185 | /* |
186 | * Same as above, but add instead of inc (could just be merged) | |
187 | */ | |
188 | static inline int page_cache_add_speculative(struct page *page, int count) | |
189 | { | |
190 | VM_BUG_ON(in_interrupt()); | |
191 | ||
b560d8ad | 192 | #if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU) |
bdd4e85d | 193 | # ifdef CONFIG_PREEMPT_COUNT |
ce0ad7f0 NP |
194 | VM_BUG_ON(!in_atomic()); |
195 | # endif | |
309381fe | 196 | VM_BUG_ON_PAGE(page_count(page) == 0, page); |
ce0ad7f0 NP |
197 | atomic_add(count, &page->_count); |
198 | ||
199 | #else | |
200 | if (unlikely(!atomic_add_unless(&page->_count, count, 0))) | |
201 | return 0; | |
202 | #endif | |
309381fe | 203 | VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page); |
ce0ad7f0 NP |
204 | |
205 | return 1; | |
206 | } | |
207 | ||
e286781d NP |
208 | static inline int page_freeze_refs(struct page *page, int count) |
209 | { | |
210 | return likely(atomic_cmpxchg(&page->_count, count, 0) == count); | |
211 | } | |
212 | ||
213 | static inline void page_unfreeze_refs(struct page *page, int count) | |
214 | { | |
309381fe | 215 | VM_BUG_ON_PAGE(page_count(page) != 0, page); |
e286781d NP |
216 | VM_BUG_ON(count == 0); |
217 | ||
218 | atomic_set(&page->_count, count); | |
219 | } | |
220 | ||
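/*
 * Illustrative sketch (editor's addition, not part of this header): the
 * remove-side of the protocol (steps A-C above), roughly as reclaim's
 * __remove_mapping() uses the freeze helpers.  Shadow entries, memcg and
 * dirty-page handling are omitted; __delete_from_page_cache() is declared
 * further down in this header.
 */
static inline int example_remove_side(struct address_space *mapping,
				      struct page *page)
{
	spin_lock_irq(&mapping->tree_lock);
	/*
	 * A: the expected count of 2 is the caller's reference plus the page
	 * cache's own; this fails if a speculative lookup raised the count.
	 */
	if (!page_freeze_refs(page, 2)) {
		spin_unlock_irq(&mapping->tree_lock);
		return 0;
	}
	__delete_from_page_cache(page, NULL, NULL);		/* B */
	spin_unlock_irq(&mapping->tree_lock);
	return 1;	/* C: the caller now frees the page */
}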
44110fe3 | 221 | #ifdef CONFIG_NUMA |
2ae88149 | 222 | extern struct page *__page_cache_alloc(gfp_t gfp); |
44110fe3 | 223 | #else |
2ae88149 NP |
224 | static inline struct page *__page_cache_alloc(gfp_t gfp) |
225 | { | |
226 | return alloc_pages(gfp, 0); | |
227 | } | |
228 | #endif | |
229 | ||
1da177e4 LT |
230 | static inline struct page *page_cache_alloc(struct address_space *x) |
231 | { | |
2ae88149 | 232 | return __page_cache_alloc(mapping_gfp_mask(x)); |
1da177e4 LT |
233 | } |
234 | ||
235 | static inline struct page *page_cache_alloc_cold(struct address_space *x) | |
236 | { | |
2ae88149 | 237 | return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD); |
1da177e4 LT |
238 | } |
239 | ||
7b1de586 WF |
240 | static inline struct page *page_cache_alloc_readahead(struct address_space *x) |
241 | { | |
242 | return __page_cache_alloc(mapping_gfp_mask(x) | | |
243 | __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN); | |
244 | } | |
245 | ||
1da177e4 LT |
246 | typedef int filler_t(void *, struct page *); |
247 | ||
e7b563bb JW |
248 | pgoff_t page_cache_next_hole(struct address_space *mapping, |
249 | pgoff_t index, unsigned long max_scan); | |
250 | pgoff_t page_cache_prev_hole(struct address_space *mapping, | |
251 | pgoff_t index, unsigned long max_scan); | |
252 | ||
2457aec6 MG |
253 | #define FGP_ACCESSED 0x00000001 |
254 | #define FGP_LOCK 0x00000002 | |
255 | #define FGP_CREAT 0x00000004 | |
256 | #define FGP_WRITE 0x00000008 | |
257 | #define FGP_NOFS 0x00000010 | |
258 | #define FGP_NOWAIT 0x00000020 | |
259 | ||
260 | struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, | |
45f87de5 | 261 | int fgp_flags, gfp_t cache_gfp_mask); |
2457aec6 MG |
262 | |
263 | /** | |
264 | * find_get_page - find and get a page reference | |
265 | * @mapping: the address_space to search | |
266 | * @offset: the page index | |
267 | * | |
268 | * Looks up the page cache slot at @mapping & @offset. If there is a | |
269 | * page cache page, it is returned with an increased refcount. | |
270 | * | |
271 | * Otherwise, %NULL is returned. | |
272 | */ | |
273 | static inline struct page *find_get_page(struct address_space *mapping, | |
274 | pgoff_t offset) | |
275 | { | |
45f87de5 | 276 | return pagecache_get_page(mapping, offset, 0, 0); |
2457aec6 MG |
277 | } |
278 | ||
279 | static inline struct page *find_get_page_flags(struct address_space *mapping, | |
280 | pgoff_t offset, int fgp_flags) | |
281 | { | |
45f87de5 | 282 | return pagecache_get_page(mapping, offset, fgp_flags, 0); |
2457aec6 MG |
283 | } |
284 | ||
285 | /** | |
286 | * find_lock_page - locate, pin and lock a pagecache page | |
288 | * @mapping: the address_space to search | |
289 | * @offset: the page index | |
290 | * | |
291 | * Looks up the page cache slot at @mapping & @offset. If there is a | |
292 | * page cache page, it is returned locked and with an increased | |
293 | * refcount. | |
294 | * | |
295 | * Otherwise, %NULL is returned. | |
296 | * | |
297 | * find_lock_page() may sleep. | |
298 | */ | |
299 | static inline struct page *find_lock_page(struct address_space *mapping, | |
300 | pgoff_t offset) | |
301 | { | |
45f87de5 | 302 | return pagecache_get_page(mapping, offset, FGP_LOCK, 0); |
2457aec6 MG |
303 | } |
304 | ||
305 | /** | |
306 | * find_or_create_page - locate or add a pagecache page | |
307 | * @mapping: the page's address_space | |
308 | * @offset: the page's index into the mapping | |
309 | * @gfp_mask: page allocation mode | |
310 | * | |
311 | * Looks up the page cache slot at @mapping & @offset. If there is a | |
312 | * page cache page, it is returned locked and with an increased | |
313 | * refcount. | |
314 | * | |
315 | * If the page is not present, a new page is allocated using @gfp_mask | |
316 | * and added to the page cache and the VM's LRU list. The page is | |
317 | * returned locked and with an increased refcount. | |
318 | * | |
319 | * On memory exhaustion, %NULL is returned. | |
320 | * | |
321 | * find_or_create_page() may sleep, even if @gfp_mask specifies an | |
322 | * atomic allocation! | |
323 | */ | |
324 | static inline struct page *find_or_create_page(struct address_space *mapping, | |
325 | pgoff_t offset, gfp_t gfp_mask) | |
326 | { | |
327 | return pagecache_get_page(mapping, offset, | |
328 | FGP_LOCK|FGP_ACCESSED|FGP_CREAT, | |
45f87de5 | 329 | gfp_mask); |
2457aec6 MG |
330 | } |
331 | ||
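/*
 * Illustrative sketch (editor's addition, not part of this header): a typical
 * filesystem pattern built on find_or_create_page() - grab a locked page and
 * bring it up to date if needed.  fill_page() is a hypothetical callback, and
 * unlock_page() is declared further down in this header.
 */
static inline struct page *example_get_filled_page(struct address_space *mapping,
					pgoff_t index,
					int (*fill_page)(struct page *))
{
	struct page *page;

	page = find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
	if (!page)
		return NULL;			/* memory exhaustion */
	if (!PageUptodate(page) && fill_page(page)) {
		unlock_page(page);
		page_cache_release(page);
		return NULL;			/* read error */
	}
	SetPageUptodate(page);
	unlock_page(page);
	return page;	/* returned unlocked, with a reference held */
}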
332 | /** | |
333 | * grab_cache_page_nowait - returns locked page at given index in given cache | |
334 | * @mapping: target address_space | |
335 | * @index: the page index | |
336 | * | |
337 | * Same as grab_cache_page(), but do not wait if the page is unavailable. | |
338 | * This is intended for speculative data generators, where the data can | |
339 | * be regenerated if the page couldn't be grabbed. This routine should | |
340 | * be safe to call while holding the lock for another page. | |
341 | * | |
342 | * Clear __GFP_FS when allocating the page to avoid recursion into the fs | |
343 | * and deadlock against the caller's locked page. | |
344 | */ | |
345 | static inline struct page *grab_cache_page_nowait(struct address_space *mapping, | |
346 | pgoff_t index) | |
347 | { | |
348 | return pagecache_get_page(mapping, index, | |
349 | FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT, | |
45f87de5 | 350 | mapping_gfp_mask(mapping)); |
2457aec6 MG |
351 | } |
352 | ||
0cd6144a | 353 | struct page *find_get_entry(struct address_space *mapping, pgoff_t offset); |
0cd6144a | 354 | struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset); |
0cd6144a JW |
355 | unsigned find_get_entries(struct address_space *mapping, pgoff_t start, |
356 | unsigned int nr_entries, struct page **entries, | |
357 | pgoff_t *indices); | |
1da177e4 LT |
358 | unsigned find_get_pages(struct address_space *mapping, pgoff_t start, |
359 | unsigned int nr_pages, struct page **pages); | |
ebf43500 JA |
360 | unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start, |
361 | unsigned int nr_pages, struct page **pages); | |
1da177e4 LT |
362 | unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index, |
363 | int tag, unsigned int nr_pages, struct page **pages); | |
7e7f7749 RZ |
364 | unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start, |
365 | int tag, unsigned int nr_entries, | |
366 | struct page **entries, pgoff_t *indices); | |
1da177e4 | 367 | |
54566b2c NP |
368 | struct page *grab_cache_page_write_begin(struct address_space *mapping, |
369 | pgoff_t index, unsigned flags); | |
afddba49 | 370 | |
1da177e4 LT |
371 | /* |
372 | * Returns locked page at given index in given cache, creating it if needed. | |
373 | */ | |
57f6b96c FW |
374 | static inline struct page *grab_cache_page(struct address_space *mapping, |
375 | pgoff_t index) | |
1da177e4 LT |
376 | { |
377 | return find_or_create_page(mapping, index, mapping_gfp_mask(mapping)); | |
378 | } | |
379 | ||
1da177e4 | 380 | extern struct page * read_cache_page(struct address_space *mapping, |
5e5358e7 | 381 | pgoff_t index, filler_t *filler, void *data); |
0531b2aa LT |
382 | extern struct page * read_cache_page_gfp(struct address_space *mapping, |
383 | pgoff_t index, gfp_t gfp_mask); | |
1da177e4 LT |
384 | extern int read_cache_pages(struct address_space *mapping, |
385 | struct list_head *pages, filler_t *filler, void *data); | |
386 | ||
090d2b18 | 387 | static inline struct page *read_mapping_page(struct address_space *mapping, |
5e5358e7 | 388 | pgoff_t index, void *data) |
090d2b18 PE |
389 | { |
390 | filler_t *filler = (filler_t *)mapping->a_ops->readpage; | |
391 | return read_cache_page(mapping, index, filler, data); | |
392 | } | |
393 | ||
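/*
 * Illustrative sketch (editor's addition, not part of this header): reading a
 * page through the mapping's ->readpage via read_mapping_page(), in the style
 * of ext2/minix directory code.  The page comes back unlocked with a
 * reference held; the caller must kunmap() and page_cache_release() it.
 */
static inline void *example_read_dir_page(struct address_space *mapping,
					  pgoff_t n, struct page **pagep)
{
	struct page *page = read_mapping_page(mapping, n, NULL);

	if (IS_ERR(page))
		return NULL;
	*pagep = page;
	return kmap(page);
}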
a0f7a756 NH |
394 | /* |
395 | * Get the offset in PAGE_SIZE. | |
396 | * (TODO: hugepage should have ->index in PAGE_SIZE) | |
397 | */ | |
398 | static inline pgoff_t page_to_pgoff(struct page *page) | |
399 | { | |
e9b61f19 KS |
400 | pgoff_t pgoff; |
401 | ||
a0f7a756 NH |
402 | if (unlikely(PageHeadHuge(page))) |
403 | return page->index << compound_order(page); | |
e9b61f19 KS |
404 | |
405 | if (likely(!PageTransTail(page))) | |
a0f7a756 | 406 | return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); |
e9b61f19 KS |
407 | |
408 | /* | |
409 | * We don't initialize ->index for tail pages: calculate based on | |
410 | * head page | |
411 | */ | |
412 | pgoff = compound_head(page)->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); | |
413 | pgoff += page - compound_head(page); | |
414 | return pgoff; | |
a0f7a756 NH |
415 | } |
416 | ||
1da177e4 LT |
417 | /* |
418 | * Return byte-offset into filesystem object for page. | |
419 | */ | |
420 | static inline loff_t page_offset(struct page *page) | |
421 | { | |
422 | return ((loff_t)page->index) << PAGE_CACHE_SHIFT; | |
423 | } | |
424 | ||
f981c595 MG |
425 | static inline loff_t page_file_offset(struct page *page) |
426 | { | |
427 | return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT; | |
428 | } | |
429 | ||
0fe6e20b NH |
430 | extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma, |
431 | unsigned long address); | |
432 | ||
1da177e4 LT |
433 | static inline pgoff_t linear_page_index(struct vm_area_struct *vma, |
434 | unsigned long address) | |
435 | { | |
0fe6e20b NH |
436 | pgoff_t pgoff; |
437 | if (unlikely(is_vm_hugetlb_page(vma))) | |
438 | return linear_hugepage_index(vma, address); | |
439 | pgoff = (address - vma->vm_start) >> PAGE_SHIFT; | |
1da177e4 LT |
440 | pgoff += vma->vm_pgoff; |
441 | return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT); | |
442 | } | |
443 | ||
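/*
 * Illustrative sketch (editor's addition, not part of this header): how a
 * fault path turns a faulting user address into a page cache lookup, roughly
 * what mm/filemap.c's filemap_fault() does via vmf->pgoff.  Assumes a
 * file-backed VMA.
 */
static inline struct page *example_lookup_faulting_page(struct vm_area_struct *vma,
							unsigned long address)
{
	pgoff_t index = linear_page_index(vma, address);

	return find_get_page(vma->vm_file->f_mapping, index);
}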
b3c97528 HH |
444 | extern void __lock_page(struct page *page); |
445 | extern int __lock_page_killable(struct page *page); | |
d065bd81 ML |
446 | extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm, |
447 | unsigned int flags); | |
b3c97528 | 448 | extern void unlock_page(struct page *page); |
1da177e4 | 449 | |
529ae9aa NP |
450 | static inline int trylock_page(struct page *page) |
451 | { | |
48c935ad | 452 | page = compound_head(page); |
8413ac9d | 453 | return (likely(!test_and_set_bit_lock(PG_locked, &page->flags))); |
529ae9aa NP |
454 | } |
455 | ||
db37648c NP |
456 | /* |
457 | * lock_page may only be called if we have the page's inode pinned. | |
458 | */ | |
1da177e4 LT |
459 | static inline void lock_page(struct page *page) |
460 | { | |
461 | might_sleep(); | |
529ae9aa | 462 | if (!trylock_page(page)) |
1da177e4 LT |
463 | __lock_page(page); |
464 | } | |
db37648c | 465 | |
2687a356 MW |
466 | /* |
467 | * lock_page_killable is like lock_page but can be interrupted by fatal | |
468 | * signals. It returns 0 if it locked the page and -EINTR if it was | |
469 | * killed while waiting. | |
470 | */ | |
471 | static inline int lock_page_killable(struct page *page) | |
472 | { | |
473 | might_sleep(); | |
529ae9aa | 474 | if (!trylock_page(page)) |
2687a356 MW |
475 | return __lock_page_killable(page); |
476 | return 0; | |
477 | } | |
478 | ||
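/*
 * Illustrative sketch (editor's addition, not part of this header): the usual
 * lock_page_killable() call pattern - on a fatal signal, drop the reference
 * taken by the earlier lookup and propagate -EINTR to abort the read.
 */
static inline int example_lock_page_for_read(struct page *page)
{
	int error = lock_page_killable(page);

	if (error) {
		page_cache_release(page);
		return error;	/* -EINTR: caller aborts */
	}
	return 0;		/* page is now locked */
}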
d065bd81 ML |
479 | /* |
480 | * lock_page_or_retry - Lock the page, unless this would block and the | |
481 | * caller indicated that it can handle a retry. | |
9a95f3cf PC |
482 | * |
483 | * Return value and mmap_sem implications depend on flags; see | |
484 | * __lock_page_or_retry(). | |
d065bd81 ML |
485 | */ |
486 | static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm, | |
487 | unsigned int flags) | |
488 | { | |
489 | might_sleep(); | |
490 | return trylock_page(page) || __lock_page_or_retry(page, mm, flags); | |
491 | } | |
492 | ||
1da177e4 | 493 | /* |
a4796e37 N |
494 | * This is exported only for wait_on_page_locked/wait_on_page_writeback, |
495 | * and for filesystems which need to wait on PG_private. | |
1da177e4 | 496 | */ |
b3c97528 | 497 | extern void wait_on_page_bit(struct page *page, int bit_nr); |
1da177e4 | 498 | |
f62e00cc | 499 | extern int wait_on_page_bit_killable(struct page *page, int bit_nr); |
cbbce822 N |
500 | extern int wait_on_page_bit_killable_timeout(struct page *page, |
501 | int bit_nr, unsigned long timeout); | |
f62e00cc KM |
502 | |
503 | static inline int wait_on_page_locked_killable(struct page *page) | |
504 | { | |
48c935ad KS |
505 | if (!PageLocked(page)) |
506 | return 0; | |
507 | return wait_on_page_bit_killable(compound_head(page), PG_locked); | |
f62e00cc KM |
508 | } |
509 | ||
a4796e37 N |
510 | extern wait_queue_head_t *page_waitqueue(struct page *page); |
511 | static inline void wake_up_page(struct page *page, int bit) | |
512 | { | |
513 | __wake_up_bit(page_waitqueue(page), &page->flags, bit); | |
514 | } | |
515 | ||
1da177e4 LT |
516 | /* |
517 | * Wait for a page to be unlocked. | |
518 | * | |
519 | * This must be called with the caller "holding" the page, | |
520 | * i.e. with an increased "page->_count" so that the page won't | |
521 | * go away during the wait. | |
522 | */ | |
523 | static inline void wait_on_page_locked(struct page *page) | |
524 | { | |
525 | if (PageLocked(page)) | |
48c935ad | 526 | wait_on_page_bit(compound_head(page), PG_locked); |
1da177e4 LT |
527 | } |
528 | ||
529 | /* | |
530 | * Wait for a page to complete writeback | |
531 | */ | |
532 | static inline void wait_on_page_writeback(struct page *page) | |
533 | { | |
534 | if (PageWriteback(page)) | |
535 | wait_on_page_bit(page, PG_writeback); | |
536 | } | |
537 | ||
538 | extern void end_page_writeback(struct page *page); | |
1d1d1a76 | 539 | void wait_for_stable_page(struct page *page); |
1da177e4 | 540 | |
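/*
 * Illustrative sketch (editor's addition, not part of this header): a
 * ->page_mkwrite()-style helper in the spirit of block_page_mkwrite(), which
 * uses wait_for_stable_page() so the page contents cannot change while a
 * stable-write device may still be reading them.
 */
static inline void example_prepare_page_for_write(struct page *page)
{
	lock_page(page);
	/* only waits if the backing device actually requires stable writes */
	wait_for_stable_page(page);
}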
57d99845 MW |
541 | void page_endio(struct page *page, int rw, int err); |
542 | ||
385e1ca5 DH |
543 | /* |
544 | * Add an arbitrary waiter to a page's wait queue | |
545 | */ | |
546 | extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter); | |
547 | ||
1da177e4 LT |
548 | /* |
549 | * Fault a userspace page into pagetables. Return non-zero on a fault. | |
550 | * | |
551 | * This assumes that two userspace pages are always sufficient. That's | |
552 | * not true if PAGE_CACHE_SIZE > PAGE_SIZE. | |
553 | */ | |
554 | static inline int fault_in_pages_writeable(char __user *uaddr, int size) | |
555 | { | |
556 | int ret; | |
557 | ||
08291429 NP |
558 | if (unlikely(size == 0)) |
559 | return 0; | |
560 | ||
1da177e4 LT |
561 | /* |
562 | * Writing zeroes into userspace here is OK, because we know that if | |
563 | * the zero gets there, we'll be overwriting it. | |
564 | */ | |
565 | ret = __put_user(0, uaddr); | |
566 | if (ret == 0) { | |
567 | char __user *end = uaddr + size - 1; | |
568 | ||
569 | /* | |
570 | * If the page was already mapped, this will get a cache miss | |
571 | * for sure, so try to avoid doing it. | |
572 | */ | |
573 | if (((unsigned long)uaddr & PAGE_MASK) != | |
574 | ((unsigned long)end & PAGE_MASK)) | |
f56f821f | 575 | ret = __put_user(0, end); |
1da177e4 LT |
576 | } |
577 | return ret; | |
578 | } | |
579 | ||
08291429 | 580 | static inline int fault_in_pages_readable(const char __user *uaddr, int size) |
1da177e4 LT |
581 | { |
582 | volatile char c; | |
583 | int ret; | |
584 | ||
08291429 NP |
585 | if (unlikely(size == 0)) |
586 | return 0; | |
587 | ||
1da177e4 LT |
588 | ret = __get_user(c, uaddr); |
589 | if (ret == 0) { | |
590 | const char __user *end = uaddr + size - 1; | |
591 | ||
592 | if (((unsigned long)uaddr & PAGE_MASK) != | |
627295e4 | 593 | ((unsigned long)end & PAGE_MASK)) { |
f56f821f | 594 | ret = __get_user(c, end); |
627295e4 AK |
595 | (void)c; |
596 | } | |
1da177e4 | 597 | } |
08291429 | 598 | return ret; |
1da177e4 LT |
599 | } |
600 | ||
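/*
 * Illustrative sketch (editor's addition, not part of this header): why the
 * prefault helpers above exist.  generic_perform_write() faults the source
 * buffer in *before* locking the destination pagecache page, so the copy made
 * under the page lock cannot deadlock by faulting on a page the filesystem
 * itself holds locked.  This is a simplified paraphrase of that loop (the
 * real code retries on a partial copy), not the actual implementation.
 */
static inline int example_prefault_then_copy(struct page *page, unsigned offset,
					     const char __user *buf, unsigned bytes)
{
	char *kaddr;
	unsigned long left;

	if (fault_in_pages_readable(buf, bytes))
		return -EFAULT;			/* user buffer not accessible */

	kaddr = kmap_atomic(page);
	/* the source is already faulted in, so this atomic copy should not fault */
	left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
	kunmap_atomic(kaddr);

	return left ? -EFAULT : 0;
}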
f56f821f DV |
601 | /* |
602 | * Multipage variants of the above prefault helpers, useful if more than | |
603 | * PAGE_SIZE of data needs to be prefaulted. These are separate from the above | |
604 | * functions (which only handle up to PAGE_SIZE) to avoid clobbering the | |
605 | * filemap.c hotpaths. | |
606 | */ | |
607 | static inline int fault_in_multipages_writeable(char __user *uaddr, int size) | |
608 | { | |
af2e8409 | 609 | int ret = 0; |
9923777d | 610 | char __user *end = uaddr + size - 1; |
f56f821f DV |
611 | |
612 | if (unlikely(size == 0)) | |
af2e8409 | 613 | return ret; |
f56f821f DV |
614 | |
615 | /* | |
616 | * Writing zeroes into userspace here is OK, because we know that if | |
617 | * the zero gets there, we'll be overwriting it. | |
618 | */ | |
619 | while (uaddr <= end) { | |
620 | ret = __put_user(0, uaddr); | |
621 | if (ret != 0) | |
622 | return ret; | |
623 | uaddr += PAGE_SIZE; | |
624 | } | |
625 | ||
626 | /* Check whether the range spilled into the next page. */ | |
627 | if (((unsigned long)uaddr & PAGE_MASK) == | |
628 | ((unsigned long)end & PAGE_MASK)) | |
629 | ret = __put_user(0, end); | |
630 | ||
631 | return ret; | |
632 | } | |
633 | ||
634 | static inline int fault_in_multipages_readable(const char __user *uaddr, | |
635 | int size) | |
636 | { | |
637 | volatile char c; | |
af2e8409 | 638 | int ret = 0; |
f56f821f DV |
639 | const char __user *end = uaddr + size - 1; |
640 | ||
641 | if (unlikely(size == 0)) | |
af2e8409 | 642 | return ret; |
f56f821f DV |
643 | |
644 | while (uaddr <= end) { | |
645 | ret = __get_user(c, uaddr); | |
646 | if (ret != 0) | |
647 | return ret; | |
648 | uaddr += PAGE_SIZE; | |
649 | } | |
650 | ||
651 | /* Check whether the range spilled into the next page. */ | |
652 | if (((unsigned long)uaddr & PAGE_MASK) == | |
653 | ((unsigned long)end & PAGE_MASK)) { | |
654 | ret = __get_user(c, end); | |
655 | (void)c; | |
656 | } | |
657 | ||
658 | return ret; | |
659 | } | |
660 | ||
529ae9aa NP |
661 | int add_to_page_cache_locked(struct page *page, struct address_space *mapping, |
662 | pgoff_t index, gfp_t gfp_mask); | |
663 | int add_to_page_cache_lru(struct page *page, struct address_space *mapping, | |
664 | pgoff_t index, gfp_t gfp_mask); | |
97cecb5a | 665 | extern void delete_from_page_cache(struct page *page); |
c4843a75 GT |
666 | extern void __delete_from_page_cache(struct page *page, void *shadow, |
667 | struct mem_cgroup *memcg); | |
ef6a3c63 | 668 | int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask); |
529ae9aa NP |
669 | |
670 | /* | |
671 | * Like add_to_page_cache_locked, but used to add newly allocated pages: | |
48c935ad | 672 | * the page is new, so we can just run __SetPageLocked() against it. |
529ae9aa NP |
673 | */ |
674 | static inline int add_to_page_cache(struct page *page, | |
675 | struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) | |
676 | { | |
677 | int error; | |
678 | ||
48c935ad | 679 | __SetPageLocked(page); |
529ae9aa NP |
680 | error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); |
681 | if (unlikely(error)) | |
48c935ad | 682 | __ClearPageLocked(page); |
529ae9aa NP |
683 | return error; |
684 | } | |
685 | ||
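/*
 * Illustrative sketch (editor's addition, not part of this header): the
 * typical way a newly allocated page enters the page cache on the
 * read/readahead path - insert it with add_to_page_cache_lru() and, on
 * success, hand it (still locked) to ->readpage, which unlocks it on
 * completion.  Error handling is condensed; compare mm/readahead.c.
 */
static inline int example_readahead_one(struct file *file,
					struct address_space *mapping,
					pgoff_t index, gfp_t gfp)
{
	struct page *page = __page_cache_alloc(gfp);
	int error;

	if (!page)
		return -ENOMEM;
	error = add_to_page_cache_lru(page, mapping, index, gfp);
	if (!error)
		error = mapping->a_ops->readpage(file, page);
	page_cache_release(page);	/* drop the allocation ref; the cache keeps its own */
	return error;
}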
b57c2cb9 FF |
686 | static inline unsigned long dir_pages(struct inode *inode) |
687 | { | |
688 | return (unsigned long)(inode->i_size + PAGE_CACHE_SIZE - 1) >> | |
689 | PAGE_CACHE_SHIFT; | |
690 | } | |
691 | ||
1da177e4 | 692 | #endif /* _LINUX_PAGEMAP_H */ |