Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef _LINUX_PAGEMAP_H |
2 | #define _LINUX_PAGEMAP_H | |
3 | ||
4 | /* | |
5 | * Copyright 1995 Linus Torvalds | |
6 | */ | |
7 | #include <linux/mm.h> | |
8 | #include <linux/fs.h> | |
9 | #include <linux/list.h> | |
10 | #include <linux/highmem.h> | |
11 | #include <linux/compiler.h> | |
12 | #include <asm/uaccess.h> | |
13 | #include <linux/gfp.h> | |
3e9f45bd | 14 | #include <linux/bitops.h> |
e286781d | 15 | #include <linux/hardirq.h> /* for in_interrupt() */ |
1da177e4 LT |
16 | |
17 | /* | |
18 | * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page | |
19 | * allocation mode flags. | |
20 | */ | |
9a896c9a LS |
21 | enum mapping_flags { |
22 | AS_EIO = __GFP_BITS_SHIFT + 0, /* IO error on async write */ | |
23 | AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */ | |
24 | AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */ | |
25 | #ifdef CONFIG_UNEVICTABLE_LRU | |
26 | AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */ | |
27 | #endif | |
28 | }; | |
1da177e4 | 29 | |
3e9f45bd GC |
30 | static inline void mapping_set_error(struct address_space *mapping, int error) |
31 | { | |
2185e69f | 32 | if (unlikely(error)) { |
3e9f45bd GC |
33 | if (error == -ENOSPC) |
34 | set_bit(AS_ENOSPC, &mapping->flags); | |
35 | else | |
36 | set_bit(AS_EIO, &mapping->flags); | |
37 | } | |
38 | } | |
39 | ||
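These error bits are sticky: a writeback completion path latches them with mapping_set_error(), and a later sync path reports and clears them. A minimal sketch of both halves, assuming hypothetical example_* helpers; the test-and-clear pattern mirrors what filemap_fdatawait()-era callers did:

```c
/* Hedged sketch: the example_* helpers are hypothetical, not part of this header. */
static void example_note_writeback_error(struct address_space *mapping, int err)
{
	mapping_set_error(mapping, err);	/* latches AS_EIO or AS_ENOSPC */
}

static int example_collect_writeback_error(struct address_space *mapping)
{
	int ret = 0;

	/* report once, then clear the sticky bits */
	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}
```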
ba9ddf49 | 40 | #ifdef CONFIG_UNEVICTABLE_LRU |
ba9ddf49 LS |
41 | |
42 | static inline void mapping_set_unevictable(struct address_space *mapping) | |
43 | { | |
44 | set_bit(AS_UNEVICTABLE, &mapping->flags); | |
45 | } | |
46 | ||
89e004ea LS |
47 | static inline void mapping_clear_unevictable(struct address_space *mapping) |
48 | { | |
49 | clear_bit(AS_UNEVICTABLE, &mapping->flags); | |
50 | } | |
51 | ||
ba9ddf49 LS |
52 | static inline int mapping_unevictable(struct address_space *mapping) |
53 | { | |
89e004ea LS |
54 | if (likely(mapping)) |
55 | return test_bit(AS_UNEVICTABLE, &mapping->flags); | |
56 | return !!mapping; | |
ba9ddf49 LS |
57 | } |
58 | #else | |
59 | static inline void mapping_set_unevictable(struct address_space *mapping) { } | |
89e004ea | 60 | static inline void mapping_clear_unevictable(struct address_space *mapping) { } |
ba9ddf49 LS |
61 | static inline int mapping_unevictable(struct address_space *mapping) |
62 | { | |
63 | return 0; | |
64 | } | |
65 | #endif | |
66 | ||
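For context, reclaim consults this flag when deciding whether a page may live on the normal LRU lists; a rough, hedged sketch modelled on page_evictable() (the example_ name is hypothetical):

```c
static int example_page_is_evictable(struct page *page)
{
	/* e.g. ramdisk pages or SHM_LOCK'ed shmem stay off the evictable LRUs */
	if (mapping_unevictable(page_mapping(page)))
		return 0;
	return 1;
}
```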
dd0fc66f | 67 | static inline gfp_t mapping_gfp_mask(struct address_space * mapping) |
1da177e4 | 68 | { |
260b2367 | 69 | return (__force gfp_t)mapping->flags & __GFP_BITS_MASK; |
1da177e4 LT |
70 | } |
71 | ||
72 | /* | |
73 | * This is non-atomic. Only to be used before the mapping is activated. | |
74 | * Probably needs a barrier... | |
75 | */ | |
260b2367 | 76 | static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask) |
1da177e4 | 77 | { |
260b2367 AV |
78 | m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) | |
79 | (__force unsigned long)mask; | |
1da177e4 LT |
80 | } |
81 | ||
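Typical use: a filesystem that must not recurse into itself while allocating pagecache pages masks __GFP_FS out of its mapping's allocation mask at inode setup time. A hedged sketch (the example_ helper is hypothetical):

```c
static inline void example_restrict_mapping_allocs(struct inode *inode)
{
	/* non-atomic, so do this before the mapping is in active use */
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
}
```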
82 | /* | |
83 | * The page cache can be done in larger chunks than | |
84 | * one page, because it allows for more efficient | |
85 | * throughput (it can then be mapped into user | |
86 | * space in smaller chunks for the same flexibility). | |
87 | * | |
88 | * Or rather, it _will_ be done in larger chunks. | |
89 | */ | |
90 | #define PAGE_CACHE_SHIFT PAGE_SHIFT | |
91 | #define PAGE_CACHE_SIZE PAGE_SIZE | |
92 | #define PAGE_CACHE_MASK PAGE_MASK | |
93 | #define PAGE_CACHE_ALIGN(addr) (((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK) | |
94 | ||
95 | #define page_cache_get(page) get_page(page) | |
96 | #define page_cache_release(page) put_page(page) | |
97 | void release_pages(struct page **pages, int nr, int cold); | |
98 | ||
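The usual arithmetic with these macros splits a file position into a pagecache index plus a byte offset within that page; a small illustrative sketch (the helper name is made up):

```c
static inline void example_split_pos(loff_t pos, pgoff_t *index,
				     unsigned long *offset)
{
	*index = pos >> PAGE_CACHE_SHIFT;	/* which pagecache page */
	*offset = pos & (PAGE_CACHE_SIZE - 1);	/* byte offset inside it */
}
```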
e286781d NP |
99 | /* |
100 | * speculatively take a reference to a page. | |
101 | * If the page is free (_count == 0), then _count is untouched, and 0 | |
102 | * is returned. Otherwise, _count is incremented by 1 and 1 is returned. | |
103 | * | |
104 | * This function must be called inside the same rcu_read_lock() section as has | |
105 | * been used to lookup the page in the pagecache radix-tree (or page table): | |
106 | * this allows allocators to use a synchronize_rcu() to stabilize _count. | |
107 | * | |
108 | * Unless an RCU grace period has passed, the count of all pages coming out | |
109 | * of the allocator must be considered unstable. page_count may return higher | |
110 | * than expected, and put_page must be able to do the right thing when the | |
111 | * page has been finished with, no matter what it is subsequently allocated | |
112 | * for (because put_page is what is used here to drop an invalid speculative | |
113 | * reference). | |
114 | * | |
115 | * This is the interesting part of the lockless pagecache (and lockless | |
116 | * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page) | |
117 | * has the following pattern: | |
118 | * 1. find page in radix tree | |
119 | * 2. conditionally increment refcount | |
120 | * 3. check the page is still in pagecache (if no, goto 1) | |
121 | * | |
122 | * Remove-side that cares about stability of _count (eg. reclaim) has the | |
123 | * following (with tree_lock held for write): | |
124 | * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg) | |
125 | * B. remove page from pagecache | |
126 | * C. free the page | |
127 | * | |
128 | * There are 2 critical interleavings that matter: | |
129 | * - 2 runs before A: in this case, A sees elevated refcount and bails out | |
130 | * - A runs before 2: in this case, 2 sees zero refcount and retries; | |
131 | * subsequently, B will complete and 1 will find no page, causing the | |
132 | * lookup to return NULL. | |
133 | * | |
134 | * It is possible that between 1 and 2, the page is removed then the exact same | |
135 | * page is inserted into the same position in pagecache. That's OK: the | |
136 | * old find_get_page using tree_lock could equally have run before or after | |
137 | * such a re-insertion, depending on the order in which locks are granted. | |
138 | * | |
139 | * Lookups racing against pagecache insertion aren't a big problem: either 1 | |
140 | * will find the page or it will not. Likewise, the old find_get_page could run | |
141 | * either before the insertion or afterwards, depending on timing. | |
142 | */ | |
143 | static inline int page_cache_get_speculative(struct page *page) | |
144 | { | |
145 | VM_BUG_ON(in_interrupt()); | |
146 | ||
147 | #if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU) | |
148 | # ifdef CONFIG_PREEMPT | |
149 | VM_BUG_ON(!in_atomic()); | |
150 | # endif | |
151 | /* | |
152 | * Preempt must be disabled here - we rely on rcu_read_lock doing | |
153 | * this for us. | |
154 | * | |
155 | * Pagecache won't be truncated from interrupt context, so if we have | |
156 | * found a page in the radix tree here, we have pinned its refcount by | |
157 | * disabling preempt, and hence no need for the "speculative get" that | |
158 | * SMP requires. | |
159 | */ | |
160 | VM_BUG_ON(page_count(page) == 0); | |
161 | atomic_inc(&page->_count); | |
162 | ||
163 | #else | |
164 | if (unlikely(!get_page_unless_zero(page))) { | |
165 | /* | |
166 | * Either the page has been freed, or will be freed. | |
167 | * In either case, retry here and the caller should | |
168 | * do the right thing (see comments above). | |
169 | */ | |
170 | return 0; | |
171 | } | |
172 | #endif | |
173 | VM_BUG_ON(PageTail(page)); | |
174 | ||
175 | return 1; | |
176 | } | |
177 | ||
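The lookup-side pattern described in the comment above (steps 1-3) is implemented in mm/filemap.c; the following is only a hedged sketch of that shape, not the real find_get_page():

```c
static struct page *example_lockless_lookup(struct address_space *mapping,
					    pgoff_t index)
{
	struct page *page;

	rcu_read_lock();
repeat:
	page = radix_tree_lookup(&mapping->page_tree, index);	/* step 1 */
	if (page) {
		if (!page_cache_get_speculative(page))		/* step 2 */
			goto repeat;
		/* step 3: recheck it is still the page at this index */
		if (unlikely(page != radix_tree_lookup(&mapping->page_tree,
						       index))) {
			page_cache_release(page);
			goto repeat;
		}
	}
	rcu_read_unlock();
	return page;
}
```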
ce0ad7f0 NP |
178 | /* |
179 | * Same as above, but adds "count" instead of incrementing by one (could just be merged). | |
180 | */ | |
181 | static inline int page_cache_add_speculative(struct page *page, int count) | |
182 | { | |
183 | VM_BUG_ON(in_interrupt()); | |
184 | ||
185 | #if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU) | |
186 | # ifdef CONFIG_PREEMPT | |
187 | VM_BUG_ON(!in_atomic()); | |
188 | # endif | |
189 | VM_BUG_ON(page_count(page) == 0); | |
190 | atomic_add(count, &page->_count); | |
191 | ||
192 | #else | |
193 | if (unlikely(!atomic_add_unless(&page->_count, count, 0))) | |
194 | return 0; | |
195 | #endif | |
196 | VM_BUG_ON(PageCompound(page) && page != compound_head(page)); | |
197 | ||
198 | return 1; | |
199 | } | |
200 | ||
e286781d NP |
201 | static inline int page_freeze_refs(struct page *page, int count) |
202 | { | |
203 | return likely(atomic_cmpxchg(&page->_count, count, 0) == count); | |
204 | } | |
205 | ||
206 | static inline void page_unfreeze_refs(struct page *page, int count) | |
207 | { | |
208 | VM_BUG_ON(page_count(page) != 0); | |
209 | VM_BUG_ON(count == 0); | |
210 | ||
211 | atomic_set(&page->_count, count); | |
212 | } | |
213 | ||
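The remove side of the protocol (steps A-C in the big comment above) pairs with these helpers; a hedged sketch loosely modelled on __remove_mapping() in mm/vmscan.c, assuming mapping->tree_lock is held for write:

```c
static int example_try_remove(struct address_space *mapping, struct page *page)
{
	/* A: freeze the refcount; bail out if anyone else holds a reference
	 * (2 = one ref for the pagecache, one for our isolation of the page) */
	if (!page_freeze_refs(page, 2))
		return 0;
	/* B: remove the page from the pagecache radix tree */
	radix_tree_delete(&mapping->page_tree, page->index);
	page->mapping = NULL;
	/* C: the caller is now free to release the page */
	return 1;
}
```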
44110fe3 | 214 | #ifdef CONFIG_NUMA |
2ae88149 | 215 | extern struct page *__page_cache_alloc(gfp_t gfp); |
44110fe3 | 216 | #else |
2ae88149 NP |
217 | static inline struct page *__page_cache_alloc(gfp_t gfp) |
218 | { | |
219 | return alloc_pages(gfp, 0); | |
220 | } | |
221 | #endif | |
222 | ||
1da177e4 LT |
223 | static inline struct page *page_cache_alloc(struct address_space *x) |
224 | { | |
2ae88149 | 225 | return __page_cache_alloc(mapping_gfp_mask(x)); |
1da177e4 LT |
226 | } |
227 | ||
228 | static inline struct page *page_cache_alloc_cold(struct address_space *x) | |
229 | { | |
2ae88149 | 230 | return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD); |
1da177e4 LT |
231 | } |
232 | ||
233 | typedef int filler_t(void *, struct page *); | |
234 | ||
235 | extern struct page * find_get_page(struct address_space *mapping, | |
57f6b96c | 236 | pgoff_t index); |
1da177e4 | 237 | extern struct page * find_lock_page(struct address_space *mapping, |
57f6b96c | 238 | pgoff_t index); |
1da177e4 | 239 | extern struct page * find_or_create_page(struct address_space *mapping, |
57f6b96c | 240 | pgoff_t index, gfp_t gfp_mask); |
1da177e4 LT |
241 | unsigned find_get_pages(struct address_space *mapping, pgoff_t start, |
242 | unsigned int nr_pages, struct page **pages); | |
ebf43500 JA |
243 | unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start, |
244 | unsigned int nr_pages, struct page **pages); | |
1da177e4 LT |
245 | unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index, |
246 | int tag, unsigned int nr_pages, struct page **pages); | |
247 | ||
54566b2c NP |
248 | struct page *grab_cache_page_write_begin(struct address_space *mapping, |
249 | pgoff_t index, unsigned flags); | |
afddba49 | 250 | |
1da177e4 LT |
251 | /* |
252 | * Returns the locked page at the given index in the given cache, creating it if needed. | |
253 | */ | |
57f6b96c FW |
254 | static inline struct page *grab_cache_page(struct address_space *mapping, |
255 | pgoff_t index) | |
1da177e4 LT |
256 | { |
257 | return find_or_create_page(mapping, index, mapping_gfp_mask(mapping)); | |
258 | } | |
259 | ||
260 | extern struct page * grab_cache_page_nowait(struct address_space *mapping, | |
57f6b96c | 261 | pgoff_t index); |
6fe6900e | 262 | extern struct page * read_cache_page_async(struct address_space *mapping, |
57f6b96c | 263 | pgoff_t index, filler_t *filler, |
6fe6900e | 264 | void *data); |
1da177e4 | 265 | extern struct page * read_cache_page(struct address_space *mapping, |
57f6b96c | 266 | pgoff_t index, filler_t *filler, |
1da177e4 LT |
267 | void *data); |
268 | extern int read_cache_pages(struct address_space *mapping, | |
269 | struct list_head *pages, filler_t *filler, void *data); | |
270 | ||
6fe6900e NP |
271 | static inline struct page *read_mapping_page_async( |
272 | struct address_space *mapping, | |
57f6b96c | 273 | pgoff_t index, void *data) |
6fe6900e NP |
274 | { |
275 | filler_t *filler = (filler_t *)mapping->a_ops->readpage; | |
276 | return read_cache_page_async(mapping, index, filler, data); | |
277 | } | |
278 | ||
090d2b18 | 279 | static inline struct page *read_mapping_page(struct address_space *mapping, |
57f6b96c | 280 | pgoff_t index, void *data) |
090d2b18 PE |
281 | { |
282 | filler_t *filler = (filler_t *)mapping->a_ops->readpage; | |
283 | return read_cache_page(mapping, index, filler, data); | |
284 | } | |
285 | ||
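read_mapping_page() hands back either an uptodate page or an ERR_PTR(), so callers follow the pattern below (a hedged sketch; the helper name is invented):

```c
static struct page *example_read_index(struct address_space *mapping,
				       pgoff_t index)
{
	struct page *page = read_mapping_page(mapping, index, NULL);

	if (IS_ERR(page))
		return page;		/* propagate -EIO etc. */
	/* ... kmap()/page_address() and use the contents ... */
	return page;
}
```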
1da177e4 LT |
286 | /* |
287 | * Return byte-offset into filesystem object for page. | |
288 | */ | |
289 | static inline loff_t page_offset(struct page *page) | |
290 | { | |
291 | return ((loff_t)page->index) << PAGE_CACHE_SHIFT; | |
292 | } | |
293 | ||
294 | static inline pgoff_t linear_page_index(struct vm_area_struct *vma, | |
295 | unsigned long address) | |
296 | { | |
297 | pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT; | |
298 | pgoff += vma->vm_pgoff; | |
299 | return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT); | |
300 | } | |
301 | ||
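As a worked example with 4 KB pages: a VMA whose vm_pgoff is 16 maps file page 16 at vm_start, so an address three pages past vm_start yields pagecache index 16 + 3 = 19, and page_offset() of that page is 19 << PAGE_CACHE_SHIFT = 77824 bytes into the file.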
b3c97528 HH |
302 | extern void __lock_page(struct page *page); |
303 | extern int __lock_page_killable(struct page *page); | |
304 | extern void __lock_page_nosync(struct page *page); | |
305 | extern void unlock_page(struct page *page); | |
1da177e4 | 306 | |
f45840b5 | 307 | static inline void __set_page_locked(struct page *page) |
529ae9aa | 308 | { |
f45840b5 | 309 | __set_bit(PG_locked, &page->flags); |
529ae9aa NP |
310 | } |
311 | ||
f45840b5 | 312 | static inline void __clear_page_locked(struct page *page) |
529ae9aa | 313 | { |
f45840b5 | 314 | __clear_bit(PG_locked, &page->flags); |
529ae9aa NP |
315 | } |
316 | ||
317 | static inline int trylock_page(struct page *page) | |
318 | { | |
8413ac9d | 319 | return (likely(!test_and_set_bit_lock(PG_locked, &page->flags))); |
529ae9aa NP |
320 | } |
321 | ||
db37648c NP |
322 | /* |
323 | * lock_page may only be called if we have the page's inode pinned. | |
324 | */ | |
1da177e4 LT |
325 | static inline void lock_page(struct page *page) |
326 | { | |
327 | might_sleep(); | |
529ae9aa | 328 | if (!trylock_page(page)) |
1da177e4 LT |
329 | __lock_page(page); |
330 | } | |
db37648c | 331 | |
2687a356 MW |
332 | /* |
333 | * lock_page_killable is like lock_page but can be interrupted by fatal | |
334 | * signals. It returns 0 if it locked the page and -EINTR if it was | |
335 | * killed while waiting. | |
336 | */ | |
337 | static inline int lock_page_killable(struct page *page) | |
338 | { | |
339 | might_sleep(); | |
529ae9aa | 340 | if (!trylock_page(page)) |
2687a356 MW |
341 | return __lock_page_killable(page); |
342 | return 0; | |
343 | } | |
344 | ||
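A read path that must stay killable while sleeping on the page lock uses the return value like this (hedged sketch, compare do_generic_file_read(); the helper name is invented):

```c
static int example_lock_for_read(struct page *page)
{
	int error = lock_page_killable(page);

	if (unlikely(error)) {
		page_cache_release(page);
		return error;		/* -EINTR: a fatal signal arrived */
	}
	/* ... page is locked here ... */
	unlock_page(page);
	return 0;
}
```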
db37648c NP |
345 | /* |
346 | * lock_page_nosync should only be used if we can't pin the page's inode. | |
347 | * Doesn't play quite so well with block device plugging. | |
348 | */ | |
349 | static inline void lock_page_nosync(struct page *page) | |
350 | { | |
351 | might_sleep(); | |
529ae9aa | 352 | if (!trylock_page(page)) |
db37648c NP |
353 | __lock_page_nosync(page); |
354 | } | |
1da177e4 LT |
355 | |
356 | /* | |
357 | * This is exported only for wait_on_page_locked/wait_on_page_writeback. | |
358 | * Never use this directly! | |
359 | */ | |
b3c97528 | 360 | extern void wait_on_page_bit(struct page *page, int bit_nr); |
1da177e4 LT |
361 | |
362 | /* | |
363 | * Wait for a page to be unlocked. | |
364 | * | |
365 | * This must be called with the caller "holding" the page, | |
366 | * i.e. with an increased "page->count" so that the page won't | |
367 | * go away during the wait. | |
368 | */ | |
369 | static inline void wait_on_page_locked(struct page *page) | |
370 | { | |
371 | if (PageLocked(page)) | |
372 | wait_on_page_bit(page, PG_locked); | |
373 | } | |
374 | ||
375 | /* | |
376 | * Wait for a page to complete writeback | |
377 | */ | |
378 | static inline void wait_on_page_writeback(struct page *page) | |
379 | { | |
380 | if (PageWriteback(page)) | |
381 | wait_on_page_bit(page, PG_writeback); | |
382 | } | |
383 | ||
384 | extern void end_page_writeback(struct page *page); | |
385 | ||
385e1ca5 DH |
386 | /* |
387 | * Add an arbitrary waiter to a page's wait queue | |
388 | */ | |
389 | extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter); | |
390 | ||
1da177e4 LT |
391 | /* |
392 | * Fault a userspace page into pagetables. Return non-zero on a fault. | |
393 | * | |
394 | * This assumes that two userspace pages are always sufficient. That's | |
395 | * not true if PAGE_CACHE_SIZE > PAGE_SIZE. | |
396 | */ | |
397 | static inline int fault_in_pages_writeable(char __user *uaddr, int size) | |
398 | { | |
399 | int ret; | |
400 | ||
08291429 NP |
401 | if (unlikely(size == 0)) |
402 | return 0; | |
403 | ||
1da177e4 LT |
404 | /* |
405 | * Writing zeroes into userspace here is OK, because we know that if | |
406 | * the zero gets there, we'll be overwriting it. | |
407 | */ | |
408 | ret = __put_user(0, uaddr); | |
409 | if (ret == 0) { | |
410 | char __user *end = uaddr + size - 1; | |
411 | ||
412 | /* | |
413 | * If the page was already mapped, this will get a cache miss | |
414 | * for sure, so try to avoid doing it. | |
415 | */ | |
416 | if (((unsigned long)uaddr & PAGE_MASK) != | |
417 | ((unsigned long)end & PAGE_MASK)) | |
418 | ret = __put_user(0, end); | |
419 | } | |
420 | return ret; | |
421 | } | |
422 | ||
08291429 | 423 | static inline int fault_in_pages_readable(const char __user *uaddr, int size) |
1da177e4 LT |
424 | { |
425 | volatile char c; | |
426 | int ret; | |
427 | ||
08291429 NP |
428 | if (unlikely(size == 0)) |
429 | return 0; | |
430 | ||
1da177e4 LT |
431 | ret = __get_user(c, uaddr); |
432 | if (ret == 0) { | |
433 | const char __user *end = uaddr + size - 1; | |
434 | ||
435 | if (((unsigned long)uaddr & PAGE_MASK) != | |
436 | ((unsigned long)end & PAGE_MASK)) | |
08291429 | 437 | ret = __get_user(c, end); |
1da177e4 | 438 | } |
08291429 | 439 | return ret; |
1da177e4 LT |
440 | } |
441 | ||
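The classic use of these helpers is in the buffered write path: touch the user buffer before locking the pagecache page, so that a fault while copying cannot deadlock on a page we already hold locked (compare generic_perform_write()). A hedged sketch with an invented helper name:

```c
static int example_prefault_user_buffer(const char __user *buf, int bytes)
{
	if (unlikely(fault_in_pages_readable(buf, bytes)))
		return -EFAULT;
	/* ... now grab_cache_page_write_begin(), copy, commit ... */
	return 0;
}
```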
529ae9aa NP |
442 | int add_to_page_cache_locked(struct page *page, struct address_space *mapping, |
443 | pgoff_t index, gfp_t gfp_mask); | |
444 | int add_to_page_cache_lru(struct page *page, struct address_space *mapping, | |
445 | pgoff_t index, gfp_t gfp_mask); | |
446 | extern void remove_from_page_cache(struct page *page); | |
447 | extern void __remove_from_page_cache(struct page *page); | |
448 | ||
449 | /* | |
450 | * Like add_to_page_cache_locked, but used to add newly allocated pages: | |
f45840b5 | 451 | * the page is new, so we can just run __set_page_locked() against it. |
529ae9aa NP |
452 | */ |
453 | static inline int add_to_page_cache(struct page *page, | |
454 | struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) | |
455 | { | |
456 | int error; | |
457 | ||
f45840b5 | 458 | __set_page_locked(page); |
529ae9aa NP |
459 | error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); |
460 | if (unlikely(error)) | |
f45840b5 | 461 | __clear_page_locked(page); |
529ae9aa NP |
462 | return error; |
463 | } | |
464 | ||
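A common "populate one index" pattern, e.g. in readahead-style code, allocates a fresh page and inserts it with add_to_page_cache_lru(), tolerating the -EEXIST race when another task wins; a hedged sketch with an invented helper name:

```c
static int example_populate_index(struct address_space *mapping, pgoff_t index)
{
	struct page *page = page_cache_alloc_cold(mapping);
	int error;

	if (!page)
		return -ENOMEM;
	error = add_to_page_cache_lru(page, mapping, index,
				      mapping_gfp_mask(mapping));
	if (error)
		page_cache_release(page);	/* includes losing the -EEXIST race */
	return error;
}
```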
1da177e4 | 465 | #endif /* _LINUX_PAGEMAP_H */ |