#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */

/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
#define AS_EIO		(__GFP_BITS_SHIFT + 0)	/* IO error on async write */
#define AS_ENOSPC	(__GFP_BITS_SHIFT + 1)	/* ENOSPC on async write */

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}

static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);

/*
 * speculatively take a reference to a page.
 * If the page is free (_count == 0), then _count is untouched, and 0
 * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _count.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _count (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
# ifdef CONFIG_PREEMPT
	VM_BUG_ON(!in_atomic());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON(page_count(page) == 0);
	atomic_inc(&page->_count);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON(PageTail(page));

	return 1;
}
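
/*
 * Illustrative sketch only, not part of the kernel API: roughly how the
 * lookup side applies steps 1-3 above.  The real lockless lookup lives in
 * mm/filemap.c (find_get_page) and handles more corner cases; the function
 * name here is made up for the example.
 */
static inline struct page *example_lockless_lookup(struct address_space *mapping,
						   pgoff_t offset)
{
	struct page *page;

	rcu_read_lock();
repeat:
	page = radix_tree_lookup(&mapping->page_tree, offset);	/* step 1 */
	if (page) {
		if (!page_cache_get_speculative(page))		/* step 2 */
			goto repeat;
		/* step 3: is the page still at this index?  If not, retry. */
		if (unlikely(page != radix_tree_lookup(&mapping->page_tree,
						       offset))) {
			page_cache_release(page);
			goto repeat;
		}
	}
	rcu_read_unlock();
	return page;
}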

static inline int page_freeze_refs(struct page *page, int count)
{
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON(page_count(page) != 0);
	VM_BUG_ON(count == 0);

	atomic_set(&page->_count, count);
}
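
/*
 * Illustrative sketch only, not a real kernel function: how a remove side
 * (steps A-C above, eg. reclaim in mm/vmscan.c) might use
 * page_freeze_refs()/page_unfreeze_refs().  The tree_lock must be held for
 * write around this; locking and the actual removal are elided here, and
 * the expected count of 2 assumes the caller holds one reference of its
 * own on top of the pagecache's reference.
 */
static inline int example_freeze_for_removal(struct page *page)
{
	/* Step A: the refcount must be exactly what we expect, else bail. */
	if (!page_freeze_refs(page, 2))
		return 0;

	if (unlikely(PageDirty(page))) {
		/* Changed our mind: restore the refcount and give up. */
		page_unfreeze_refs(page, 2);
		return 0;
	}

	/* Steps B and C (remove from pagecache, then free) would go here. */
	return 1;
}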

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

typedef int filler_t(void *, struct page *);

extern struct page * find_get_page(struct address_space *mapping,
				pgoff_t index);
extern struct page * find_lock_page(struct address_space *mapping,
				pgoff_t index);
extern struct page * find_or_create_page(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);

struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
						pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index);
extern struct page * read_cache_page_async(struct address_space *mapping,
				pgoff_t index, filler_t *filler,
				void *data);
extern struct page * read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler,
				void *data);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page_async(
				struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page_async(mapping, index, filler, data);
}

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}
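
/*
 * Typical use of read_mapping_page(), sketched here only for illustration
 * (the error handling is the caller's; IS_ERR()/PTR_ERR() come from
 * <linux/err.h>):
 *
 *	page = read_mapping_page(mapping, index, NULL);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	...			(page is uptodate and referenced)
 *	page_cache_release(page);
 *
 * The third argument is the "data" cookie handed to the filler
 * (->readpage); passing NULL is common when no struct file is at hand.
 */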

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		ClearPageLocked(page);
	return error;
}
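
/*
 * Illustrative sketch only, not a real kernel helper: allocating a fresh
 * page and inserting it, roughly the way readahead-style code does (real
 * callers typically use add_to_page_cache_lru() so the page also goes on
 * the LRU).  On success the page comes back locked and not uptodate.
 */
static inline struct page *example_alloc_and_add(struct address_space *mapping,
						 pgoff_t index)
{
	struct page *page = page_cache_alloc_cold(mapping);

	if (!page)
		return NULL;
	if (add_to_page_cache_lru(page, mapping, index,
					mapping_gfp_mask(mapping))) {
		/* Out of memory, or someone else got a page in first. */
		page_cache_release(page);
		return NULL;
	}
	return page;
}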

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern void __lock_page_nosync(struct page *page);
extern void unlock_page(struct page *page);

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (TestSetPageLocked(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (TestSetPageLocked(page))
		return __lock_page_killable(page);
	return 0;
}
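
/*
 * Illustrative sketch only, not a real kernel helper: the usual shape of a
 * lock_page_killable() caller, which must be prepared to back out if a
 * fatal signal arrives while sleeping on the lock.
 */
static inline int example_lock_page_killable_user(struct page *page)
{
	int error;

	error = lock_page_killable(page);
	if (error)
		return error;		/* -EINTR: killed while waiting */

	/* ... operate on the locked page ... */

	unlock_page(page);
	return 0;
}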

/*
 * lock_page_nosync should only be used if we can't pin the page's inode.
 * Doesn't play quite so well with block device plugging.
 */
static inline void lock_page_nosync(struct page *page)
{
	might_sleep();
	if (TestSetPageLocked(page))
		__lock_page_nosync(page);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait..
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}
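
/*
 * Illustrative sketch only: a common wait_on_page_locked() pattern.  After
 * read I/O has been started elsewhere, the page is unlocked when the read
 * completes, so waiting for the lock and then checking PageUptodate()
 * tells us whether the read succeeded.  The caller must already hold a
 * reference on the page.
 */
static inline int example_wait_for_read_completion(struct page *page)
{
	wait_on_page_locked(page);
	if (!PageUptodate(page))
		return -EIO;
	return 0;
}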

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __get_user(c, end);
	}
	return ret;
}
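
/*
 * Illustrative sketch only, not a real kernel helper: the classic write
 * path pattern (cf. mm/filemap.c).  The user buffer is faulted in before
 * any pagecache page is locked, so that the later copy from user space is
 * unlikely to fault while the page lock is held.
 */
static inline int example_prefault_write_buffer(const char __user *buf,
						int bytes)
{
	/*
	 * Bring in the user pages we will copy from first; bail out with
	 * -EFAULT rather than risk deadlocking against a locked, not yet
	 * uptodate pagecache page.
	 */
	if (unlikely(fault_in_pages_readable(buf, bytes)))
		return -EFAULT;
	return 0;
}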

#endif /* _LINUX_PAGEMAP_H */