#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>

/*
 * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
#define AS_EIO          (__GFP_BITS_SHIFT + 0)  /* IO error on async write */
#define AS_ENOSPC       (__GFP_BITS_SHIFT + 1)  /* ENOSPC on async write */

static inline void mapping_set_error(struct address_space *mapping, int error)
{
        if (error) {
                if (error == -ENOSPC)
                        set_bit(AS_ENOSPC, &mapping->flags);
                else
                        set_bit(AS_EIO, &mapping->flags);
        }
}
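/*
 * Illustrative sketch (not part of this header): a filesystem's async
 * writeback completion path could record an I/O error against the owning
 * mapping before ending writeback, so that a later fsync() can report it.
 * The helper name my_end_buffer_write() below is hypothetical.
 *
 *        static void my_end_buffer_write(struct page *page, int error)
 *        {
 *                mapping_set_error(page->mapping, error);
 *                end_page_writeback(page);
 *        }
 */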

static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
        return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic. Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
        m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
                                (__force unsigned long)mask;
}
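/*
 * Illustrative sketch (not part of this header): a filesystem that must not
 * recurse into itself during page-cache allocation can narrow the mapping's
 * allocation mask while setting up the inode:
 *
 *        mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
 *
 * The mask is then picked up by page_cache_alloc() below.
 */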

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT        PAGE_SHIFT
#define PAGE_CACHE_SIZE         PAGE_SIZE
#define PAGE_CACHE_MASK         PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)  (((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)

#define page_cache_get(page)            get_page(page)
#define page_cache_release(page)        put_page(page)
void release_pages(struct page **pages, int nr, int cold);
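/*
 * Illustrative sketch (not part of this header): converting a byte length
 * into a count of page-cache pages. With the usual PAGE_CACHE_SIZE of 4096,
 * an i_size of 10000 bytes spans three pages (indices 0, 1 and 2):
 *
 *        pgoff_t nr_pages = (i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 */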

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
        return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

typedef int filler_t(void *, struct page *);

extern struct page * find_get_page(struct address_space *mapping,
                                unsigned long index);
extern struct page * find_lock_page(struct address_space *mapping,
                                unsigned long index);
extern struct page * find_or_create_page(struct address_space *mapping,
                                unsigned long index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
                        unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
                        unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
                        int tag, unsigned int nr_pages, struct page **pages);
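/*
 * Illustrative sketch (not part of this header): a typical gang-lookup loop.
 * Each page returned by find_get_pages() comes with an elevated reference
 * count, so the caller must drop it with page_cache_release():
 *
 *        struct page *pages[16];
 *        pgoff_t start = 0;
 *        unsigned int i, nr;
 *
 *        while ((nr = find_get_pages(mapping, start, 16, pages))) {
 *                for (i = 0; i < nr; i++) {
 *                        // ...inspect pages[i]...
 *                        start = pages[i]->index + 1;
 *                        page_cache_release(pages[i]);
 *                }
 *        }
 */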

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping, unsigned long index)
{
        return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * grab_cache_page_nowait(struct address_space *mapping,
                                unsigned long index);
extern struct page * read_cache_page_async(struct address_space *mapping,
                                unsigned long index, filler_t *filler,
                                void *data);
extern struct page * read_cache_page(struct address_space *mapping,
                                unsigned long index, filler_t *filler,
                                void *data);
extern int read_cache_pages(struct address_space *mapping,
                                struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page_async(
                                struct address_space *mapping,
                                unsigned long index, void *data)
{
        filler_t *filler = (filler_t *)mapping->a_ops->readpage;
        return read_cache_page_async(mapping, index, filler, data);
}

static inline struct page *read_mapping_page(struct address_space *mapping,
                                unsigned long index, void *data)
{
        filler_t *filler = (filler_t *)mapping->a_ops->readpage;
        return read_cache_page(mapping, index, filler, data);
}
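/*
 * Illustrative sketch (not part of this header): reading page index n of an
 * inode's data through its mapping. read_mapping_page() returns an ERR_PTR()
 * on failure, or an uptodate page with its reference count raised:
 *
 *        struct page *page = read_mapping_page(inode->i_mapping, n, NULL);
 *        if (IS_ERR(page))
 *                return PTR_ERR(page);
 *        // ...use the page contents, e.g. via kmap()...
 *        page_cache_release(page);
 */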

int add_to_page_cache(struct page *page, struct address_space *mapping,
                                unsigned long index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                                unsigned long index, gfp_t gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
        return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
                                        unsigned long address)
{
        pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
        pgoff += vma->vm_pgoff;
        return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}
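/*
 * Illustrative sketch (not part of this header): with 4K pages (so
 * PAGE_CACHE_SHIFT == PAGE_SHIFT and the final shift is a no-op), a fault at
 * vma->vm_start + 0x3000 in a mapping whose vm_pgoff is 10 resolves to
 * page-cache index 10 + 3 = 13 of the backing file.
 */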

extern void FASTCALL(__lock_page(struct page *page));
extern void FASTCALL(__lock_page_nosync(struct page *page));
extern void FASTCALL(unlock_page(struct page *page));

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
        might_sleep();
        if (TestSetPageLocked(page))
                __lock_page(page);
}
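/*
 * Illustrative sketch (not part of this header): the usual truncate-safe
 * pattern is to take the page lock and then re-check that the page still
 * belongs to the mapping before touching it:
 *
 *        lock_page(page);
 *        if (page->mapping == mapping) {
 *                // ...page is locked and still attached, safe to operate...
 *        }
 *        unlock_page(page);
 */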

/*
 * lock_page_nosync should only be used if we can't pin the page's inode.
 * Doesn't play quite so well with block device plugging.
 */
static inline void lock_page_nosync(struct page *page)
{
        might_sleep();
        if (TestSetPageLocked(page))
                __lock_page_nosync(page);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void FASTCALL(wait_on_page_bit(struct page *page, int bit_nr));

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
        if (PageLocked(page))
                wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
        if (PageWriteback(page))
                wait_on_page_bit(page, PG_writeback);
}
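/*
 * Illustrative sketch (not part of this header): after kicking off a read
 * with a caller-held page reference, a reader can sleep until the I/O
 * completes (the page is unlocked) and then check the result:
 *
 *        wait_on_page_locked(page);
 *        if (!PageUptodate(page))
 *                return -EIO;
 */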

extern void end_page_writeback(struct page *page);

/*
 * Fault a userspace page into pagetables. Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient. That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
        int ret;

        /*
         * Writing zeroes into userspace here is OK, because we know that if
         * the zero gets there, we'll be overwriting it.
         */
        ret = __put_user(0, uaddr);
        if (ret == 0) {
                char __user *end = uaddr + size - 1;

                /*
                 * If the page was already mapped, this will get a cache miss
                 * for sure, so try to avoid doing it.
                 */
                if (((unsigned long)uaddr & PAGE_MASK) !=
                                ((unsigned long)end & PAGE_MASK))
                        ret = __put_user(0, end);
        }
        return ret;
}

static inline void fault_in_pages_readable(const char __user *uaddr, int size)
{
        volatile char c;
        int ret;

        ret = __get_user(c, uaddr);
        if (ret == 0) {
                const char __user *end = uaddr + size - 1;

                if (((unsigned long)uaddr & PAGE_MASK) !=
                                ((unsigned long)end & PAGE_MASK))
                        __get_user(c, end);
        }
}
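/*
 * Illustrative sketch (not part of this header): generic write paths
 * pre-fault the user buffer *before* locking the destination pagecache page,
 * so that the copy from userspace done under the page lock is unlikely to
 * fault and deadlock on the very page being written:
 *
 *        fault_in_pages_readable(buf, bytes);
 *        page = grab_cache_page(mapping, index);
 *        // ...page returned locked; copy the data, then unlock and release...
 */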

#endif /* _LINUX_PAGEMAP_H */