include/linux/highmem.h (deliverable/linux.git, merge commit 'v2.6.37-rc1' into for-2.6.38)

#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif
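
/*
 * Illustrative sketch (not part of this header): a driver doing DMA to or
 * from a vmap()ed buffer typically brackets the transfer with the helpers
 * above so aliasing caches stay coherent. The function names and variables
 * below are hypothetical.
 */
#if 0
static void example_dma_to_device(void *vmap_addr, int len)
{
	/* CPU filled the buffer through the vmap alias: push it to RAM */
	flush_kernel_vmap_range(vmap_addr, len);
	/* ... start DMA from memory to the device ... */
}

static void example_dma_from_device(void *vmap_addr, int len)
{
	/* ... DMA from the device into memory completes ... */
	/* drop stale/speculatively-filled lines before the CPU reads */
	invalidate_kernel_vmap_range(vmap_addr, len);
}
#endif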

#include <asm/kmap_types.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

#define totalhigh_pages 0UL

#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap(struct page *page)
{
}

static inline void *__kmap_atomic(struct page *page)
{
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, prot)	__kmap_atomic(page)

static inline void __kunmap_atomic(void *addr)
{
	pagefault_enable();
}

#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)

#define kmap_flush_unused()	do {} while (0)
#endif

#endif /* CONFIG_HIGHMEM */
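
/*
 * Illustrative sketch (not part of this header): kmap()/kunmap() is the
 * sleeping variant, usable from process context to hold a mapping across
 * operations that may block. The function name is hypothetical. Note that
 * kunmap() takes the page, not the returned address.
 */
#if 0
static void example_fill_page(struct page *page, const char *src, size_t len)
{
	char *kaddr = kmap(page);	/* may sleep waiting for a slot */

	memcpy(kaddr, src, min(len, (size_t)PAGE_SIZE));
	kunmap(page);
}
#endif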

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)

DECLARE_PER_CPU(int, __kmap_atomic_idx);

static inline int kmap_atomic_idx_push(void)
{
	int idx = __get_cpu_var(__kmap_atomic_idx)++;
#ifdef CONFIG_DEBUG_HIGHMEM
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	BUG_ON(idx > KM_TYPE_NR);
#endif
	return idx;
}

static inline int kmap_atomic_idx(void)
{
	return __get_cpu_var(__kmap_atomic_idx) - 1;
}

static inline int kmap_atomic_idx_pop(void)
{
	int idx = --__get_cpu_var(__kmap_atomic_idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(idx < 0);
#endif
	return idx;
}

#endif
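
/*
 * Illustrative sketch (not part of this header): how an architecture's
 * __kmap_atomic()/__kunmap_atomic() pair typically drives the per-CPU
 * index stack above. Details (fixmap layout, pte install, TLB flush)
 * vary per arch; this is a simplified, hypothetical outline.
 */
#if 0
void *__kmap_atomic(struct page *page)
{
	int idx, type;
	unsigned long vaddr;

	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	/* ... install the pte for this fixmap slot ... */
	return (void *)vaddr;
}

void __kunmap_atomic(void *kvaddr)
{
	/* ... if kvaddr is a fixmap slot: clear its pte, then ... */
	kmap_atomic_idx_pop();
	pagefault_enable();
}
#endif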

/*
 * Make both: kmap_atomic(page, idx) and kmap_atomic(page) work.
 */
#define kmap_atomic(page, args...) __kmap_atomic(page)

/*
 * Prevent people trying to call kunmap_atomic() as if it were kunmap().
 * kunmap_atomic() should get the return value of kmap_atomic, not the page.
 */
#define kunmap_atomic(addr, args...)				\
do {								\
	BUILD_BUG_ON(__same_type((addr), struct page *));	\
	__kunmap_atomic(addr);					\
} while (0)
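
/*
 * Illustrative sketch (not part of this header): a typical atomic mapping.
 * Nothing between kmap_atomic() and kunmap_atomic() may sleep, and
 * kunmap_atomic() takes the returned address, not the page. The function
 * name is hypothetical; the legacy KM_* argument is accepted but ignored
 * by the wrapper above.
 */
#if 0
static void example_zero_first_word(struct page *page)
{
	u32 *kaddr = kmap_atomic(page);

	kaddr[0] = 0;		/* no sleeping allowed in here */
	kunmap_atomic(kaddr);
}
#endif
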
121
122 /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
123 #ifndef clear_user_highpage
124 static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
125 {
126 void *addr = kmap_atomic(page, KM_USER0);
127 clear_user_page(addr, vaddr, page);
128 kunmap_atomic(addr, KM_USER0);
129 }
130 #endif
131
132 #ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
133 /**
134 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
135 * @movableflags: The GFP flags related to the pages future ability to move like __GFP_MOVABLE
136 * @vma: The VMA the page is to be allocated for
137 * @vaddr: The virtual address the page will be inserted into
138 *
139 * This function will allocate a page for a VMA but the caller is expected
140 * to specify via movableflags whether the page will be movable in the
141 * future or not
142 *
143 * An architecture may override this function by defining
144 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own
145 * implementation.
146 */
147 static inline struct page *
148 __alloc_zeroed_user_highpage(gfp_t movableflags,
149 struct vm_area_struct *vma,
150 unsigned long vaddr)
151 {
152 struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
153 vma, vaddr);
154
155 if (page)
156 clear_user_highpage(page, vaddr);
157
158 return page;
159 }
160 #endif
161
162 /**
163 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
164 * @vma: The VMA the page is to be allocated for
165 * @vaddr: The virtual address the page will be inserted into
166 *
167 * This function will allocate a page for a VMA that the caller knows will
168 * be able to migrate in the future using move_pages() or reclaimed
169 */
170 static inline struct page *
171 alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
172 unsigned long vaddr)
173 {
174 return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
175 }
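
/*
 * Illustrative sketch (not part of this header): the anonymous-fault path
 * is the classic caller of alloc_zeroed_user_highpage_movable(). A
 * simplified, hypothetical outline:
 */
#if 0
static struct page *example_anon_fault(struct vm_area_struct *vma,
				       unsigned long address)
{
	/* zeroed, highmem-capable, and marked movable for compaction */
	struct page *page = alloc_zeroed_user_highpage_movable(vma, address);

	if (!page)
		return NULL;	/* caller falls back to VM_FAULT_OOM */
	/* ... map the page at the faulting address ... */
	return page;
}
#endif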

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page, KM_USER0);
	clear_page(kaddr);
	kunmap_atomic(kaddr, KM_USER0);
}

static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}

static inline void __deprecated memclear_highpage_flush(struct page *page,
			unsigned int offset, unsigned int size)
{
	zero_user(page, offset, size);
}
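
/*
 * Illustrative sketch (not part of this header): filesystems use these
 * helpers to zero the invalid tail of a partially valid page, e.g. past
 * EOF after a truncate. The function name and variables are hypothetical.
 */
#if 0
static void example_zero_tail(struct page *page, loff_t isize)
{
	unsigned offset = isize & (PAGE_SIZE - 1);

	/* zero from EOF to the end of the page in one mapped pass */
	if (offset)
		zero_user_segment(page, offset, PAGE_SIZE);
}
#endif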

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto, KM_USER1);
	kunmap_atomic(vfrom, KM_USER0);
}

#endif

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_page(vto, vfrom);
	kunmap_atomic(vto, KM_USER1);
	kunmap_atomic(vfrom, KM_USER0);
}
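
/*
 * Illustrative sketch (not part of this header): copy_user_highpage() is
 * what a copy-on-write fault uses to duplicate the shared page into a
 * fresh one. A simplified, hypothetical outline:
 */
#if 0
static struct page *example_cow_copy(struct page *old, unsigned long address,
				     struct vm_area_struct *vma)
{
	struct page *new = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);

	if (new)
		copy_user_highpage(new, old, address, vma);
	return new;
}
#endif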

#endif /* _LINUX_HIGHMEM_H */