/*
 * linux/mm/fremap.c
 *
 * Explicit pagetable population and nonlinear (random) mappings support.
 *
 * started by Ingo Molnar, Copyright (C) 2002, 2003
 */
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

static int mm_counter(struct page *page)
{
	return PageAnon(page) ? MM_ANONPAGES : MM_FILEPAGES;
}

static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;
	struct page *page;
	swp_entry_t entry;

	if (pte_present(pte)) {
		flush_cache_page(vma, addr, pte_pfn(pte));
		pte = ptep_clear_flush(vma, addr, ptep);
		page = vm_normal_page(vma, addr, pte);
		if (page) {
			if (pte_dirty(pte))
				set_page_dirty(page);
			update_hiwater_rss(mm);
			dec_mm_counter(mm, mm_counter(page));
			page_remove_rmap(page);
			page_cache_release(page);
		}
	} else {	/* zap_pte() is not called when pte_none() */
		if (!pte_file(pte)) {
			update_hiwater_rss(mm);
			entry = pte_to_swp_entry(pte);
			if (non_swap_entry(entry)) {
				if (is_migration_entry(entry)) {
					page = migration_entry_to_page(entry);
					dec_mm_counter(mm, mm_counter(page));
				}
			} else {
				free_swap_and_cache(entry);
				dec_mm_counter(mm, MM_SWAPENTS);
			}
		}
		pte_clear_not_present_full(mm, addr, ptep, 0);
	}
}

/*
 * Install a file pte to a given virtual memory address, release any
 * previously existing mapping.
 */
static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
	int err = -ENOMEM;
	pte_t *pte, ptfile;
	spinlock_t *ptl;

	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;

	ptfile = pgoff_to_pte(pgoff);

	if (!pte_none(*pte)) {
		if (pte_present(*pte) && pte_soft_dirty(*pte))
			pte_file_mksoft_dirty(ptfile);
		zap_pte(mm, vma, addr, pte);
	}

	set_pte_at(mm, addr, pte, ptfile);
	/*
	 * We don't need to run update_mmu_cache() here because the "file pte"
	 * being installed by install_file_pte() is not a real pte - it's a
	 * non-present entry (like a swap entry), noting what file offset should
	 * be mapped there when there's a fault (in a non-linear vma where
	 * that's not obvious).
	 */
	pte_unmap_unlock(pte, ptl);
	err = 0;
out:
	return err;
}

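/*
 * For illustration, a minimal sketch of the property a "file pte" gives
 * us: it is just a per-architecture encoding of a file offset in a
 * non-present pte, and the nonlinear fault path relies on the offset
 * surviving the round trip through pte_to_pgoff():
 *
 *	pte_t pte = pgoff_to_pte(pgoff);
 *
 *	BUG_ON(pte_present(pte));		// never a real translation
 *	BUG_ON(!pte_file(pte));			// marked as a file pte
 *	BUG_ON(pte_to_pgoff(pte) != pgoff);	// offset is recoverable
 *
 * How many offset bits fit is architecture-specific; see the
 * PTE_FILE_MAX_BITS check in sys_remap_file_pages() below.
 */
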
int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
			     unsigned long size, pgoff_t pgoff)
{
	struct mm_struct *mm = vma->vm_mm;
	int err;

	do {
		err = install_file_pte(mm, vma, addr, pgoff, vma->vm_page_prot);
		if (err)
			return err;

		size -= PAGE_SIZE;
		addr += PAGE_SIZE;
		pgoff++;
	} while (size);

	return 0;
}
EXPORT_SYMBOL(generic_file_remap_pages);

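/*
 * A mapping opts in to nonlinear remapping by pointing ->remap_pages at
 * this helper from its vm_operations_struct. A sketch, modeled on
 * generic_file_vm_ops (the name example_file_vm_ops is hypothetical):
 *
 *	static const struct vm_operations_struct example_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.remap_pages	= generic_file_remap_pages,
 *	};
 *
 * sys_remap_file_pages() below rejects any vma whose vm_ops lacks a
 * ->remap_pages method.
 */
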
/**
 * sys_remap_file_pages - remap arbitrary pages of an existing VM_SHARED vma
 * @start: start of the remapped virtual memory range
 * @size: size of the remapped virtual memory range
 * @prot: new protection bits of the range (see NOTE)
 * @pgoff: to-be-mapped page of the backing store file
 * @flags: 0 or MAP_NONBLOCK - the latter causes no I/O.
 *
 * sys_remap_file_pages remaps arbitrary pages of an existing VM_SHARED vma
 * (shared backing store file).
 *
 * This syscall works purely via pagetables, so it's the most efficient
 * way to map the same (large) file into a given virtual window. Unlike
 * mmap()/mremap() it does not create any new vmas. The new mappings are
 * also safe across swapout.
 *
 * NOTE: the @prot parameter right now is ignored (but must be zero),
 * and the vma's default protection is used. Arbitrary protections
 * might be implemented in the future.
 */
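/*
 * For illustration, a userspace sketch (fd and page_size are
 * placeholders): map four pages of a file MAP_SHARED, then make the
 * first page of the window show the third page of the file:
 *
 *	char *win = mmap(NULL, 4 * page_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	err = remap_file_pages(win, page_size, 0, 2, 0);
 *
 * Per the NOTE above, prot must be 0; pgoff counts pages, not bytes,
 * so win[0] now reads byte 2 * page_size of the file.
 */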
SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
		unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
{
	struct mm_struct *mm = current->mm;
	struct address_space *mapping;
	struct vm_area_struct *vma;
	int err = -EINVAL;
	int has_write_lock = 0;
	vm_flags_t vm_flags = 0;

	if (prot)
		return err;
	/*
	 * Sanitize the syscall parameters:
	 */
	start = start & PAGE_MASK;
	size = size & PAGE_MASK;

	/* Does the address range wrap, or is the span zero-sized? */
	if (start + size <= start)
		return err;

	/* Does pgoff wrap? */
	if (pgoff + (size >> PAGE_SHIFT) < pgoff)
		return err;

	/* Can we represent this offset inside this architecture's ptes? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
	if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
		return err;
#endif
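
	/*
	 * Worked example with hypothetical numbers: with 4K pages and an
	 * architecture where PTE_FILE_MAX_BITS == 32, a file pte can
	 * encode offsets up to 2^32 pages (2^44 bytes = 16TB into the
	 * file); any range reaching beyond that is rejected here.
	 */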

	/*
	 * We need down_write() to change vma->vm_flags, but start with
	 * the read lock and upgrade only if that turns out to be needed.
	 */
	down_read(&mm->mmap_sem);
retry:
	vma = find_vma(mm, start);

	/*
	 * Make sure the vma is shared, that it supports prefaulting,
	 * and that the remapped range is valid and fully within
	 * the single existing vma.
	 */
	if (!vma || !(vma->vm_flags & VM_SHARED))
		goto out;

	if (!vma->vm_ops || !vma->vm_ops->remap_pages)
		goto out;

	if (start < vma->vm_start || start + size > vma->vm_end)
		goto out;

	/* Must set VM_NONLINEAR before any pages are populated. */
	if (!(vma->vm_flags & VM_NONLINEAR)) {
		/*
		 * vm_private_data is used as a swapout cursor
		 * in a VM_NONLINEAR vma.
		 */
		if (vma->vm_private_data)
			goto out;

		/* Don't need a nonlinear mapping, exit success */
		if (pgoff == linear_page_index(vma, start)) {
			err = 0;
			goto out;
		}

		if (!has_write_lock) {
get_write_lock:
			up_read(&mm->mmap_sem);
			down_write(&mm->mmap_sem);
			has_write_lock = 1;
			goto retry;
		}
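		/*
		 * Note that up_read() followed by down_write() is not an
		 * atomic lock upgrade: the mm can change in the window
		 * between the two, which is why "goto retry" above
		 * revalidates the vma from scratch under the write lock.
		 */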
		mapping = vma->vm_file->f_mapping;
		/*
		 * page_mkclean doesn't work on nonlinear vmas, so if
		 * dirty pages need to be accounted, emulate with linear
		 * vmas.
		 */
		if (mapping_cap_account_dirty(mapping)) {
			unsigned long addr;
			struct file *file = get_file(vma->vm_file);
			/* mmap_region may free vma; grab the info now */
			vm_flags = vma->vm_flags;

			addr = mmap_region(file, start, size, vm_flags, pgoff);
			fput(file);
			if (IS_ERR_VALUE(addr)) {
				err = addr;
			} else {
				BUG_ON(addr != start);
				err = 0;
			}
			goto out_freed;
		}
		mutex_lock(&mapping->i_mmap_mutex);
		flush_dcache_mmap_lock(mapping);
		vma->vm_flags |= VM_NONLINEAR;
		vma_interval_tree_remove(vma, &mapping->i_mmap);
		vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
		flush_dcache_mmap_unlock(mapping);
		mutex_unlock(&mapping->i_mmap_mutex);
	}

	if (vma->vm_flags & VM_LOCKED) {
		/*
		 * Drop the PG_mlocked flag for the over-mapped range:
		 * munlock_vma_pages_range() clears VM_LOCKED in
		 * vma->vm_flags, so save the flags first and restore
		 * them afterwards, allowing mm_populate() below to
		 * re-mlock the range.
		 */
		if (!has_write_lock)
			goto get_write_lock;
		vm_flags = vma->vm_flags;
		munlock_vma_pages_range(vma, start, start + size);
		vma->vm_flags = vm_flags;
	}

	mmu_notifier_invalidate_range_start(mm, start, start + size);
	err = vma->vm_ops->remap_pages(vma, start, size, pgoff);
	mmu_notifier_invalidate_range_end(mm, start, start + size);

	/*
	 * We can't clear VM_NONLINEAR because we'd have to do
	 * it after ->populate completes, and that would prevent
	 * downgrading the lock.  (Locks can't be upgraded).
	 */

out:
	if (vma)
		vm_flags = vma->vm_flags;
out_freed:
	if (likely(!has_write_lock))
		up_read(&mm->mmap_sem);
	else
		up_write(&mm->mmap_sem);
	if (!err && ((vm_flags & VM_LOCKED) || !(flags & MAP_NONBLOCK)))
		mm_populate(start, size);

	return err;
}