/*
 * linux/mm/fremap.c
 *
 * Explicit pagetable population and nonlinear (random) mappings support.
 *
 * started by Ingo Molnar, Copyright (C) 2002, 2003
 */
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

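/*
 * Tear down whatever is currently mapped at @addr: a present pte has its
 * page unmapped, marked dirty if need be, and unaccounted from the rss
 * counters; a swap pte has its swap reference dropped; a file pte is
 * simply cleared.
 */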
static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_present(pte)) {
		struct page *page;

		flush_cache_page(vma, addr, pte_pfn(pte));
		pte = ptep_clear_flush(vma, addr, ptep);
		page = vm_normal_page(vma, addr, pte);
		if (page) {
			if (pte_dirty(pte))
				set_page_dirty(page);
			page_remove_rmap(page);
			page_cache_release(page);
			update_hiwater_rss(mm);
			dec_mm_counter(mm, MM_FILEPAGES);
		}
	} else {
		if (!pte_file(pte))
			free_swap_and_cache(pte_to_swp_entry(pte));
		pte_clear_not_present_full(mm, addr, ptep, 0);
	}
}

/*
 * Install a file pte at the given virtual memory address, releasing
 * any previously existing mapping.
 */
static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
	int err = -ENOMEM;
	pte_t *pte;
	spinlock_t *ptl;

	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;

	if (!pte_none(*pte))
		zap_pte(mm, vma, addr, pte);

	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
	/*
	 * We don't need to run update_mmu_cache() here because the "file pte"
	 * being installed by install_file_pte() is not a real pte - it's a
	 * non-present entry (like a swap entry), noting what file offset should
	 * be mapped there when there's a fault (in a non-linear vma where
	 * that's not obvious).
	 */
	pte_unmap_unlock(pte, ptl);
	err = 0;
out:
	return err;
}
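
/*
 * Illustrative sketch, not part of the kernel flow: a file pte encodes
 * nothing but a file offset, and the arch pgtable helpers round-trip that
 * encoding. The fault path later uses pte_to_pgoff() to recover the
 * offset, assuming the usual helpers from <asm/pgtable.h>:
 *
 *	pte_t pte = pgoff_to_pte(pgoff);
 *	BUG_ON(!pte_file(pte));			// marked as a file pte
 *	BUG_ON(pte_to_pgoff(pte) != pgoff);	// offset survives the trip
 */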

int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
			     unsigned long size, pgoff_t pgoff)
{
	struct mm_struct *mm = vma->vm_mm;
	int err;

	do {
		err = install_file_pte(mm, vma, addr, pgoff, vma->vm_page_prot);
		if (err)
			return err;

		size -= PAGE_SIZE;
		addr += PAGE_SIZE;
		pgoff++;
	} while (size);

	return 0;
}
EXPORT_SYMBOL(generic_file_remap_pages);
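
/*
 * Sketch of how a filesystem is expected to hook this helper up (the
 * "examplefs" name is hypothetical, shown for illustration only): it is
 * wired into the file's vm_operations_struct next to the fault handler,
 * which is what the vma->vm_ops->remap_pages check below relies on:
 *
 *	static const struct vm_operations_struct examplefs_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.remap_pages	= generic_file_remap_pages,
 *	};
 */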

/**
 * sys_remap_file_pages - remap arbitrary pages of an existing VM_SHARED vma
 * @start: start of the remapped virtual memory range
 * @size: size of the remapped virtual memory range
 * @prot: new protection bits of the range (see NOTE)
 * @pgoff: to-be-mapped page of the backing store file
 * @flags: 0 or MAP_NONBLOCK - the latter causes no IO.
 *
 * sys_remap_file_pages remaps arbitrary pages of an existing VM_SHARED vma
 * (shared backing store file).
 *
 * This syscall works purely via pagetables, so it's the most efficient
 * way to map the same (large) file into a given virtual window. Unlike
 * mmap()/mremap() it does not create any new vmas. The new mappings are
 * also safe across swapout.
 *
 * NOTE: the @prot parameter right now is ignored (but must be zero),
 * and the vma's default protection is used. Arbitrary protections
 * might be implemented in the future.
 */
SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
		unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
{
	struct mm_struct *mm = current->mm;
	struct address_space *mapping;
	struct vm_area_struct *vma;
	int err = -EINVAL;
	int has_write_lock = 0;
	vm_flags_t vm_flags;

	if (prot)
		return err;
	/*
	 * Sanitize the syscall parameters:
	 */
	start = start & PAGE_MASK;
	size = size & PAGE_MASK;

	/* Does the address range wrap, or is the span zero-sized? */
	if (start + size <= start)
		return err;
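	/*
	 * Worked example of the check above: on 32-bit, start = 0xfffff000
	 * with size = 0x2000 gives start + size = 0x1000, which is <= start,
	 * so the wrapped range is rejected with -EINVAL.
	 */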

	/* Does pgoff wrap? */
	if (pgoff + (size >> PAGE_SHIFT) < pgoff)
		return err;

	/* Can we represent this offset inside this architecture's pte's? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
	if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
		return err;
#endif

	/* We need down_write() to change vma->vm_flags. */
	down_read(&mm->mmap_sem);
retry:
	vma = find_vma(mm, start);

	/*
	 * Make sure the vma is shared, that it supports prefaulting,
	 * and that the remapped range is valid and fully within
	 * the single existing vma. Check for NULL before touching
	 * vma->vm_flags.
	 */
	if (!vma)
		goto out;

	vm_flags = vma->vm_flags;
	if (!(vm_flags & VM_SHARED))
		goto out;

	if (!vma->vm_ops || !vma->vm_ops->remap_pages)
		goto out;

	if (start < vma->vm_start || start + size > vma->vm_end)
		goto out;

	/* Must set VM_NONLINEAR before any pages are populated. */
	if (!(vma->vm_flags & VM_NONLINEAR)) {
		/*
		 * vm_private_data is used as a swapout cursor
		 * in a VM_NONLINEAR vma.
		 */
		if (vma->vm_private_data)
			goto out;

		/* Don't need a nonlinear mapping, exit success */
		if (pgoff == linear_page_index(vma, start)) {
			err = 0;
			goto out;
		}

		if (!has_write_lock) {
get_write_lock:
			up_read(&mm->mmap_sem);
			down_write(&mm->mmap_sem);
			has_write_lock = 1;
			goto retry;
		}
		mapping = vma->vm_file->f_mapping;
		/*
		 * page_mkclean doesn't work on nonlinear vmas, so if
		 * dirty pages need to be accounted, emulate with linear
		 * vmas.
		 */
		if (mapping_cap_account_dirty(mapping)) {
			unsigned long addr;
			struct file *file = get_file(vma->vm_file);

			vm_flags = vma->vm_flags;
			if (!(flags & MAP_NONBLOCK))
				vm_flags |= VM_POPULATE;
			addr = mmap_region(file, start, size, vm_flags, pgoff);
			fput(file);
			if (IS_ERR_VALUE(addr)) {
				err = addr;
			} else {
				BUG_ON(addr != start);
				err = 0;
			}
			goto out;
		}
		mutex_lock(&mapping->i_mmap_mutex);
		flush_dcache_mmap_lock(mapping);
		vma->vm_flags |= VM_NONLINEAR;
		vma_interval_tree_remove(vma, &mapping->i_mmap);
		vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
		flush_dcache_mmap_unlock(mapping);
		mutex_unlock(&mapping->i_mmap_mutex);
	}

	if (!(flags & MAP_NONBLOCK) && !(vma->vm_flags & VM_POPULATE)) {
		if (!has_write_lock)
			goto get_write_lock;
		vma->vm_flags |= VM_POPULATE;
	}

	if (vma->vm_flags & VM_LOCKED) {
		/*
		 * drop PG_mlocked flag for the over-mapped range
		 */
		if (!has_write_lock)
			goto get_write_lock;
		vm_flags = vma->vm_flags;
		munlock_vma_pages_range(vma, start, start + size);
		vma->vm_flags = vm_flags;
	}

	mmu_notifier_invalidate_range_start(mm, start, start + size);
	err = vma->vm_ops->remap_pages(vma, start, size, pgoff);
	mmu_notifier_invalidate_range_end(mm, start, start + size);

	/*
	 * We can't clear VM_NONLINEAR because we'd have to do
	 * it after ->populate completes, and that would prevent
	 * downgrading the lock. (Locks can't be upgraded).
	 */

out:
	if (vma)
		vm_flags = vma->vm_flags;
	if (likely(!has_write_lock))
		up_read(&mm->mmap_sem);
	else
		up_write(&mm->mmap_sem);
	if (!err && ((vm_flags & VM_LOCKED) || !(flags & MAP_NONBLOCK)))
		mm_populate(start, size);

	return err;
}
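
/*
 * Userspace usage sketch (illustrative only; builds against glibc with
 * _GNU_SOURCE, error handling elided). The example maps two file pages
 * linearly, then rewires the first virtual page to file page 1, turning
 * the vma nonlinear. Note @prot must be 0, per the NOTE above:
 *
 *	#define _GNU_SOURCE
 *	#include <sys/mman.h>
 *
 *	char *p = mmap(NULL, 2 * 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	remap_file_pages(p, 4096, 0, 1, 0);	// vpage 0 -> file page 1
 */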