/*
 * linux/mm/fremap.c
 *
 * Explicit pagetable population and nonlinear (random) mappings support.
 *
 * started by Ingo Molnar, Copyright (C) 2002, 2003
 */

#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/syscalls.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

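/*
 * Clear one pte and drop whatever it referenced: a present page is
 * unmapped and its rmap and page cache references are released, a swap
 * entry is freed, and a file pte is simply cleared.  Returns nonzero
 * iff a present page was torn down, so callers can balance file_rss.
 */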
static int zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;
	struct page *page = NULL;

	if (pte_present(pte)) {
		flush_cache_page(vma, addr, pte_pfn(pte));
		pte = ptep_clear_flush(vma, addr, ptep);
		page = vm_normal_page(vma, addr, pte);
		if (page) {
			if (pte_dirty(pte))
				set_page_dirty(page);
			page_remove_rmap(page);
			page_cache_release(page);
		}
	} else {
		if (!pte_file(pte))
			free_swap_and_cache(pte_to_swp_entry(pte));
		pte_clear(mm, addr, ptep);
	}
	return !!page;
}

/*
 * Install a file page at a given virtual memory address, releasing any
 * previously existing mapping.
 */
int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, struct page *page, pgprot_t prot)
{
	struct inode *inode;
	pgoff_t size;
	int err = -ENOMEM;
	pte_t *pte;
	pmd_t *pmd;
	pud_t *pud;
	pgd_t *pgd;
	pte_t pte_val;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		goto out;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		goto out;
	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		goto out;

	/*
	 * This page may have been truncated. Tell the
	 * caller about it.
	 */
	err = -EINVAL;
	inode = vma->vm_file->f_mapping->host;
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (!page->mapping || page->index >= size)
		goto unlock;
	err = -ENOMEM;
	if (page_mapcount(page) > INT_MAX/2)
		goto unlock;

	if (pte_none(*pte) || !zap_pte(mm, vma, addr, pte))
		inc_mm_counter(mm, file_rss);

	flush_icache_page(vma, page);
	set_pte_at(mm, addr, pte, mk_pte(page, prot));
	page_add_file_rmap(page);
	pte_val = *pte;
	update_mmu_cache(vma, addr, pte_val);
	err = 0;
unlock:
	pte_unmap_unlock(pte, ptl);
out:
	return err;
}
EXPORT_SYMBOL(install_page);
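
/*
 * Illustrative sketch (not part of this file): how a ->populate
 * implementation is expected to use install_page()/install_file_pte().
 * Loosely modeled on filemap_populate() in mm/filemap.c;
 * example_getpage() stands in for a filesystem-specific page lookup
 * and is hypothetical.
 */
#if 0
static int example_populate(struct vm_area_struct *vma, unsigned long addr,
		unsigned long len, pgprot_t prot, unsigned long pgoff,
		int nonblock)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	int err;

	while (len) {
		/* Hypothetical lookup; must not block on I/O if nonblock. */
		page = example_getpage(vma->vm_file, pgoff, nonblock);
		if (page) {
			/* Got the page: wire it into the pagetable. */
			err = install_page(mm, vma, addr, page, prot);
			if (err) {
				page_cache_release(page);
				return err;
			}
		} else if (nonblock) {
			/* Page not resident: leave a file pte behind so a
			 * later fault knows which file offset to read. */
			err = install_file_pte(mm, vma, addr, pgoff, prot);
			if (err)
				return err;
		}
		len -= PAGE_SIZE;
		addr += PAGE_SIZE;
		pgoff++;
	}
	return 0;
}
#endif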

/*
 * Install a file pte at a given virtual memory address, releasing any
 * previously existing mapping.
 */
int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
	int err = -ENOMEM;
	pte_t *pte;
	pmd_t *pmd;
	pud_t *pud;
	pgd_t *pgd;
	pte_t pte_val;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		goto out;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		goto out;
	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		goto out;

	if (!pte_none(*pte) && zap_pte(mm, vma, addr, pte)) {
		update_hiwater_rss(mm);
		dec_mm_counter(mm, file_rss);
	}

	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
	pte_val = *pte;
	update_mmu_cache(vma, addr, pte_val);
	pte_unmap_unlock(pte, ptl);
	err = 0;
out:
	return err;
}
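
/*
 * Background sketch: pgoff_to_pte() packs the file page offset into a
 * not-present pte tagged with an architecture-specific "file" bit, so
 * pte_file() can later distinguish it from a swap entry and
 * pte_to_pgoff() can recover the offset at fault time.  A hypothetical
 * 32-bit encoding (the real ones live in each arch's asm/pgtable.h and
 * differ; this assumes a pte_t holding a single pte_low word):
 */
#if 0
#define EXAMPLE_PAGE_FILE	0x040	/* assumed marker bit */
#define example_pgoff_to_pte(off) \
	((pte_t) { ((off) << 7) | EXAMPLE_PAGE_FILE })	/* offset above it */
#define example_pte_to_pgoff(pte) \
	((pte).pte_low >> 7)		/* recover the offset at fault time */
#endif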

/***
 * sys_remap_file_pages - remap arbitrary pages of a shared backing store
 *                        file within an existing vma.
 * @start: start of the remapped virtual memory range
 * @size: size of the remapped virtual memory range
 * @prot: new protection bits of the range
 * @pgoff: page of the backing store file to be mapped
 * @flags: 0 or MAP_NONBLOCK - the latter causes no I/O.
 *
 * This syscall works purely via pagetables, so it's the most efficient
 * way to map the same (large) file into a given virtual window. Unlike
 * mmap()/mremap() it does not create any new vmas. The new mappings are
 * also safe across swapout.
 *
 * NOTE: the 'prot' parameter right now is ignored, and the vma's default
 * protection is used. Arbitrary protections might be implemented in the
 * future.
 */
asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
	unsigned long __prot, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct address_space *mapping;
	unsigned long end = start + size;
	struct vm_area_struct *vma;
	int err = -EINVAL;
	int has_write_lock = 0;

	if (__prot)
		return err;
	/*
	 * Sanitize the syscall parameters:
	 */
	start = start & PAGE_MASK;
	size = size & PAGE_MASK;

	/* Does the address range wrap, or is the span zero-sized? */
	if (start + size <= start)
		return err;

	/* Can we represent this offset inside this architecture's pte's? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
	if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
		return err;
#endif

	/*
	 * We need down_write() only to change vma->vm_flags, so start
	 * with down_read() and upgrade later if we must.
	 */
	down_read(&mm->mmap_sem);
retry:
	vma = find_vma(mm, start);

	/*
	 * Make sure the vma is shared, that it supports prefaulting,
	 * and that the remapped range is valid and fully within
	 * the single existing vma. vm_private_data is used as a
	 * swapout cursor in a VM_NONLINEAR vma.
	 */
	if (vma && (vma->vm_flags & VM_SHARED) &&
		(!vma->vm_private_data || (vma->vm_flags & VM_NONLINEAR)) &&
		vma->vm_ops && vma->vm_ops->populate &&
			end > start && start >= vma->vm_start &&
				end <= vma->vm_end) {

		/* Must set VM_NONLINEAR before any pages are populated. */
		if (pgoff != linear_page_index(vma, start) &&
			!(vma->vm_flags & VM_NONLINEAR)) {
			if (!has_write_lock) {
				up_read(&mm->mmap_sem);
				down_write(&mm->mmap_sem);
				has_write_lock = 1;
				goto retry;
			}
			mapping = vma->vm_file->f_mapping;
			spin_lock(&mapping->i_mmap_lock);
			flush_dcache_mmap_lock(mapping);
			vma->vm_flags |= VM_NONLINEAR;
			vma_prio_tree_remove(vma, &mapping->i_mmap);
			vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
			flush_dcache_mmap_unlock(mapping);
			spin_unlock(&mapping->i_mmap_lock);
		}

		err = vma->vm_ops->populate(vma, start, size,
					    vma->vm_page_prot,
					    pgoff, flags & MAP_NONBLOCK);

		/*
		 * We can't clear VM_NONLINEAR because we'd have to do
		 * it after ->populate completes, and that would prevent
		 * downgrading the lock. (Locks can't be upgraded).
		 */
	}
	if (likely(!has_write_lock))
		up_read(&mm->mmap_sem);
	else
		up_write(&mm->mmap_sem);

	return err;
}
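
/*
 * Illustrative userspace sketch (a separate program, not kernel code):
 * using the syscall above to rewire one page of a MAP_SHARED mapping.
 * /tmp/data is an assumed, pre-existing file of at least four pages;
 * error handling is kept minimal.
 */
#if 0
#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	int fd = open("/tmp/data", O_RDWR);
	/* Map four file pages linearly to start with. */
	char *win = mmap(NULL, 4 * page, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, 0);

	if (fd < 0 || win == MAP_FAILED)
		return 1;
	/*
	 * Make window page 0 show file page 3.  No new vma is created;
	 * the existing vma becomes VM_NONLINEAR.  prot must be 0 (see
	 * the NOTE in the kernel-doc comment above).
	 */
	if (remap_file_pages(win, page, 0, 3, 0) != 0)
		perror("remap_file_pages");
	printf("first byte of file page 3: %c\n", win[0]);
	close(fd);
	return 0;
}
#endif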