/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/ksm.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * For a prot_numa update we only hold mmap_sem for read so there is a
 * potential race with faulting where a pmd was temporarily none. This
 * function checks for a transhuge pmd under the appropriate lock. It
 * returns a pte if it was successfully locked or NULL if it raced with
 * a transhuge insertion.
 */
static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, int prot_numa, spinlock_t **ptl)
{
	pte_t *pte;
	spinlock_t *pmdl;

	/* !prot_numa is protected by mmap_sem held for write */
	if (!prot_numa)
		return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);

	pmdl = pmd_lock(vma->vm_mm, pmd);
	if (unlikely(pmd_trans_huge(*pmd) || pmd_none(*pmd))) {
		spin_unlock(pmdl);
		return NULL;
	}

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
	spin_unlock(pmdl);
	return pte;
}

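/*
 * Walk the ptes under one pmd and apply newprot to each present
 * entry. Write migration entries are converted to read ones, since
 * (per the comment below) a protection check there is difficult and
 * disabling write is the safe choice. Returns the number of entries
 * updated so the caller can decide whether a TLB flush is needed.
 */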
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;

	pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);
	if (!pte)
		return 0;

	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool preserve_write = prot_numa && pte_write(oldpte);

			/*
			 * Avoid trapping faults against the zero or KSM
			 * pages. See similar comment in change_huge_pmd.
			 */
			if (prot_numa) {
				struct page *page;

				page = vm_normal_page(vma, addr, oldpte);
				if (!page || PageKsm(page))
					continue;

				/* Avoid TLB flush if possible */
				if (pte_protnone(oldpte))
					continue;
			}

			ptent = ptep_modify_prot_start(mm, addr, pte);
			ptent = pte_modify(ptent, newprot);
			if (preserve_write)
				ptent = pte_mkwrite(ptent);

			/* Avoid taking write faults for known dirty pages */
			if (dirty_accountable && pte_dirty(ptent) &&
					(pte_soft_dirty(ptent) ||
					 !(vma->vm_flags & VM_SOFTDIRTY))) {
				ptent = pte_mkwrite(ptent);
			}
			ptep_modify_prot_commit(mm, addr, pte, ptent);
			pages++;
		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				pte_t newpte;
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				set_pte_at(mm, addr, pte, newpte);

				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}

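/*
 * Walk the pmds under one pud. Transparent huge pmds are either
 * updated in place by change_huge_pmd() or split and then handled by
 * change_pte_range(). The mmu notifier range is only started once a
 * populated pmd is actually found.
 */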
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long next;
	unsigned long pages = 0;
	unsigned long nr_huge_updates = 0;
	unsigned long mni_start = 0;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);
		if (!pmd_trans_huge(*pmd) && pmd_none_or_clear_bad(pmd))
			continue;

		/* invoke the mmu notifier if the pmd is populated */
		if (!mni_start) {
			mni_start = addr;
			mmu_notifier_invalidate_range_start(mm, mni_start, end);
		}

		if (pmd_trans_huge(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE)
				split_huge_page_pmd(vma, addr, pmd);
			else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
						newprot, prot_numa);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}

					/* huge pmd was handled */
					continue;
				}
			}
			/* fall through, the trans huge pmd just split */
		}
		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
				dirty_accountable, prot_numa);
		pages += this_pages;
	} while (pmd++, addr = next, addr != end);

	if (mni_start)
		mmu_notifier_invalidate_range_end(mm, mni_start, end);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}

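/* Walk the puds under one pgd, delegating each to change_pmd_range(). */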
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
				dirty_accountable, prot_numa);
	} while (pud++, addr = next, addr != end);

	return pages;
}

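/*
 * Top of the page-table walk: iterate the pgd entries covering
 * [addr, end), then flush the TLB once at the end if any entry was
 * actually modified. set_tlb_flush_pending() signals to other code
 * (e.g. the NUMA hinting fault path) that a flush for this mm is
 * still outstanding.
 */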
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	set_tlb_flush_pending(mm);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_pud_range(vma, pgd, addr, next, newprot,
				dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);
	clear_tlb_flush_pending(mm);

	return pages;
}

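/*
 * Entry point used by mprotect_fixup() and the NUMA balancing code:
 * hugetlb VMAs take their own path, everything else goes through the
 * generic page-table walk above.
 */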
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	unsigned long pages;

	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot,
				dirty_accountable, prot_numa);

	return pages;
}

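/*
 * Apply newflags to [start, end) within one vma: charge any newly
 * writable private pages against the commit limit, merge or split
 * vmas as needed, then rewrite the page-table protections.
 */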
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	dirty_accountable = vma_wants_writenotify(vma);
	vma_set_page_prot(vma);

	change_protection(vma, start, end, vma->vm_page_prot,
			dirty_accountable, 0);

	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

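/*
 * The mprotect(2) entry point: validate the arguments, then walk the
 * vmas covering [start, start+len) and apply mprotect_fixup() to each
 * in turn, all under mmap_sem held for write.
 */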
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC:
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags;
		newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shift VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}
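
/*
 * Illustrative userspace sketch (not part of this file): the syscall
 * above is what runs when a program does something like
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	p[0] = 1;
 *	mprotect(p, 4096, PROT_READ);
 *
 * mprotect_fixup() then splits or merges the backing vma as needed and
 * change_protection() rewrites the PTEs, so the next write to p takes
 * a fault with the new, read-only vm_page_prot.
 */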