arch/powerpc/mm/subpage-prot.c

/*
 * Copyright 2007-2008 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>

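/*
 * Overview (inferred from the code below; the subpage_prot_table
 * definition and the SBP_* constants live in the asm MMU headers):
 * the protection map in mm->context.spt is a three-level structure.
 * low_prot[] points directly at four pages of u32 protection words
 * covering the first 4GB of the address space, one word per 64k page;
 * above 4GB, protptrs[] points at pages of pointers (SBP_L2_COUNT per
 * page), each entry of which points at a page of protection words
 * (SBP_L1_COUNT per page).
 */
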
/*
 * Free all pages allocated for subpage protection maps and pointers.
 * Also makes sure that the subpage_prot_table structure is
 * reinitialized for the next user.
 */
void subpage_prot_free(struct mm_struct *mm)
{
	struct subpage_prot_table *spt = &mm->context.spt;
	unsigned long i, j, addr;
	u32 **p;

	/* Free the directly-mapped protection pages for the low 4GB. */
	for (i = 0; i < 4; ++i) {
		if (spt->low_prot[i]) {
			free_page((unsigned long)spt->low_prot[i]);
			spt->low_prot[i] = NULL;
		}
	}
	/* Then the two-level map covering addresses above 4GB. */
	addr = 0;
	for (i = 0; i < 2; ++i) {
		p = spt->protptrs[i];
		if (!p)
			continue;
		spt->protptrs[i] = NULL;
		for (j = 0; j < SBP_L2_COUNT && addr < spt->maxaddr;
		     ++j, addr += PAGE_SIZE)
			if (p[j])
				free_page((unsigned long)p[j]);
		free_page((unsigned long)p);
	}
	spt->maxaddr = 0;
}

void subpage_prot_init_new_context(struct mm_struct *mm)
{
	struct subpage_prot_table *spt = &mm->context.spt;

	memset(spt, 0, sizeof(*spt));
}

static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
			     int npages)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return;
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	for (; npages > 0; --npages) {
		/*
		 * pte_update() with nothing to clear or set leaves the
		 * pte unchanged but still flushes any stale hash page
		 * table entry for it.
		 */
		pte_update(mm, addr, pte, 0, 0);
		addr += PAGE_SIZE;
		++pte;
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
}

/*
 * Clear the subpage protection map for an address range, allowing
 * all accesses that are allowed by the pte permissions.
 */
static void subpage_prot_clear(unsigned long addr, unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt = &mm->context.spt;
	u32 **spm, *spp;
	unsigned long i;
	size_t nw;
	unsigned long next, limit;

	down_write(&mm->mmap_sem);
	limit = addr + len;
	if (limit > spt->maxaddr)
		limit = spt->maxaddr;
	for (; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		if (addr < 0x100000000) {
			/* addresses below 4GB use spt->low_prot */
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm)
				continue;
		}
		spp = spm[(addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
		if (!spp)
			continue;
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

		memset(spp, 0, nw * sizeof(u32));

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}
	up_write(&mm->mmap_sem);
}

/*
 * Copy in a subpage protection map for an address range.
 * The map has 2 bits per 4k subpage, so 32 bits per 64k page.
 * Each 2-bit field is 0 to allow any access, 1 to prevent writes,
 * 2 or 3 to prevent all accesses.
 * Note that the normal page protections also apply; the subpage
 * protection mechanism is an additional constraint, so putting 0
 * in a 2-bit field won't allow writes to a page that is otherwise
 * write-protected.
 */
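/*
 * For example, a map word of 0 leaves all sixteen subpages of a 64k
 * page subject only to the pte permissions, 0x55555555 (every 2-bit
 * field set to 01) makes them all read-only, and 0xffffffff makes them
 * all inaccessible.  The map argument holds one such word for each 64k
 * page in the range, i.e. len >> PAGE_SHIFT of them, as the access_ok()
 * check below reflects.
 */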
long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt = &mm->context.spt;
	u32 **spm, *spp;
	unsigned long i;
	size_t nw;
	unsigned long next, limit;
	int err;

	/* Check parameters */
	if ((addr & ~PAGE_MASK) || (len & ~PAGE_MASK) ||
	    addr >= TASK_SIZE || len >= TASK_SIZE || addr + len > TASK_SIZE)
		return -EINVAL;

	if (is_hugepage_only_range(mm, addr, len))
		return -EINVAL;

	if (!map) {
		/* Clear out the protection map for the address range */
		subpage_prot_clear(addr, len);
		return 0;
	}

	if (!access_ok(VERIFY_READ, map, (len >> PAGE_SHIFT) * sizeof(u32)))
		return -EFAULT;

	down_write(&mm->mmap_sem);
	for (limit = addr + len; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		err = -ENOMEM;
		if (addr < 0x100000000) {
			/* addresses below 4GB use spt->low_prot */
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm) {
				spm = (u32 **)get_zeroed_page(GFP_KERNEL);
				if (!spm)
					goto out;
				spt->protptrs[addr >> SBP_L3_SHIFT] = spm;
			}
		}
		spm += (addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1);
		spp = *spm;
		if (!spp) {
			spp = (u32 *)get_zeroed_page(GFP_KERNEL);
			if (!spp)
				goto out;
			*spm = spp;
		}
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

		/*
		 * Subpage protection is only applied via 4k hash PTEs,
		 * so demote this segment to 4k base pages; done with
		 * interrupts off to avoid racing with hash faults.
		 */
		local_irq_disable();
		demote_segment_4k(mm, addr);
		local_irq_enable();

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

		/*
		 * Drop mmap_sem across the user copy, since the copy
		 * may fault and the fault handler takes mmap_sem for
		 * reading.
		 */
		up_write(&mm->mmap_sem);
		err = -EFAULT;
		if (__copy_from_user(spp, map, nw * sizeof(u32)))
			goto out2;
		map += nw;
		down_write(&mm->mmap_sem);

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}
	if (limit > spt->maxaddr)
		spt->maxaddr = limit;
	err = 0;
out:
	up_write(&mm->mmap_sem);
out2:
	return err;
}
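
/*
 * Illustrative userspace sketch (an assumption, not part of this file;
 * it presumes the kernel provides __NR_subpage_prot in its syscall
 * table, as powerpc kernels of this era do):
 *
 *	#include <stdint.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	// Make every 4k subpage of one 64k page read-only.
 *	long protect_page(void *addr)
 *	{
 *		uint32_t map = 0x55555555;	// every 2-bit field = 01
 *		return syscall(__NR_subpage_prot, (unsigned long)addr,
 *			       0x10000UL, &map);
 *	}
 *
 * Passing a NULL map instead clears any existing protection map for
 * the range, as the !map branch of sys_subpage_prot() above shows.
 */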