/*
 * This is for all the tests related to copy_to_user() and copy_from_user()
 * hardening.
 */
#define pr_fmt(fmt) "lkdtm: " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>

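/*
 * These routines are normally exercised from userspace through the lkdtm
 * debugfs interface, e.g. (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *   echo USERCOPY_HEAP_SIZE_TO > /sys/kernel/debug/provoke-crash/DIRECT
 *
 * On a kernel with usercopy hardening enabled, the "bad" copies below are
 * expected to end in an Oops rather than the "lacked Oops" warning.
 */
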
static size_t cache_size = 1024;
static struct kmem_cache *bad_cache;

static const unsigned char test_text[] = "This is a test.\n";

/*
 * Instead of adding -Wno-return-local-addr, just pass the stack address
 * through a function to obfuscate it from the compiler.
 */
static noinline unsigned char *trick_compiler(unsigned char *stack)
{
	return stack + 0;
}

static noinline unsigned char *do_usercopy_stack_callee(int value)
{
	unsigned char buf[32];
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(buf); i++) {
		buf[i] = value & 0xff;
	}

	return trick_compiler(buf);
}

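/*
 * Perform a stack-based usercopy in the requested direction. With bad_frame,
 * the kernel-side pointer comes from a callee frame that has already
 * unwound; otherwise it points just inside the end of the task stack, so the
 * copy runs past the stack's bounds. Either way, a hardened usercopy
 * implementation is expected to reject the "bad" copy.
 */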
static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
{
	unsigned long user_addr;
	unsigned char good_stack[32];
	unsigned char *bad_stack;
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(good_stack); i++)
		good_stack[i] = test_text[i % sizeof(test_text)];

	/* This is a pointer to outside our current stack frame. */
	if (bad_frame) {
		bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
	} else {
		/* Put start address just inside stack. */
		bad_stack = task_stack_page(current) + THREAD_SIZE;
		bad_stack -= sizeof(unsigned long);
	}

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	if (to_user) {
		pr_info("attempting good copy_to_user of local stack\n");
		if (copy_to_user((void __user *)user_addr, good_stack,
				 sizeof(good_stack))) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of distant stack\n");
		if (copy_to_user((void __user *)user_addr, bad_stack,
				 sizeof(good_stack))) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		/*
		 * There isn't a safe way to not be protected by usercopy
		 * if we're going to write to another thread's stack.
		 */
		if (!bad_frame)
			goto free_user;

		pr_info("attempting good copy_from_user of local stack\n");
		if (copy_from_user(good_stack, (void __user *)user_addr,
				   sizeof(good_stack))) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of distant stack\n");
		if (copy_from_user(bad_stack, (void __user *)user_addr,
				   sizeof(good_stack))) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}

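/*
 * Copy to/from a kmalloc() buffer using both its allocated size and a length
 * twice as large. The oversized copy crosses the end of the slab object,
 * which a hardened usercopy check is expected to catch.
 */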
static void do_usercopy_heap_size(bool to_user)
{
	unsigned long user_addr;
	unsigned char *one, *two;
	const size_t size = 1024;

	one = kmalloc(size, GFP_KERNEL);
	two = kmalloc(size, GFP_KERNEL);
	if (!one || !two) {
		pr_warn("Failed to allocate kernel memory\n");
		goto free_kernel;
	}

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_kernel;
	}

	memset(one, 'A', size);
	memset(two, 'B', size);

	if (to_user) {
		pr_info("attempting good copy_to_user of correct size\n");
		if (copy_to_user((void __user *)user_addr, one, size)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of too large size\n");
		if (copy_to_user((void __user *)user_addr, one, 2 * size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user of correct size\n");
		if (copy_from_user(one, (void __user *)user_addr, size)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of too large size\n");
		if (copy_from_user(one, (void __user *)user_addr, 2 * size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
free_kernel:
	kfree(one);
	kfree(two);
}

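/*
 * Copy to/from a kmalloc() buffer and a buffer from a locally created cache
 * that lacks the SLAB_USERCOPY flag. On kernels that enforce the flag, the
 * copy involving bad_cache is the one expected to Oops.
 */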
static void do_usercopy_heap_flag(bool to_user)
{
	unsigned long user_addr;
	unsigned char *good_buf = NULL;
	unsigned char *bad_buf = NULL;

	/* Make sure cache was prepared. */
	if (!bad_cache) {
		pr_warn("Failed to allocate kernel cache\n");
		return;
	}

	/*
	 * Allocate one buffer from each cache (kmalloc will have the
	 * SLAB_USERCOPY flag already, but "bad_cache" won't).
	 */
	good_buf = kmalloc(cache_size, GFP_KERNEL);
	bad_buf = kmem_cache_alloc(bad_cache, GFP_KERNEL);
	if (!good_buf || !bad_buf) {
		pr_warn("Failed to allocate buffers from caches\n");
		goto free_alloc;
	}

	/* Allocate user memory we'll poke at. */
	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_alloc;
	}

	memset(good_buf, 'A', cache_size);
	memset(bad_buf, 'B', cache_size);

	if (to_user) {
		pr_info("attempting good copy_to_user with SLAB_USERCOPY\n");
		if (copy_to_user((void __user *)user_addr, good_buf,
				 cache_size)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user w/o SLAB_USERCOPY\n");
		if (copy_to_user((void __user *)user_addr, bad_buf,
				 cache_size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user with SLAB_USERCOPY\n");
		if (copy_from_user(good_buf, (void __user *)user_addr,
				   cache_size)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user w/o SLAB_USERCOPY\n");
		if (copy_from_user(bad_buf, (void __user *)user_addr,
				   cache_size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
free_alloc:
	if (bad_buf)
		kmem_cache_free(bad_cache, bad_buf);
	kfree(good_buf);
}

/* Callable tests. */
void lkdtm_USERCOPY_HEAP_SIZE_TO(void)
{
	do_usercopy_heap_size(true);
}

void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
{
	do_usercopy_heap_size(false);
}

void lkdtm_USERCOPY_HEAP_FLAG_TO(void)
{
	do_usercopy_heap_flag(true);
}

void lkdtm_USERCOPY_HEAP_FLAG_FROM(void)
{
	do_usercopy_heap_flag(false);
}

void lkdtm_USERCOPY_STACK_FRAME_TO(void)
{
	do_usercopy_stack(true, true);
}

void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
{
	do_usercopy_stack(false, true);
}

void lkdtm_USERCOPY_STACK_BEYOND(void)
{
	do_usercopy_stack(true, false);
}

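/*
 * Copy kernel memory to userspace: first from rodata (test_text), which
 * should be permitted, then from kernel text (using vm_mmap() as a handy
 * text address), which a hardened usercopy check is expected to reject.
 */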
void lkdtm_USERCOPY_KERNEL(void)
{
	unsigned long user_addr;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	pr_info("attempting good copy_to_user from kernel rodata\n");
	if (copy_to_user((void __user *)user_addr, test_text,
			 sizeof(test_text))) {
		pr_warn("copy_to_user failed unexpectedly?!\n");
		goto free_user;
	}

	pr_info("attempting bad copy_to_user from kernel text\n");
	if (copy_to_user((void __user *)user_addr, vm_mmap, PAGE_SIZE)) {
		pr_warn("copy_to_user failed, but lacked Oops\n");
		goto free_user;
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}

void __init lkdtm_usercopy_init(void)
{
	/* Prepare cache that lacks SLAB_USERCOPY flag. */
	bad_cache = kmem_cache_create("lkdtm-no-usercopy", cache_size, 0,
				      0, NULL);
}

void __exit lkdtm_usercopy_exit(void)
{
	kmem_cache_destroy(bad_cache);
}