/*
 * This is for all the tests related to copy_to_user() and copy_from_user()
 * hardening.
 */
#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>

static size_t cache_size = 1024;
static struct kmem_cache *bad_cache;

static const unsigned char test_text[] = "This is a test.\n";

/*
 * Instead of adding -Wno-return-local-addr, just pass the stack address
 * through a function to obfuscate it from the compiler.
 */
static noinline unsigned char *trick_compiler(unsigned char *stack)
{
        return stack + 0;
}

static noinline unsigned char *do_usercopy_stack_callee(int value)
{
        unsigned char buf[32];
        int i;

        /* Exercise stack to avoid everything living in registers. */
        for (i = 0; i < sizeof(buf); i++) {
                buf[i] = value & 0xff;
        }

        return trick_compiler(buf);
}

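/*
 * Perform a stack usercopy test. to_user selects copy_to_user() vs
 * copy_from_user(). When bad_frame is true, the "bad" pointer targets a
 * dead callee stack frame; otherwise it sits so close to the end of the
 * stack that a sizeof(good_stack) copy would run past it. Either way,
 * the "bad" copy is expected to trip the usercopy stack checks and Oops.
 */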
static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
{
        unsigned long user_addr;
        unsigned char good_stack[32];
        unsigned char *bad_stack;
        int i;

        /* Exercise stack to avoid everything living in registers. */
        for (i = 0; i < sizeof(good_stack); i++)
                good_stack[i] = test_text[i % sizeof(test_text)];

        /* This is a pointer to outside our current stack frame. */
        if (bad_frame) {
                bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
        } else {
                /* Put start address just inside stack. */
                bad_stack = task_stack_page(current) + THREAD_SIZE;
                bad_stack -= sizeof(unsigned long);
        }

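        /*
         * Map an anonymous userspace page to copy to/from. On failure,
         * vm_mmap() returns a negative errno cast to unsigned long, so a
         * result at or above TASK_SIZE indicates the mapping failed.
         */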
        user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
                            PROT_READ | PROT_WRITE | PROT_EXEC,
                            MAP_ANONYMOUS | MAP_PRIVATE, 0);
        if (user_addr >= TASK_SIZE) {
                pr_warn("Failed to allocate user memory\n");
                return;
        }

        if (to_user) {
                pr_info("attempting good copy_to_user of local stack\n");
                if (copy_to_user((void __user *)user_addr, good_stack,
                                 sizeof(good_stack))) {
                        pr_warn("copy_to_user failed unexpectedly?!\n");
                        goto free_user;
                }

                pr_info("attempting bad copy_to_user of distant stack\n");
                if (copy_to_user((void __user *)user_addr, bad_stack,
                                 sizeof(good_stack))) {
                        pr_warn("copy_to_user failed, but lacked Oops\n");
                        goto free_user;
                }
        } else {
                /*
                 * There isn't a safe way to not be protected by usercopy
                 * if we're going to write to another thread's stack.
                 */
                if (!bad_frame)
                        goto free_user;

                pr_info("attempting good copy_from_user of local stack\n");
                if (copy_from_user(good_stack, (void __user *)user_addr,
                                   sizeof(good_stack))) {
                        pr_warn("copy_from_user failed unexpectedly?!\n");
                        goto free_user;
                }

                pr_info("attempting bad copy_from_user of distant stack\n");
                if (copy_from_user(bad_stack, (void __user *)user_addr,
                                   sizeof(good_stack))) {
                        pr_warn("copy_from_user failed, but lacked Oops\n");
                        goto free_user;
                }
        }

free_user:
        vm_munmap(user_addr, PAGE_SIZE);
}

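/*
 * The "bad" heap copies below use twice the size of the underlying kmalloc
 * allocation; the usercopy heap object-size check is expected to Oops
 * rather than copy past the end of the allocation.
 */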
static void do_usercopy_heap_size(bool to_user)
{
        unsigned long user_addr;
        unsigned char *one, *two;
        const size_t size = 1024;

        one = kmalloc(size, GFP_KERNEL);
        two = kmalloc(size, GFP_KERNEL);
        if (!one || !two) {
                pr_warn("Failed to allocate kernel memory\n");
                goto free_kernel;
        }

        user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
                            PROT_READ | PROT_WRITE | PROT_EXEC,
                            MAP_ANONYMOUS | MAP_PRIVATE, 0);
        if (user_addr >= TASK_SIZE) {
                pr_warn("Failed to allocate user memory\n");
                goto free_kernel;
        }

        memset(one, 'A', size);
        memset(two, 'B', size);

        if (to_user) {
                pr_info("attempting good copy_to_user of correct size\n");
                if (copy_to_user((void __user *)user_addr, one, size)) {
                        pr_warn("copy_to_user failed unexpectedly?!\n");
                        goto free_user;
                }

                pr_info("attempting bad copy_to_user of too large size\n");
                if (copy_to_user((void __user *)user_addr, one, 2 * size)) {
                        pr_warn("copy_to_user failed, but lacked Oops\n");
                        goto free_user;
                }
        } else {
                pr_info("attempting good copy_from_user of correct size\n");
                if (copy_from_user(one, (void __user *)user_addr, size)) {
                        pr_warn("copy_from_user failed unexpectedly?!\n");
                        goto free_user;
                }

                pr_info("attempting bad copy_from_user of too large size\n");
                if (copy_from_user(one, (void __user *)user_addr, 2 * size)) {
                        pr_warn("copy_from_user failed, but lacked Oops\n");
                        goto free_user;
                }
        }

free_user:
        vm_munmap(user_addr, PAGE_SIZE);
free_kernel:
        kfree(one);
        kfree(two);
}

static void do_usercopy_heap_flag(bool to_user)
{
        unsigned long user_addr;
        unsigned char *good_buf = NULL;
        unsigned char *bad_buf = NULL;

        /* Make sure cache was prepared. */
        if (!bad_cache) {
                pr_warn("Failed to allocate kernel cache\n");
                return;
        }

        /*
         * Allocate one buffer from each cache (kmalloc will have the
         * SLAB_USERCOPY flag already, but "bad_cache" won't).
         */
        good_buf = kmalloc(cache_size, GFP_KERNEL);
        bad_buf = kmem_cache_alloc(bad_cache, GFP_KERNEL);
        if (!good_buf || !bad_buf) {
                pr_warn("Failed to allocate buffers from caches\n");
                goto free_alloc;
        }

        /* Allocate user memory we'll poke at. */
        user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
                            PROT_READ | PROT_WRITE | PROT_EXEC,
                            MAP_ANONYMOUS | MAP_PRIVATE, 0);
        if (user_addr >= TASK_SIZE) {
                pr_warn("Failed to allocate user memory\n");
                goto free_alloc;
        }

        memset(good_buf, 'A', cache_size);
        memset(bad_buf, 'B', cache_size);

        if (to_user) {
                pr_info("attempting good copy_to_user with SLAB_USERCOPY\n");
                if (copy_to_user((void __user *)user_addr, good_buf,
                                 cache_size)) {
                        pr_warn("copy_to_user failed unexpectedly?!\n");
                        goto free_user;
                }

                pr_info("attempting bad copy_to_user w/o SLAB_USERCOPY\n");
                if (copy_to_user((void __user *)user_addr, bad_buf,
                                 cache_size)) {
                        pr_warn("copy_to_user failed, but lacked Oops\n");
                        goto free_user;
                }
        } else {
                pr_info("attempting good copy_from_user with SLAB_USERCOPY\n");
                if (copy_from_user(good_buf, (void __user *)user_addr,
                                   cache_size)) {
                        pr_warn("copy_from_user failed unexpectedly?!\n");
                        goto free_user;
                }

                pr_info("attempting bad copy_from_user w/o SLAB_USERCOPY\n");
                if (copy_from_user(bad_buf, (void __user *)user_addr,
                                   cache_size)) {
                        pr_warn("copy_from_user failed, but lacked Oops\n");
                        goto free_user;
                }
        }

free_user:
        vm_munmap(user_addr, PAGE_SIZE);
free_alloc:
        if (bad_buf)
                kmem_cache_free(bad_cache, bad_buf);
        kfree(good_buf);
}

/* Callable tests. */
void lkdtm_USERCOPY_HEAP_SIZE_TO(void)
{
        do_usercopy_heap_size(true);
}

void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
{
        do_usercopy_heap_size(false);
}

void lkdtm_USERCOPY_HEAP_FLAG_TO(void)
{
        do_usercopy_heap_flag(true);
}

void lkdtm_USERCOPY_HEAP_FLAG_FROM(void)
{
        do_usercopy_heap_flag(false);
}

void lkdtm_USERCOPY_STACK_FRAME_TO(void)
{
        do_usercopy_stack(true, true);
}

void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
{
        do_usercopy_stack(false, true);
}

void lkdtm_USERCOPY_STACK_BEYOND(void)
{
        do_usercopy_stack(true, false);
}

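/*
 * The copy from rodata (test_text) should succeed, while the copy from
 * kernel text (the body of vm_mmap() itself) is expected to be rejected
 * by the usercopy checks with an Oops.
 */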
void lkdtm_USERCOPY_KERNEL(void)
{
        unsigned long user_addr;

        user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
                            PROT_READ | PROT_WRITE | PROT_EXEC,
                            MAP_ANONYMOUS | MAP_PRIVATE, 0);
        if (user_addr >= TASK_SIZE) {
                pr_warn("Failed to allocate user memory\n");
                return;
        }

        pr_info("attempting good copy_to_user from kernel rodata\n");
        if (copy_to_user((void __user *)user_addr, test_text,
                         sizeof(test_text))) {
                pr_warn("copy_to_user failed unexpectedly?!\n");
                goto free_user;
        }

        pr_info("attempting bad copy_to_user from kernel text\n");
        if (copy_to_user((void __user *)user_addr, vm_mmap, PAGE_SIZE)) {
                pr_warn("copy_to_user failed, but lacked Oops\n");
                goto free_user;
        }

free_user:
        vm_munmap(user_addr, PAGE_SIZE);
}

void __init lkdtm_usercopy_init(void)
{
        /* Prepare cache that lacks SLAB_USERCOPY flag. */
        bad_cache = kmem_cache_create("lkdtm-no-usercopy", cache_size, 0,
                                      0, NULL);
}

void __exit lkdtm_usercopy_exit(void)
{
        kmem_cache_destroy(bad_cache);
}