/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some of the code is borrowed from https://github.com/xairy/linux by
 * Andrey Konovalov <adech.fo@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/kasan.h>

#include "kasan.h"
#include "../slab.h"

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
        void *shadow_start, *shadow_end;

        shadow_start = kasan_mem_to_shadow(address);
        shadow_end = kasan_mem_to_shadow(address + size);

        memset(shadow_start, value, shadow_end - shadow_start);
}

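/*
 * Marks 'size' bytes starting from 'address' as accessible. If 'size' is
 * not a multiple of KASAN_SHADOW_SCALE_SIZE, the shadow byte for the last,
 * partially covered granule records how many of its leading bytes are
 * accessible.
 */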
void kasan_unpoison_shadow(const void *address, size_t size)
{
        kasan_poison_shadow(address, size, 0);

        if (size & KASAN_SHADOW_MASK) {
                u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
                *shadow = size & KASAN_SHADOW_MASK;
        }
}

/*
 * All functions below are always inlined so the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX depending
 * on the memory access size X.
 */

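/*
 * Each shadow byte describes one KASAN_SHADOW_SCALE_SIZE-byte granule:
 * 0 means the whole granule is accessible, a value from 1 to
 * KASAN_SHADOW_SCALE_SIZE - 1 means only that many leading bytes are,
 * and a negative value marks the whole granule as poisoned. A single-byte
 * access is therefore bad iff its offset within the granule is >= the
 * (signed) shadow value.
 */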
static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
        s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(shadow_value)) {
                s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
                return unlikely(last_accessible_byte >= shadow_value);
        }

        return false;
}

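/*
 * A 2-, 4- or 8-byte access may straddle two granules. The last byte of
 * the region is checked first; the first shadow byte only needs a full
 * check when the access actually crosses a granule boundary.
 */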
static __always_inline bool memory_is_poisoned_2(unsigned long addr)
{
        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(*shadow_addr)) {
                if (memory_is_poisoned_1(addr + 1))
                        return true;

                if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
                        return false;

                return unlikely(*(u8 *)shadow_addr);
        }

        return false;
}

static __always_inline bool memory_is_poisoned_4(unsigned long addr)
{
        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(*shadow_addr)) {
                if (memory_is_poisoned_1(addr + 3))
                        return true;

                if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
                        return false;

                return unlikely(*(u8 *)shadow_addr);
        }

        return false;
}

static __always_inline bool memory_is_poisoned_8(unsigned long addr)
{
        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(*shadow_addr)) {
                if (memory_is_poisoned_1(addr + 7))
                        return true;

                if (likely(((addr + 7) & KASAN_SHADOW_MASK) >= 7))
                        return false;

                return unlikely(*(u8 *)shadow_addr);
        }

        return false;
}

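/*
 * An aligned 16-byte access covers exactly two granules, so both of the
 * first two shadow bytes must be zero. If the access is unaligned it
 * spills into a third granule, and the last byte is rechecked
 * individually.
 */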
static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
        u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(*shadow_addr)) {
                u16 shadow_first_bytes = *(u16 *)shadow_addr;
                s8 last_byte = (addr + 15) & KASAN_SHADOW_MASK;

                if (unlikely(shadow_first_bytes))
                        return true;

                if (likely(!last_byte))
                        return false;

                return memory_is_poisoned_1(addr + 15);
        }

        return false;
}

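/*
 * Returns the address of the first non-zero byte in a shadow range, or 0
 * if the range is clean. memory_is_zero() handles the unaligned head and
 * tail byte by byte and scans the aligned middle one machine word at a
 * time.
 */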
static __always_inline unsigned long bytes_is_zero(const u8 *start,
                                        size_t size)
{
        while (size) {
                if (unlikely(*start))
                        return (unsigned long)start;
                start++;
                size--;
        }

        return 0;
}

static __always_inline unsigned long memory_is_zero(const void *start,
                                                const void *end)
{
        unsigned int words;
        unsigned long ret;
        unsigned int prefix = (unsigned long)start % 8;

        if (end - start <= 16)
                return bytes_is_zero(start, end - start);

        if (prefix) {
                prefix = 8 - prefix;
                ret = bytes_is_zero(start, prefix);
                if (unlikely(ret))
                        return ret;
                start += prefix;
        }

        words = (end - start) / 8;
        while (words) {
                if (unlikely(*(u64 *)start))
                        return bytes_is_zero(start, 8);
                start += 8;
                words--;
        }

        return bytes_is_zero(start, (end - start) % 8);
}

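/*
 * Generic check for accesses of arbitrary size: every covered shadow byte
 * must be zero, except that the last one may hold a partial-granule value
 * big enough to admit the final byte of the access.
 */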
static __always_inline bool memory_is_poisoned_n(unsigned long addr,
                                                size_t size)
{
        unsigned long ret;

        ret = memory_is_zero(kasan_mem_to_shadow((void *)addr),
                        kasan_mem_to_shadow((void *)addr + size - 1) + 1);

        if (unlikely(ret)) {
                unsigned long last_byte = addr + size - 1;
                s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

                if (unlikely(ret != (unsigned long)last_shadow ||
                        ((last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
                        return true;
        }
        return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
        if (__builtin_constant_p(size)) {
                switch (size) {
                case 1:
                        return memory_is_poisoned_1(addr);
                case 2:
                        return memory_is_poisoned_2(addr);
                case 4:
                        return memory_is_poisoned_4(addr);
                case 8:
                        return memory_is_poisoned_8(addr);
                case 16:
                        return memory_is_poisoned_16(addr);
                default:
                        BUILD_BUG();
                }
        }

        return memory_is_poisoned_n(addr, size);
}

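/*
 * Common entry point for all access checks: zero-size accesses are
 * ignored, addresses below the shadow-covered range are reported as
 * user-memory accesses, and anything poisoned is reported via
 * kasan_report().
 */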
static __always_inline void check_memory_region(unsigned long addr,
                                                size_t size, bool write)
{
        struct kasan_access_info info;

        if (unlikely(size == 0))
                return;

        if (unlikely((void *)addr <
                kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
                info.access_addr = (void *)addr;
                info.access_size = size;
                info.is_write = write;
                info.ip = _RET_IP_;
                kasan_report_user_access(&info);
                return;
        }

        if (likely(!memory_is_poisoned(addr, size)))
                return;

        kasan_report(addr, size, write, _RET_IP_);
}

void __asan_loadN(unsigned long addr, size_t size);
void __asan_storeN(unsigned long addr, size_t size);

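/*
 * The kernel's mem*() calls are not instrumented by the compiler, so
 * they are intercepted here: each wrapper validates the source and/or
 * destination region and then delegates to the uninstrumented __mem*()
 * implementation.
 */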
#undef memset
void *memset(void *addr, int c, size_t len)
{
        __asan_storeN((unsigned long)addr, len);

        return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
        __asan_loadN((unsigned long)src, len);
        __asan_storeN((unsigned long)dest, len);

        return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
        __asan_loadN((unsigned long)src, len);
        __asan_storeN((unsigned long)dest, len);

        return __memcpy(dest, src, len);
}

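/*
 * Page allocator hooks: freshly allocated pages are unpoisoned, freed
 * pages are marked with KASAN_FREE_PAGE so use-after-free of page memory
 * is caught. HighMem pages have no shadow mapping and are skipped.
 */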
void kasan_alloc_pages(struct page *page, unsigned int order)
{
        if (likely(!PageHighMem(page)))
                kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
        if (likely(!PageHighMem(page)))
                kasan_poison_shadow(page_address(page),
                                PAGE_SIZE << order,
                                KASAN_FREE_PAGE);
}

void kasan_poison_slab(struct page *page)
{
        kasan_poison_shadow(page_address(page),
                        PAGE_SIZE << compound_order(page),
                        KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_poison_shadow(object,
                        round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
                        KASAN_KMALLOC_REDZONE);
}

void kasan_slab_alloc(struct kmem_cache *cache, void *object)
{
        kasan_kmalloc(cache, object, cache->object_size);
}

void kasan_slab_free(struct kmem_cache *cache, void *object)
{
        unsigned long size = cache->object_size;
        unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);

        /* RCU slabs could be legally used after free within the RCU period */
        if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
                return;

        kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
}

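/*
 * Marks the first 'size' bytes of 'object' as accessible and poisons the
 * remainder of the allocated area, up to the rounded-up object size, as
 * a right redzone.
 */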
void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size)
{
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (unlikely(object == NULL))
                return;

        redzone_start = round_up((unsigned long)(object + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = round_up((unsigned long)object + cache->object_size,
                                KASAN_SHADOW_SCALE_SIZE);

        kasan_unpoison_shadow(object, size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_KMALLOC_REDZONE);
}
EXPORT_SYMBOL(kasan_kmalloc);

void kasan_kmalloc_large(const void *ptr, size_t size)
{
        struct page *page;
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (unlikely(ptr == NULL))
                return;

        page = virt_to_page(ptr);
        redzone_start = round_up((unsigned long)(ptr + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

        kasan_unpoison_shadow(ptr, size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_PAGE_REDZONE);
}

void kasan_krealloc(const void *object, size_t size)
{
        struct page *page;

        if (unlikely(object == ZERO_SIZE_PTR))
                return;

        page = virt_to_head_page(object);

        if (unlikely(!PageSlab(page)))
                kasan_kmalloc_large(object, size);
        else
                kasan_kmalloc(page->slab_cache, object, size);
}

void kasan_kfree(void *ptr)
{
        struct page *page;

        page = virt_to_head_page(ptr);

        if (unlikely(!PageSlab(page)))
                kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
                                KASAN_FREE_PAGE);
        else
                kasan_slab_free(page->slab_cache, ptr);
}

void kasan_kfree_large(const void *ptr)
{
        struct page *page = virt_to_page(ptr);

        kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
                        KASAN_FREE_PAGE);
}

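/*
 * Allocates real backing memory for the shadow of a module mapping and
 * tags the module's vm area with VM_KASAN so kasan_free_shadow() can
 * release the shadow when the module area is freed.
 */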
int kasan_module_alloc(void *addr, size_t size)
{
        void *ret;
        size_t shadow_size;
        unsigned long shadow_start;

        shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
        shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
                        PAGE_SIZE);

        if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
                return -EINVAL;

        ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
                        shadow_start + shadow_size,
                        GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
                        PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
                        __builtin_return_address(0));

        if (ret) {
                find_vm_area(addr)->flags |= VM_KASAN;
                return 0;
        }

        return -ENOMEM;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
        if (vm->flags & VM_KASAN)
                vfree(kasan_mem_to_shadow(vm->addr));
}

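/*
 * Globals redzoning: for each instrumented global the compiler emits a
 * kasan_global descriptor and a constructor calling
 * __asan_register_globals(); the variable itself is unpoisoned and its
 * trailing redzone is marked KASAN_GLOBAL_REDZONE.
 */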
static void register_global(struct kasan_global *global)
{
        size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

        kasan_unpoison_shadow(global->beg, global->size);

        kasan_poison_shadow(global->beg + aligned_size,
                global->size_with_redzone - aligned_size,
                KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
        int i;

        for (i = 0; i < size; i++)
                register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

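/*
 * __asan_load##size()/__asan_store##size() are the out-of-line checks
 * that the compiler inserts before each memory access. The *_noabort
 * variants expected by some compiler versions are plain aliases, since
 * the kernel reports bad accesses without aborting.
 */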
#define DEFINE_ASAN_LOAD_STORE(size)                            \
        void __asan_load##size(unsigned long addr)              \
        {                                                       \
                check_memory_region(addr, size, false);         \
        }                                                       \
        EXPORT_SYMBOL(__asan_load##size);                       \
        __alias(__asan_load##size)                              \
        void __asan_load##size##_noabort(unsigned long);        \
        EXPORT_SYMBOL(__asan_load##size##_noabort);             \
        void __asan_store##size(unsigned long addr)             \
        {                                                       \
                check_memory_region(addr, size, true);          \
        }                                                       \
        EXPORT_SYMBOL(__asan_store##size);                      \
        __alias(__asan_store##size)                             \
        void __asan_store##size##_noabort(unsigned long);       \
        EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

void __asan_loadN(unsigned long addr, size_t size)
{
        check_memory_region(addr, size, false);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
        check_memory_region(addr, size, true);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

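/*
 * There is no mechanism to populate shadow for memory that appears after
 * boot, so memory hot-add is vetoed by failing the MEM_GOING_ONLINE
 * notification.
 */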
#ifdef CONFIG_MEMORY_HOTPLUG
static int kasan_mem_notifier(struct notifier_block *nb,
                        unsigned long action, void *data)
{
        return (action == MEM_GOING_ONLINE) ? NOTIFY_BAD : NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
        pr_err("WARNING: KASan doesn't support memory hot-add\n");
        pr_err("Memory hot-add will be disabled\n");

        hotplug_memory_notifier(kasan_mem_notifier, 0);

        return 0;
}

module_init(kasan_memhotplug_init);
#endif