/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 * Andrey Konovalov <adech.fo@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include "kasan.h"
#include "../slab.h"

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	memset(shadow_start, value, shadow_end - shadow_start);
}

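/*
 * Worked example (illustrative, not load-bearing): with
 * KASAN_SHADOW_SCALE_SIZE == 8, poisoning a 64-byte region writes
 * 64 / 8 == 8 shadow bytes, since kasan_mem_to_shadow() maps each
 * 8-byte granule of memory to a single shadow byte.
 */
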
void kasan_unpoison_shadow(const void *address, size_t size)
{
	kasan_poison_shadow(address, size, 0);

	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
		*shadow = size & KASAN_SHADOW_MASK;
	}
}

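/*
 * Example of the partial-granule encoding above (illustrative): unpoisoning
 * 13 bytes at an 8-byte-aligned address clears one whole shadow byte to 0
 * (all 8 bytes accessible) and writes 13 & KASAN_SHADOW_MASK == 5 into the
 * next one, meaning only the first 5 bytes of that granule are accessible.
 */
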
static void __kasan_unpoison_stack(struct task_struct *task, void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_remaining_stack(void *sp)
{
	__kasan_unpoison_stack(current, sp);
}

/*
 * All functions below are always inlined so the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX
 * depending on memory access size X.
 */

static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}

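/*
 * In the check above, a positive shadow value k (1..7) means only the
 * first k bytes of the granule are accessible. For example (illustrative),
 * a 1-byte access at offset 5 of a granule whose shadow byte is 5 is out
 * of bounds: valid offsets are 0..4, and 5 >= 5 triggers the report.
 */
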
static __always_inline bool memory_is_poisoned_2(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 1))
			return true;

		/*
		 * If single shadow byte covers 2-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_4(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 3))
			return true;

		/*
		 * If single shadow byte covers 4-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_8(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 7))
			return true;

		/*
		 * If single shadow byte covers 8-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
	u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		u16 shadow_first_bytes = *(u16 *)shadow_addr;

		if (unlikely(shadow_first_bytes))
			return true;

		/*
		 * If two shadow bytes cover 16-byte access, we don't
		 * need to do anything more. Otherwise, test the last
		 * shadow byte.
		 */
		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
			return false;

		return memory_is_poisoned_1(addr + 15);
	}

	return false;
}

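/*
 * The _2/_4/_8/_16 variants above rely on wide shadow loads: an 8-byte
 * access spans at most two shadow bytes, so one u16 load covers both; a
 * 16-byte access spans at most three, checked as a u32 load, then a u16
 * check of the first two, then memory_is_poisoned_1() on the last byte.
 */
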
static __always_inline unsigned long bytes_is_zero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

static __always_inline unsigned long memory_is_zero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_zero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_zero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_zero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_zero(start, (end - start) % 8);
}

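/*
 * Worked example for memory_is_zero() (illustrative): scanning 35 bytes
 * starting at an address with alignment 3 checks 5 leading bytes one by
 * one, then three aligned u64 words (24 bytes), then the remaining 6
 * bytes, returning the address of the first non-zero byte, or 0.
 */
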
static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	ret = memory_is_zero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		if (unlikely(ret != (unsigned long)last_shadow ||
			((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
			return true;
	}
	return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
			return memory_is_poisoned_2(addr);
		case 4:
			return memory_is_poisoned_4(addr);
		case 8:
			return memory_is_poisoned_8(addr);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}

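/*
 * Because size is tested with __builtin_constant_p(), the switch above
 * folds away at compile time for fixed-size checks: e.g. a 4-byte access
 * compiles down to a direct memory_is_poisoned_4() call, while
 * memory_is_poisoned_n() remains only for variable-sized regions.
 */
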
static __always_inline void check_memory_region_inline(unsigned long addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (unlikely(size == 0))
		return;

	if (unlikely((void *)addr <
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		kasan_report(addr, size, write, ret_ip);
		return;
	}

	if (likely(!memory_is_poisoned(addr, size)))
		return;

	kasan_report(addr, size, write, ret_ip);
}

static void check_memory_region(unsigned long addr,
				size_t size, bool write,
				unsigned long ret_ip)
{
	check_memory_region_inline(addr, size, write, ret_ip);
}

void kasan_check_read(const void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_read);

void kasan_check_write(const void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_write);

#undef memset
void *memset(void *addr, int c, size_t len)
{
	check_memory_region((unsigned long)addr, len, true, _RET_IP_);

	return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memcpy(dest, src, len);
}

void kasan_alloc_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static size_t optimal_redzone(size_t object_size)
{
	int rz =
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
	return rz;
}

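/*
 * Examples (illustrative): a 16-byte object gets a 16-byte redzone
 * (16 <= 64 - 16); a 100-byte object gets a 64-byte redzone, since
 * 100 > 128 - 32 but 100 <= 512 - 64.
 */
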
void kasan_cache_create(struct kmem_cache *cache, size_t *size,
			unsigned long *flags)
{
	int redzone_adjust;
	int orig_size = *size;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}
	redzone_adjust = optimal_redzone(cache->object_size) -
		(*size - cache->object_size);

	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size +
					optimal_redzone(cache->object_size)));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
			*size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}

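/*
 * Resulting object layout (sketch; exact offsets depend on the cache):
 *
 *   object | kasan_alloc_meta | [kasan_free_meta] | redzone padding
 *
 * free_meta is carved out separately only when it cannot live inside the
 * freed object itself (RCU caches, constructors, or objects smaller than
 * the struct).
 */
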
void kasan_cache_shrink(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

void kasan_cache_destroy(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

void kasan_poison_slab(struct page *page)
{
	kasan_poison_shadow(page_address(page),
			PAGE_SIZE << compound_order(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
	if (cache->flags & SLAB_KASAN) {
		struct kasan_alloc_meta *alloc_info =
			get_alloc_info(cache, object);

		alloc_info->state = KASAN_STATE_INIT;
	}
}

static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
		(ptr >= (unsigned long)&__softirqentry_text_start &&
		 ptr < (unsigned long)&__softirqentry_text_end);
}

static inline void filter_irq_stacks(struct stack_trace *trace)
{
	int i;

	if (!trace->nr_entries)
		return;
	for (i = 0; i < trace->nr_entries; i++)
		if (in_irqentry_text(trace->entries[i])) {
			/* Include the irqentry function into the stack. */
			trace->nr_entries = i + 1;
			break;
		}
}

static inline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = KASAN_STACK_DEPTH,
		.skip = 0
	};

	save_stack_trace(&trace);
	filter_irq_stacks(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	return depot_save_stack(&trace, flags);
}

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = save_stack(flags);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
	kasan_kmalloc(cache, object, cache->object_size, flags);
}

static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
{
	unsigned long size = cache->object_size;
	unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return;

	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object)
{
	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return false;

	if (likely(cache->flags & SLAB_KASAN)) {
		struct kasan_alloc_meta *alloc_info;
		struct kasan_free_meta *free_info;

		alloc_info = get_alloc_info(cache, object);
		free_info = get_free_info(cache, object);

		switch (alloc_info->state) {
		case KASAN_STATE_ALLOC:
			alloc_info->state = KASAN_STATE_QUARANTINE;
			quarantine_put(free_info, cache);
			set_track(&free_info->track, GFP_NOWAIT);
			kasan_poison_slab_free(cache, object);
			return true;
		case KASAN_STATE_QUARANTINE:
		case KASAN_STATE_FREE:
			pr_err("Double free\n");
			dump_stack();
			break;
		default:
			break;
		}
	}
	return false;
}

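/*
 * State machine sketch for the switch above: objects move
 * INIT -> ALLOC (kasan_kmalloc) -> QUARANTINE (here) -> FREE (quarantine
 * reclaim). Seeing QUARANTINE or FREE in this path therefore means the
 * object is being freed a second time.
 */
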
void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
		   gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (flags & __GFP_RECLAIM)
		quarantine_reduce();

	if (unlikely(object == NULL))
		return;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(object, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);
	if (cache->flags & SLAB_KASAN) {
		struct kasan_alloc_meta *alloc_info =
			get_alloc_info(cache, object);

		alloc_info->state = KASAN_STATE_ALLOC;
		alloc_info->alloc_size = size;
		set_track(&alloc_info->track, flags);
	}
}
EXPORT_SYMBOL(kasan_kmalloc);

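/*
 * Worked example (illustrative): kmalloc(10) served from a cache with
 * object_size == 32 unpoisons bytes [0, 10) of the object, then poisons
 * [round_up(10, 8), 32) == [16, 32) as KASAN_KMALLOC_REDZONE; bytes
 * 10..15 are still caught by the partial shadow byte for their granule.
 */
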
void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (flags & __GFP_RECLAIM)
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);
}

void kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		kasan_kmalloc_large(object, size, flags);
	else
		kasan_kmalloc(page->slab_cache, object, size, flags);
}

void kasan_poison_kfree(void *ptr)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page)))
		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
				KASAN_FREE_PAGE);
	else
		kasan_poison_slab_free(page->slab_cache, ptr);
}

void kasan_kfree_large(const void *ptr)
{
	struct page *page = virt_to_page(ptr);

	kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
			KASAN_FREE_PAGE);
}

int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
			PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}

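/*
 * Sizing example (illustrative): for a 1 MB module mapping, the shadow
 * needed is 1 MB >> KASAN_SHADOW_SCALE_SHIFT == 128 KB, rounded up to
 * whole pages and mapped at the address kasan_mem_to_shadow() implies,
 * so the generated checks find it at the usual offset.
 */
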
void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(global->beg, global->size);

	kasan_poison_shadow(global->beg + aligned_size,
		global->size_with_redzone - aligned_size,
		KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

#define DEFINE_ASAN_LOAD_STORE(size)					\
	void __asan_load##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, false, _RET_IP_);\
	}								\
	EXPORT_SYMBOL(__asan_load##size);				\
	__alias(__asan_load##size)					\
	void __asan_load##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_load##size##_noabort);			\
	void __asan_store##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, true, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_store##size);				\
	__alias(__asan_store##size)					\
	void __asan_store##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

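/*
 * For instance, DEFINE_ASAN_LOAD_STORE(4) above expands to __asan_load4()
 * and __asan_store4() plus their exported _noabort aliases; with outline
 * instrumentation the compiler emits calls to these around every 4-byte
 * memory access.
 */
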
void __asan_loadN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

#ifdef CONFIG_MEMORY_HOTPLUG
static int kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	return (action == MEM_GOING_ONLINE) ? NOTIFY_BAD : NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	pr_info("WARNING: KASAN doesn't support memory hot-add\n");
	pr_info("Memory hot-add will be disabled\n");

	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

module_init(kasan_memhotplug_init);
#endif