#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>
#include <asm/sections.h>

extern pgd_t early_level4_pgt[PTRS_PER_PGD];
extern struct range pfn_mapped[E820_X_MAX];

extern unsigned char kasan_zero_page[PAGE_SIZE];
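/*
 * KASAN shadow setup for x86_64: ranges that back real memory get a
 * writable shadow allocated through vmemmap_populate(), while ranges
 * that never need a real shadow are backed by the single read-only
 * kasan_zero_page via the shared kasan_zero_{pud,pmd,pte} tables.
 */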
static int __init map_range(struct range *range)
{
        unsigned long start;
        unsigned long end;

        start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
        end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

        /*
         * end + 1 here is intentional. We check several shadow bytes in
         * advance to slightly speed up the fast path. In some rare cases
         * we could cross the boundary of the mapped shadow, so we just
         * map some more here.
         */
        return vmemmap_populate(start, end + 1, NUMA_NO_NODE);
}
static void __init clear_pgds(unsigned long start,
                        unsigned long end)
{
        for (; start < end; start += PGDIR_SIZE)
                pgd_clear(pgd_offset_k(start));
}
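/*
 * Early boot: point every shadow PGD entry at kasan_zero_pud so that
 * instrumented code can run before the real shadow is set up.
 */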
void __init kasan_map_early_shadow(pgd_t *pgd)
{
        int i;
        unsigned long start = KASAN_SHADOW_START;
        unsigned long end = KASAN_SHADOW_END;

        for (i = pgd_index(start); start < end; i++) {
                pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud)
                                | _KERNPG_TABLE);
                start += PGDIR_SIZE;
        }
}
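/*
 * The zero_*_populate() helpers walk the page tables for [addr, end)
 * and map kasan_zero_page read-only at every address in the range,
 * sharing the kasan_zero_{pte,pmd,pud} tables wherever the range
 * covers a whole PMD/PUD/PGD entry and allocating a private table for
 * the partial tail otherwise.
 */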
static int __init zero_pte_populate(pmd_t *pmd, unsigned long addr,
                                unsigned long end)
{
        pte_t *pte = pte_offset_kernel(pmd, addr);

        while (addr + PAGE_SIZE <= end) {
                WARN_ON(!pte_none(*pte));
                set_pte(pte, __pte(__pa_nodebug(kasan_zero_page)
                                        | __PAGE_KERNEL_RO));
                addr += PAGE_SIZE;
                pte = pte_offset_kernel(pmd, addr);
        }
        return 0;
}
static int __init zero_pmd_populate(pud_t *pud, unsigned long addr,
                                unsigned long end)
{
        int ret = 0;
        pmd_t *pmd = pmd_offset(pud, addr);

        while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) {
                WARN_ON(!pmd_none(*pmd));
                set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte)
                                        | __PAGE_KERNEL_RO));
                addr += PMD_SIZE;
                pmd = pmd_offset(pud, addr);
        }

        if (addr < end) {
                if (pmd_none(*pmd)) {
                        void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
                        if (!p)
                                return -ENOMEM;
                        set_pmd(pmd, __pmd(__pa_nodebug(p) | _KERNPG_TABLE));
                }
                ret = zero_pte_populate(pmd, addr, end);
        }

        return ret;
}
static int __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
                                unsigned long end)
{
        int ret = 0;
        pud_t *pud = pud_offset(pgd, addr);

        while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) {
                WARN_ON(!pud_none(*pud));
                set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd)
                                        | __PAGE_KERNEL_RO));
                addr += PUD_SIZE;
                pud = pud_offset(pgd, addr);
        }

        if (addr < end) {
                if (pud_none(*pud)) {
                        void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
                        if (!p)
                                return -ENOMEM;
                        set_pud(pud, __pud(__pa_nodebug(p) | _KERNPG_TABLE));
                }
                ret = zero_pmd_populate(pud, addr, end);
        }

        return ret;
}
static int __init zero_pgd_populate(unsigned long addr, unsigned long end)
{
        int ret = 0;
        pgd_t *pgd = pgd_offset_k(addr);

        while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) {
                WARN_ON(!pgd_none(*pgd));
                set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud)
                                        | __PAGE_KERNEL_RO));
                addr += PGDIR_SIZE;
                pgd = pgd_offset_k(addr);
        }

        if (addr < end) {
                if (pgd_none(*pgd)) {
                        void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
                        if (!p)
                                return -ENOMEM;
                        set_pgd(pgd, __pgd(__pa_nodebug(p) | _KERNPG_TABLE));
                }
                ret = zero_pud_populate(pgd, addr, end);
        }

        return ret;
}
static void __init populate_zero_shadow(const void *start, const void *end)
{
        if (zero_pgd_populate((unsigned long)start, (unsigned long)end))
                panic("kasan: unable to map zero shadow!");
}
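/*
 * With inline instrumentation a wild access to unmapped shadow shows
 * up as a general protection fault, so hint at the likely causes when
 * one is reported.
 */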
#ifdef CONFIG_KASAN_INLINE
static int kasan_die_handler(struct notifier_block *self,
                             unsigned long val,
                             void *data)
{
        if (val == DIE_GPF) {
                pr_emerg("CONFIG_KASAN_INLINE enabled");
                pr_emerg("GPF could be caused by NULL-ptr deref or user memory access");
        }
        return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
        .notifier_call = kasan_die_handler,
};
#endif
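/*
 * kasan_init() rebuilds the shadow: switch to a copy of the kernel
 * page tables (early_level4_pgt), drop the early zero shadow, map
 * real shadow for the e820-mapped ranges and for kernel text, back
 * everything else with the zero page, then switch back to
 * init_level4_pgt and enable reporting by clearing
 * init_task.kasan_depth.
 */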
void __init kasan_init(void)
{
        int i;

#ifdef CONFIG_KASAN_INLINE
        register_die_notifier(&kasan_die_notifier);
#endif

        memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
        load_cr3(early_level4_pgt);

        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

        populate_zero_shadow((void *)KASAN_SHADOW_START,
                        kasan_mem_to_shadow((void *)PAGE_OFFSET));

        for (i = 0; i < E820_X_MAX; i++) {
                if (pfn_mapped[i].end == 0)
                        break;

                if (map_range(&pfn_mapped[i]))
                        panic("kasan: unable to allocate shadow!");
        }

        populate_zero_shadow(kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
                        kasan_mem_to_shadow((void *)__START_KERNEL_map));

        vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
                        (unsigned long)kasan_mem_to_shadow(_end),
                        NUMA_NO_NODE);

        populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
                        (void *)KASAN_SHADOW_END);

        memset(kasan_zero_page, 0, PAGE_SIZE);

        load_cr3(init_level4_pgt);
        init_task.kasan_depth = 0;
}