Commit | Line | Data |
---|---|---|
3a94707d KC |
1 | /* |
2 | * This code is used on x86_64 to create page table identity mappings on | |
3 | * demand by building up a new set of page tables (or appending to the | |
4 | * existing ones), and then switching over to them when ready. | |
11fdf97a KC |
5 | * |
6 | * Copyright (C) 2015-2016 Yinghai Lu | |
7 | * Copyright (C) 2016 Kees Cook | |
3a94707d KC |
8 | */ |
9 | ||
10 | /* | |
11 | * Since we're dealing with identity mappings, physical and virtual | |
12 | * addresses are the same, so override these defines which are ultimately | |
13 | * used by the headers in misc.h. | |
14 | */ | |
15 | #define __pa(x) ((unsigned long)(x)) | |
16 | #define __va(x) ((void *)((unsigned long)(x))) | |
17 | ||
18 | #include "misc.h" | |
19 | ||
20 | /* These actually do the work of building the kernel identity maps. */ | |
21 | #include <asm/init.h> | |
22 | #include <asm/pgtable.h> | |
021182e5 TG |
23 | /* Use the static base for this part of the boot process */ |
24 | #undef __PAGE_OFFSET | |
25 | #define __PAGE_OFFSET __PAGE_OFFSET_BASE | |
3a94707d KC |
26 | #include "../../mm/ident_map.c" |
27 | ||
/*
 * Used by pgtable.h asm code to force instruction serialization.
 * Only its address matters (referenced from asm constraints); the
 * value is never read or written by C code.
 */
unsigned long __force_order;
30 | ||
/*
 * Used to track our page table allocation area: a simple bump
 * allocator over a fixed buffer, consumed by alloc_pgt_page() below.
 */
struct alloc_pgt_data {
	unsigned char *pgt_buf;		/* base of the page-table buffer */
	unsigned long pgt_buf_size;	/* total buffer size, in bytes */
	unsigned long pgt_buf_offset;	/* offset of the next free byte */
};
37 | ||
38 | /* | |
39 | * Allocates space for a page table entry, using struct alloc_pgt_data | |
40 | * above. Besides the local callers, this is used as the allocation | |
41 | * callback in mapping_info below. | |
42 | */ | |
43 | static void *alloc_pgt_page(void *context) | |
44 | { | |
45 | struct alloc_pgt_data *pages = (struct alloc_pgt_data *)context; | |
46 | unsigned char *entry; | |
47 | ||
48 | /* Validate there is space available for a new page. */ | |
49 | if (pages->pgt_buf_offset >= pages->pgt_buf_size) { | |
50 | debug_putstr("out of pgt_buf in " __FILE__ "!?\n"); | |
51 | debug_putaddr(pages->pgt_buf_offset); | |
52 | debug_putaddr(pages->pgt_buf_size); | |
53 | return NULL; | |
54 | } | |
55 | ||
56 | entry = pages->pgt_buf + pages->pgt_buf_offset; | |
57 | pages->pgt_buf_offset += PAGE_SIZE; | |
58 | ||
59 | return entry; | |
60 | } | |
61 | ||
/* Used to track our allocated page tables. */
static struct alloc_pgt_data pgt_data;

/* The top level page table entry pointer (physical address of the PGD). */
static unsigned long level4p;

/*
 * Mapping information structure passed to kernel_ident_mapping_init().
 * Due to relocation, pointers must be assigned at run time not build time,
 * so .alloc_pgt_page and .context are filled in by
 * initialize_identity_maps() below.
 */
static struct x86_mapping_info mapping_info = {
	.pmd_flag = __PAGE_KERNEL_LARGE_EXEC,
};
75 | ||
3a94707d | 76 | /* Locates and clears a region for a new top level page table. */ |
11fdf97a | 77 | void initialize_identity_maps(void) |
3a94707d | 78 | { |
11fdf97a KC |
79 | /* Init mapping_info with run-time function/buffer pointers. */ |
80 | mapping_info.alloc_pgt_page = alloc_pgt_page; | |
81 | mapping_info.context = &pgt_data; | |
82 | ||
3a94707d KC |
83 | /* |
84 | * It should be impossible for this not to already be true, | |
85 | * but since calling this a second time would rewind the other | |
86 | * counters, let's just make sure this is reset too. | |
87 | */ | |
88 | pgt_data.pgt_buf_offset = 0; | |
89 | ||
90 | /* | |
91 | * If we came here via startup_32(), cr3 will be _pgtable already | |
92 | * and we must append to the existing area instead of entirely | |
93 | * overwriting it. | |
94 | */ | |
95 | level4p = read_cr3(); | |
96 | if (level4p == (unsigned long)_pgtable) { | |
97 | debug_putstr("booted via startup_32()\n"); | |
98 | pgt_data.pgt_buf = _pgtable + BOOT_INIT_PGT_SIZE; | |
99 | pgt_data.pgt_buf_size = BOOT_PGT_SIZE - BOOT_INIT_PGT_SIZE; | |
100 | memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size); | |
101 | } else { | |
102 | debug_putstr("booted via startup_64()\n"); | |
103 | pgt_data.pgt_buf = _pgtable; | |
104 | pgt_data.pgt_buf_size = BOOT_PGT_SIZE; | |
105 | memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size); | |
106 | level4p = (unsigned long)alloc_pgt_page(&pgt_data); | |
107 | } | |
108 | } | |
109 | ||
3a94707d KC |
110 | /* |
111 | * Adds the specified range to what will become the new identity mappings. | |
112 | * Once all ranges have been added, the new mapping is activated by calling | |
113 | * finalize_identity_maps() below. | |
114 | */ | |
115 | void add_identity_map(unsigned long start, unsigned long size) | |
116 | { | |
117 | unsigned long end = start + size; | |
118 | ||
3a94707d KC |
119 | /* Align boundary to 2M. */ |
120 | start = round_down(start, PMD_SIZE); | |
121 | end = round_up(end, PMD_SIZE); | |
122 | if (start >= end) | |
123 | return; | |
124 | ||
125 | /* Build the mapping. */ | |
126 | kernel_ident_mapping_init(&mapping_info, (pgd_t *)level4p, | |
127 | start, end); | |
128 | } | |
129 | ||
/*
 * This switches the page tables to the new level4 that has been built
 * via calls to add_identity_map() above. If booted via startup_32(),
 * this is effectively a no-op (cr3 already points at these tables).
 */
void finalize_identity_maps(void)
{
	/* Loading cr3 activates level4p and flushes non-global TLB entries. */
	write_cr3(level4p);
}