/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>

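/*
 * Allocate memory for a module. The allocation is first attempted from
 * the dedicated module region; if that is exhausted and module PLTs are
 * enabled (so that branches which fall out of direct range can be routed
 * through a PLT entry), fall back to the whole vmalloc area. See the
 * comment below for why the fallback is omitted under KASAN.
 */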
void *module_alloc(unsigned long size)
{
	void *p;

	p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END,
				GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
				NUMA_NO_NODE, __builtin_return_address(0));

	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
	    !IS_ENABLED(CONFIG_KASAN))
		/*
		 * KASAN can only deal with module allocations being served
		 * from the reserved module region, since the remainder of
		 * the vmalloc region is already backed by zero shadow pages,
		 * and punching holes into it is non-trivial. Since the module
		 * region is not randomized when KASAN is enabled, it is even
		 * less likely that the module region gets exhausted, so we
		 * can simply omit this fallback in that case.
		 */
		p = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START,
				VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
				NUMA_NO_NODE, __builtin_return_address(0));

	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}

enum aarch64_reloc_op {
	RELOC_OP_NONE,
	RELOC_OP_ABS,
	RELOC_OP_PREL,
	RELOC_OP_PAGE,
};

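/*
 * Compute the relocated value. In the terms of the AArch64 ELF document,
 * where val is (S + A) and place is P: ABS yields S + A, PREL yields
 * S + A - P, and PAGE yields Page(S + A) - Page(P).
 */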
static u64 do_reloc(enum aarch64_reloc_op reloc_op, void *place, u64 val)
{
	switch (reloc_op) {
	case RELOC_OP_ABS:
		return val;
	case RELOC_OP_PREL:
		return val - (u64)place;
	case RELOC_OP_PAGE:
		return (val & ~0xfff) - ((u64)place & ~0xfff);
	case RELOC_OP_NONE:
		return 0;
	}

	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
	return 0;
}
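/*
 * Apply a data relocation of the given bit width. The value is written
 * first and range-checked afterwards; both the signed and the unsigned
 * interpretation of the destination are accepted, hence the asymmetric
 * [S16_MIN, U16_MAX] and [S32_MIN, U32_MAX] windows.
 */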
static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
	s64 sval = do_reloc(op, place, val);

	switch (len) {
	case 16:
		*(s16 *)place = sval;
		if (sval < S16_MIN || sval > U16_MAX)
			return -ERANGE;
		break;
	case 32:
		*(s32 *)place = sval;
		if (sval < S32_MIN || sval > U32_MAX)
			return -ERANGE;
		break;
	case 64:
		*(s64 *)place = sval;
		break;
	default:
		pr_err("Invalid length (%d) for data relocation\n", len);
		return 0;
	}
	return 0;
}

enum aarch64_insn_movw_imm_type {
	AARCH64_INSN_IMM_MOVNZ,
	AARCH64_INSN_IMM_MOVKZ,
};

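/*
 * Apply a MOVW-class relocation: patch the 16-bit immediate of a
 * MOVZ/MOVK/MOVN instruction with bits [lsb + 15:lsb] of the relocated
 * value. MOVKZ leaves the opcode untouched; MOVNZ may rewrite the
 * instruction between MOVZ and MOVN depending on the sign of the value.
 */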
static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
			   int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
	u64 imm;
	s64 sval;
	u32 insn = le32_to_cpu(*(u32 *)place);

	sval = do_reloc(op, place, val);
	imm = sval >> lsb;

	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero.
		 */
		insn &= ~(3 << 29);
		if (sval >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 * Since we've masked the opcode already, we
			 * don't need to do anything other than
			 * inverting the new immediate field.
			 */
			imm = ~imm;
		}
	}
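
	/*
	 * Worked example: sval == -2 with lsb == 0 gives
	 * imm == ~0xfffffffffffffffe == 0x1, and MOVN Xd, #0x1 loads
	 * ~0x1 == 0xfffffffffffffffe == -2, as required. The range check
	 * below then only has to validate a 16-bit immediate.
	 */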

	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
	*(u32 *)place = cpu_to_le32(insn);

	if (imm > U16_MAX)
		return -ERANGE;

	return 0;
}

static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*(u32 *)place);

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*(u32 *)place = cpu_to_le32(insn);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value: sval must now be either 0 (the value
	 * was non-negative and in range) or -1 (negative and in range),
	 * which is exactly what the unsigned comparison below tests.
	 */
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}
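/*
 * Apply all R_AARCH64_* RELA relocations in section relsec; called by
 * the generic module loader once for each relocation section.
 */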
int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	int ovf;
	bool overflow_check;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + rel[i].r_addend;

		/* Check for overflow by default. */
		overflow_check = true;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		/* Null relocations. */
		case R_ARM_NONE:
		case R_AARCH64_NONE:
			ovf = 0;
			break;

		/* Data relocations. */
		case R_AARCH64_ABS64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
			break;
		case R_AARCH64_ABS32:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
			break;
		case R_AARCH64_ABS16:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
			break;
		case R_AARCH64_PREL64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
			break;
		case R_AARCH64_PREL32:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
			break;
		case R_AARCH64_PREL16:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
			break;

		/* MOVW instruction relocations. */
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_SABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G0_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G0:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G1_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G1:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G2_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G2:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
					      AARCH64_INSN_IMM_MOVNZ);
			break;

		/* Immediate instruction relocations. */
		case R_AARCH64_LD_PREL_LO19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_ADR_PREL_LO21:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
#ifndef CONFIG_ARM64_ERRATUM_843419
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
#endif
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST16_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST32_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST64_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST128_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_TSTBR14:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
					     AARCH64_INSN_IMM_14);
			break;
		case R_AARCH64_CONDBR19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
					     AARCH64_INSN_IMM_26);

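			/*
			 * If the branch target is out of direct range and
			 * module PLTs are enabled, route the branch through
			 * a PLT entry emitted into the module.
			 */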
			if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
			    ovf == -ERANGE) {
				val = module_emit_plt_entry(me, &rel[i], sym);
				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
						     26, AARCH64_INSN_IMM_26);
			}
			break;

		default:
			pr_err("module %s: unsupported RELA relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (overflow_check && ovf == -ERANGE)
			goto overflow;
	}

	return 0;

overflow:
	pr_err("module %s: overflow in relocation type %d val %Lx\n",
	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
	return -ENOEXEC;
}
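/*
 * Run once the module is fully loaded and relocated: patch in any
 * alternative instruction sequences recorded in the module's
 * .altinstructions section.
 */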
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *se;
	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
		if (strcmp(".altinstructions", secstrs + s->sh_name) == 0) {
			apply_alternatives((void *)s->sh_addr, s->sh_size);
			return 0;
		}
	}

	return 0;
}