Commit | Line | Data |
---|---|---|
b11a64a4 JL |
1 | /* |
2 | * Copyright (C) 2013 Huawei Ltd. | |
3 | * Author: Jiang Liu <liuj97@gmail.com> | |
4 | * | |
617d2fbc ZSL |
5 | * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com> |
6 | * | |
b11a64a4 JL |
7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | |
9 | * published by the Free Software Foundation. | |
10 | * | |
11 | * This program is distributed in the hope that it will be useful, | |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
14 | * GNU General Public License for more details. | |
15 | * | |
16 | * You should have received a copy of the GNU General Public License | |
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | |
18 | */ | |
5c5bf25d | 19 | #include <linux/bitops.h> |
b11a64a4 JL |
20 | #include <linux/compiler.h> |
21 | #include <linux/kernel.h> | |
ae164807 JL |
22 | #include <linux/smp.h> |
23 | #include <linux/stop_machine.h> | |
24 | #include <linux/uaccess.h> | |
25 | #include <asm/cacheflush.h> | |
b11a64a4 JL |
26 | #include <asm/insn.h> |
27 | ||
617d2fbc ZSL |
/*
 * "sf" bit (bit 31) of a data-processing instruction: 1 selects the 64-bit
 * variant, 0 the 32-bit variant.
 */
#define AARCH64_INSN_SF_BIT	BIT(31)

b11a64a4 JL |
30 | static int aarch64_insn_encoding_class[] = { |
31 | AARCH64_INSN_CLS_UNKNOWN, | |
32 | AARCH64_INSN_CLS_UNKNOWN, | |
33 | AARCH64_INSN_CLS_UNKNOWN, | |
34 | AARCH64_INSN_CLS_UNKNOWN, | |
35 | AARCH64_INSN_CLS_LDST, | |
36 | AARCH64_INSN_CLS_DP_REG, | |
37 | AARCH64_INSN_CLS_LDST, | |
38 | AARCH64_INSN_CLS_DP_FPSIMD, | |
39 | AARCH64_INSN_CLS_DP_IMM, | |
40 | AARCH64_INSN_CLS_DP_IMM, | |
41 | AARCH64_INSN_CLS_BR_SYS, | |
42 | AARCH64_INSN_CLS_BR_SYS, | |
43 | AARCH64_INSN_CLS_LDST, | |
44 | AARCH64_INSN_CLS_DP_REG, | |
45 | AARCH64_INSN_CLS_LDST, | |
46 | AARCH64_INSN_CLS_DP_FPSIMD, | |
47 | }; | |
48 | ||
49 | enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn) | |
50 | { | |
51 | return aarch64_insn_encoding_class[(insn >> 25) & 0xf]; | |
52 | } | |
53 | ||
54 | /* NOP is an alias of HINT */ | |
55 | bool __kprobes aarch64_insn_is_nop(u32 insn) | |
56 | { | |
57 | if (!aarch64_insn_is_hint(insn)) | |
58 | return false; | |
59 | ||
60 | switch (insn & 0xFE0) { | |
61 | case AARCH64_INSN_HINT_YIELD: | |
62 | case AARCH64_INSN_HINT_WFE: | |
63 | case AARCH64_INSN_HINT_WFI: | |
64 | case AARCH64_INSN_HINT_SEV: | |
65 | case AARCH64_INSN_HINT_SEVL: | |
66 | return false; | |
67 | default: | |
68 | return true; | |
69 | } | |
70 | } | |
71 | ||
ae164807 JL |
72 | /* |
73 | * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always | |
74 | * little-endian. | |
75 | */ | |
76 | int __kprobes aarch64_insn_read(void *addr, u32 *insnp) | |
77 | { | |
78 | int ret; | |
79 | u32 val; | |
80 | ||
81 | ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE); | |
82 | if (!ret) | |
83 | *insnp = le32_to_cpu(val); | |
84 | ||
85 | return ret; | |
86 | } | |
87 | ||
88 | int __kprobes aarch64_insn_write(void *addr, u32 insn) | |
89 | { | |
90 | insn = cpu_to_le32(insn); | |
91 | return probe_kernel_write(addr, &insn, AARCH64_INSN_SIZE); | |
92 | } | |
93 | ||
b11a64a4 JL |
94 | static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn) |
95 | { | |
96 | if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS) | |
97 | return false; | |
98 | ||
99 | return aarch64_insn_is_b(insn) || | |
100 | aarch64_insn_is_bl(insn) || | |
101 | aarch64_insn_is_svc(insn) || | |
102 | aarch64_insn_is_hvc(insn) || | |
103 | aarch64_insn_is_smc(insn) || | |
104 | aarch64_insn_is_brk(insn) || | |
105 | aarch64_insn_is_nop(insn); | |
106 | } | |
107 | ||
108 | /* | |
109 | * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a | |
110 | * Section B2.6.5 "Concurrent modification and execution of instructions": | |
111 | * Concurrent modification and execution of instructions can lead to the | |
112 | * resulting instruction performing any behavior that can be achieved by | |
113 | * executing any sequence of instructions that can be executed from the | |
114 | * same Exception level, except where the instruction before modification | |
115 | * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC, | |
116 | * or SMC instruction. | |
117 | */ | |
118 | bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn) | |
119 | { | |
120 | return __aarch64_insn_hotpatch_safe(old_insn) && | |
121 | __aarch64_insn_hotpatch_safe(new_insn); | |
122 | } | |
ae164807 JL |
123 | |
124 | int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn) | |
125 | { | |
126 | u32 *tp = addr; | |
127 | int ret; | |
128 | ||
129 | /* A64 instructions must be word aligned */ | |
130 | if ((uintptr_t)tp & 0x3) | |
131 | return -EINVAL; | |
132 | ||
133 | ret = aarch64_insn_write(tp, insn); | |
134 | if (ret == 0) | |
135 | flush_icache_range((uintptr_t)tp, | |
136 | (uintptr_t)tp + AARCH64_INSN_SIZE); | |
137 | ||
138 | return ret; | |
139 | } | |
140 | ||
/* Work descriptor handed to stop_machine() by aarch64_insn_patch_text_sync(). */
struct aarch64_insn_patch {
	void		**text_addrs;	/* addresses to patch */
	u32		*new_insns;	/* replacement instructions, one per address */
	int		insn_cnt;	/* number of entries in the two arrays */
	atomic_t	cpu_count;	/* rendezvous counter; set to -1 when patching is done */
};
147 | ||
/*
 * stop_machine() callback: exactly one CPU (the first to arrive) applies
 * all patches while every other CPU spins; the spinners then execute an
 * ISB so they refetch the modified instruction stream.
 */
static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		/* Apply each patch in order, stopping at the first failure. */
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/*
		 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
		 * which ends with "dsb; isb" pair guaranteeing global
		 * visibility.
		 */
		/* -1 releases the secondary CPUs spinning below. */
		atomic_set(&pp->cpu_count, -1);
	} else {
		/* Secondary CPU: wait for the master to finish patching. */
		while (atomic_read(&pp->cpu_count) != -1)
			cpu_relax();
		/* Resynchronize this CPU's instruction fetch. */
		isb();
	}

	return ret;
}
172 | ||
173 | int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt) | |
174 | { | |
175 | struct aarch64_insn_patch patch = { | |
176 | .text_addrs = addrs, | |
177 | .new_insns = insns, | |
178 | .insn_cnt = cnt, | |
179 | .cpu_count = ATOMIC_INIT(0), | |
180 | }; | |
181 | ||
182 | if (cnt <= 0) | |
183 | return -EINVAL; | |
184 | ||
185 | return stop_machine(aarch64_insn_patch_text_cb, &patch, | |
186 | cpu_online_mask); | |
187 | } | |
188 | ||
189 | int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt) | |
190 | { | |
191 | int ret; | |
192 | u32 insn; | |
193 | ||
194 | /* Unsafe to patch multiple instructions without synchronizaiton */ | |
195 | if (cnt == 1) { | |
196 | ret = aarch64_insn_read(addrs[0], &insn); | |
197 | if (ret) | |
198 | return ret; | |
199 | ||
200 | if (aarch64_insn_hotpatch_safe(insn, insns[0])) { | |
201 | /* | |
202 | * ARMv8 architecture doesn't guarantee all CPUs see | |
203 | * the new instruction after returning from function | |
204 | * aarch64_insn_patch_text_nosync(). So send IPIs to | |
205 | * all other CPUs to achieve instruction | |
206 | * synchronization. | |
207 | */ | |
208 | ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]); | |
209 | kick_all_cpus_sync(); | |
210 | return ret; | |
211 | } | |
212 | } | |
213 | ||
214 | return aarch64_insn_patch_text_sync(addrs, insns, cnt); | |
215 | } | |
c84fced8 JL |
216 | |
217 | u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, | |
218 | u32 insn, u64 imm) | |
219 | { | |
220 | u32 immlo, immhi, lomask, himask, mask; | |
221 | int shift; | |
222 | ||
223 | switch (type) { | |
224 | case AARCH64_INSN_IMM_ADR: | |
225 | lomask = 0x3; | |
226 | himask = 0x7ffff; | |
227 | immlo = imm & lomask; | |
228 | imm >>= 2; | |
229 | immhi = imm & himask; | |
230 | imm = (immlo << 24) | (immhi); | |
231 | mask = (lomask << 24) | (himask); | |
232 | shift = 5; | |
233 | break; | |
234 | case AARCH64_INSN_IMM_26: | |
235 | mask = BIT(26) - 1; | |
236 | shift = 0; | |
237 | break; | |
238 | case AARCH64_INSN_IMM_19: | |
239 | mask = BIT(19) - 1; | |
240 | shift = 5; | |
241 | break; | |
242 | case AARCH64_INSN_IMM_16: | |
243 | mask = BIT(16) - 1; | |
244 | shift = 5; | |
245 | break; | |
246 | case AARCH64_INSN_IMM_14: | |
247 | mask = BIT(14) - 1; | |
248 | shift = 5; | |
249 | break; | |
250 | case AARCH64_INSN_IMM_12: | |
251 | mask = BIT(12) - 1; | |
252 | shift = 10; | |
253 | break; | |
254 | case AARCH64_INSN_IMM_9: | |
255 | mask = BIT(9) - 1; | |
256 | shift = 12; | |
257 | break; | |
258 | default: | |
259 | pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n", | |
260 | type); | |
261 | return 0; | |
262 | } | |
263 | ||
264 | /* Update the immediate field. */ | |
265 | insn &= ~(mask << shift); | |
266 | insn |= (imm & mask) << shift; | |
267 | ||
268 | return insn; | |
269 | } | |
5c5bf25d | 270 | |
617d2fbc ZSL |
271 | static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type, |
272 | u32 insn, | |
273 | enum aarch64_insn_register reg) | |
274 | { | |
275 | int shift; | |
276 | ||
277 | if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) { | |
278 | pr_err("%s: unknown register encoding %d\n", __func__, reg); | |
279 | return 0; | |
280 | } | |
281 | ||
282 | switch (type) { | |
283 | case AARCH64_INSN_REGTYPE_RT: | |
284 | shift = 0; | |
285 | break; | |
c0cafbae ZSL |
286 | case AARCH64_INSN_REGTYPE_RN: |
287 | shift = 5; | |
288 | break; | |
617d2fbc ZSL |
289 | default: |
290 | pr_err("%s: unknown register type encoding %d\n", __func__, | |
291 | type); | |
292 | return 0; | |
293 | } | |
294 | ||
295 | insn &= ~(GENMASK(4, 0) << shift); | |
296 | insn |= reg << shift; | |
297 | ||
298 | return insn; | |
299 | } | |
300 | ||
/*
 * Compute the signed byte offset from @pc to @addr for a PC-relative
 * branch, BUG()ing on misalignment or an offset outside [-range, range).
 */
static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	/*
	 * PC: A 64-bit Program Counter holding the address of the current
	 * instruction. A64 instructions must be word-aligned.
	 */
	BUG_ON((pc | addr) & 0x3);

	offset = (long)addr - (long)pc;
	BUG_ON(offset < -range || offset >= range);

	return offset;
}
317 | ||
318 | u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr, | |
319 | enum aarch64_insn_branch_type type) | |
320 | { | |
321 | u32 insn; | |
322 | long offset; | |
323 | ||
5c5bf25d JL |
324 | /* |
325 | * B/BL support [-128M, 128M) offset | |
326 | * ARM64 virtual address arrangement guarantees all kernel and module | |
327 | * texts are within +/-128M. | |
328 | */ | |
617d2fbc | 329 | offset = branch_imm_common(pc, addr, SZ_128M); |
5c5bf25d | 330 | |
c0cafbae ZSL |
331 | switch (type) { |
332 | case AARCH64_INSN_BRANCH_LINK: | |
5c5bf25d | 333 | insn = aarch64_insn_get_bl_value(); |
c0cafbae ZSL |
334 | break; |
335 | case AARCH64_INSN_BRANCH_NOLINK: | |
5c5bf25d | 336 | insn = aarch64_insn_get_b_value(); |
c0cafbae ZSL |
337 | break; |
338 | default: | |
339 | BUG_ON(1); | |
340 | } | |
5c5bf25d JL |
341 | |
342 | return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn, | |
343 | offset >> 2); | |
344 | } | |
345 | ||
617d2fbc ZSL |
346 | u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr, |
347 | enum aarch64_insn_register reg, | |
348 | enum aarch64_insn_variant variant, | |
349 | enum aarch64_insn_branch_type type) | |
350 | { | |
351 | u32 insn; | |
352 | long offset; | |
353 | ||
354 | offset = branch_imm_common(pc, addr, SZ_1M); | |
355 | ||
356 | switch (type) { | |
357 | case AARCH64_INSN_BRANCH_COMP_ZERO: | |
358 | insn = aarch64_insn_get_cbz_value(); | |
359 | break; | |
360 | case AARCH64_INSN_BRANCH_COMP_NONZERO: | |
361 | insn = aarch64_insn_get_cbnz_value(); | |
362 | break; | |
363 | default: | |
364 | BUG_ON(1); | |
365 | } | |
366 | ||
367 | switch (variant) { | |
368 | case AARCH64_INSN_VARIANT_32BIT: | |
369 | break; | |
370 | case AARCH64_INSN_VARIANT_64BIT: | |
371 | insn |= AARCH64_INSN_SF_BIT; | |
372 | break; | |
373 | default: | |
374 | BUG_ON(1); | |
375 | } | |
376 | ||
377 | insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg); | |
378 | ||
379 | return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn, | |
380 | offset >> 2); | |
381 | } | |
382 | ||
5c5bf25d JL |
383 | u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op) |
384 | { | |
385 | return aarch64_insn_get_hint_value() | op; | |
386 | } | |
387 | ||
388 | u32 __kprobes aarch64_insn_gen_nop(void) | |
389 | { | |
390 | return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP); | |
391 | } | |
c0cafbae ZSL |
392 | |
393 | u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg, | |
394 | enum aarch64_insn_branch_type type) | |
395 | { | |
396 | u32 insn; | |
397 | ||
398 | switch (type) { | |
399 | case AARCH64_INSN_BRANCH_NOLINK: | |
400 | insn = aarch64_insn_get_br_value(); | |
401 | break; | |
402 | case AARCH64_INSN_BRANCH_LINK: | |
403 | insn = aarch64_insn_get_blr_value(); | |
404 | break; | |
405 | case AARCH64_INSN_BRANCH_RETURN: | |
406 | insn = aarch64_insn_get_ret_value(); | |
407 | break; | |
408 | default: | |
409 | BUG_ON(1); | |
410 | } | |
411 | ||
412 | return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg); | |
413 | } |