/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * A code-rewriter that enables instruction single-stepping.
 */

#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>
#include <linux/mman.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/prctl.h>
#include <linux/context_tracking.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <arch/abi.h>
#include <arch/spr_def.h>
#include <arch/opcode.h>

#ifndef __tilegx__   /* Hardware support for single step unavailable. */

#define signExtend17(val) sign_extend((val), 17)
#define TILE_X1_MASK (0xffffffffULL << 31)
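
/*
 * TILE_X1_MASK covers the encoding fields of a bundle's X1 slot, so
 * "bundle &= ~TILE_X1_MASK" erases the X1 instruction while leaving
 * the X0 slot intact; the helpers below then OR in a replacement
 * X1 instruction built with the create_*_X1() accessors.
 */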

enum mem_op {
        MEMOP_NONE,
        MEMOP_LOAD,
        MEMOP_STORE,
        MEMOP_LOAD_POSTINCR,
        MEMOP_STORE_POSTINCR
};

static inline tilepro_bundle_bits set_BrOff_X1(tilepro_bundle_bits n,
        s32 offset)
{
        tilepro_bundle_bits result;

        /* mask out the old offset */
        tilepro_bundle_bits mask = create_BrOff_X1(-1);
        result = n & (~mask);

        /* or in the new offset */
        result |= create_BrOff_X1(offset);

        return result;
}

static inline tilepro_bundle_bits move_X1(tilepro_bundle_bits n, int dest,
        int src)
{
        tilepro_bundle_bits result;
        tilepro_bundle_bits op;

        result = n & (~TILE_X1_MASK);

        op = create_Opcode_X1(SPECIAL_0_OPCODE_X1) |
                create_RRROpcodeExtension_X1(OR_SPECIAL_0_OPCODE_X1) |
                create_Dest_X1(dest) |
                create_SrcB_X1(TREG_ZERO) |
                create_SrcA_X1(src);

        result |= op;
        return result;
}
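
/*
 * "move dest, src" is encoded on X1 as "or dest, src, zero", so moving
 * zero to zero (nop_X1 below) yields an X1 slot that does nothing:
 * the standard way to knock an X1 instruction out of a bundle.
 */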

static inline tilepro_bundle_bits nop_X1(tilepro_bundle_bits n)
{
        return move_X1(n, TREG_ZERO, TREG_ZERO);
}

static inline tilepro_bundle_bits addi_X1(
        tilepro_bundle_bits n, int dest, int src, int imm)
{
        n &= ~TILE_X1_MASK;

        n |= (create_SrcA_X1(src) |
              create_Dest_X1(dest) |
              create_Imm8_X1(imm) |
              create_S_X1(0) |
              create_Opcode_X1(IMM_0_OPCODE_X1) |
              create_ImmOpcodeExtension_X1(ADDI_IMM_0_OPCODE_X1));

        return n;
}

static tilepro_bundle_bits rewrite_load_store_unaligned(
                struct single_step_state *state,
                tilepro_bundle_bits bundle,
                struct pt_regs *regs,
                enum mem_op mem_op,
                int size, int sign_ext)
{
        unsigned char __user *addr;
        int val_reg, addr_reg, err, val;
        int align_ctl;

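        /*
         * The system-wide unaligned_fixup mode can be overridden per
         * task via prctl(PR_SET_UNALIGN): 1 requests a silent fixup,
         * 0 requests a SIGBUS instead.
         */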
        align_ctl = unaligned_fixup;
        switch (task_thread_info(current)->align_ctl) {
        case PR_UNALIGN_NOPRINT:
                align_ctl = 1;
                break;
        case PR_UNALIGN_SIGBUS:
                align_ctl = 0;
                break;
        }

        /* Get address and value registers */
        if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) {
                addr_reg = get_SrcA_Y2(bundle);
                val_reg = get_SrcBDest_Y2(bundle);
        } else if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
                addr_reg = get_SrcA_X1(bundle);
                val_reg = get_Dest_X1(bundle);
        } else {
                addr_reg = get_SrcA_X1(bundle);
                val_reg = get_SrcB_X1(bundle);
        }

        /*
         * If registers are not GPRs, don't try to handle it.
         *
         * FIXME: we could handle non-GPR loads by getting the real value
         * from memory, writing it to the single step buffer, using a
         * temp_reg to hold a pointer to that memory, then executing that
         * instruction and resetting temp_reg.  For non-GPR stores, it's a
         * little trickier; we could use the single step buffer for that
         * too, but we'd have to add some more state bits so that we could
         * call back in here to copy that value to the real target.  For
         * now, we just handle the simple case.
         */
        if ((val_reg >= PTREGS_NR_GPRS &&
             (val_reg != TREG_ZERO ||
              mem_op == MEMOP_LOAD ||
              mem_op == MEMOP_LOAD_POSTINCR)) ||
            addr_reg >= PTREGS_NR_GPRS)
                return bundle;

        /* If it's aligned, don't handle it specially */
        addr = (void __user *)regs->regs[addr_reg];
        if (((unsigned long)addr % size) == 0)
                return bundle;

        /*
         * Return SIGBUS with the unaligned address, if requested.
         * Note that we return SIGBUS even for completely invalid addresses
         * as long as they are in fact unaligned; this matches what the
         * tilepro hardware would be doing, if it could provide us with the
         * actual bad address in an SPR, which it doesn't.
         */
        if (align_ctl == 0) {
                siginfo_t info = {
                        .si_signo = SIGBUS,
                        .si_code = BUS_ADRALN,
                        .si_addr = addr
                };
                trace_unhandled_signal("unaligned trap", regs,
                                       (unsigned long)addr, SIGBUS);
                force_sig_info(info.si_signo, &info, current);
                return (tilepro_bundle_bits) 0;
        }

        /* Handle unaligned load/store */
        if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
                unsigned short val_16;
                switch (size) {
                case 2:
                        err = copy_from_user(&val_16, addr, sizeof(val_16));
                        val = sign_ext ? ((short)val_16) : val_16;
                        break;
                case 4:
                        err = copy_from_user(&val, addr, sizeof(val));
                        break;
                default:
                        BUG();
                }
                if (err == 0) {
                        state->update_reg = val_reg;
                        state->update_value = val;
                        state->update = 1;
                }
        } else {
                unsigned short val_16;
                val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg];
                switch (size) {
                case 2:
                        val_16 = val;
                        err = copy_to_user(addr, &val_16, sizeof(val_16));
                        break;
                case 4:
                        err = copy_to_user(addr, &val, sizeof(val));
                        break;
                default:
                        BUG();
                }
        }

        if (err) {
                siginfo_t info = {
                        .si_signo = SIGBUS,
                        .si_code = BUS_ADRALN,
                        .si_addr = addr
                };
                trace_unhandled_signal("bad address for unaligned fixup", regs,
                                       (unsigned long)addr, SIGBUS);
                force_sig_info(info.si_signo, &info, current);
                return (tilepro_bundle_bits) 0;
        }

        if (unaligned_printk || unaligned_fixup_count == 0) {
                pr_info("Process %d/%s: PC %#lx: Fixup of unaligned %s at %#lx\n",
                        current->pid, current->comm, regs->pc,
                        mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR ?
                        "load" : "store",
                        (unsigned long)addr);
                if (!unaligned_printk) {
#define P pr_info
P("\n");
P("Unaligned fixups in the kernel will slow your application considerably.\n");
P("To find them, write a \"1\" to /proc/sys/tile/unaligned_fixup/printk,\n");
P("which requests the kernel show all unaligned fixups, or write a \"0\"\n");
P("to /proc/sys/tile/unaligned_fixup/enabled, in which case each unaligned\n");
P("access will become a SIGBUS you can debug. No further warnings will be\n");
P("shown so as to avoid additional slowdown, but you can track the number\n");
P("of fixups performed via /proc/sys/tile/unaligned_fixup/count.\n");
P("Use the tile-addr2line command (see \"info addr2line\") to decode PCs.\n");
P("\n");
#undef P
                }
        }
        ++unaligned_fixup_count;

        if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) {
                /* Convert the Y2 instruction to a prefetch. */
                bundle &= ~(create_SrcBDest_Y2(-1) |
                            create_Opcode_Y2(-1));
                bundle |= (create_SrcBDest_Y2(TREG_ZERO) |
                           create_Opcode_Y2(LW_OPCODE_Y2));
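                /*
                 * (An lw whose destination is the zero register discards
                 * its result, so the rewritten bundle merely touches the
                 * cache line, like a prefetch.)
                 */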
        /* Replace the load postincr with an addi */
        } else if (mem_op == MEMOP_LOAD_POSTINCR) {
                bundle = addi_X1(bundle, addr_reg, addr_reg,
                                 get_Imm8_X1(bundle));
        /* Replace the store postincr with an addi */
        } else if (mem_op == MEMOP_STORE_POSTINCR) {
                bundle = addi_X1(bundle, addr_reg, addr_reg,
                                 get_Dest_Imm8_X1(bundle));
        } else {
                /* Convert the X1 instruction to a nop. */
                bundle &= ~(create_Opcode_X1(-1) |
                            create_UnShOpcodeExtension_X1(-1) |
                            create_UnOpcodeExtension_X1(-1));
                bundle |= (create_Opcode_X1(SHUN_0_OPCODE_X1) |
                           create_UnShOpcodeExtension_X1(
                                   UN_0_SHUN_0_OPCODE_X1) |
                           create_UnOpcodeExtension_X1(
                                   NOP_UN_0_SHUN_0_OPCODE_X1));
        }

        return bundle;
}

/*
 * Called after execve() has started the new image.  This allows us
 * to reset the info state.  Note that the mmap'ed memory, if there
 * was any, has already been unmapped by the exec.
 */
void single_step_execve(void)
{
        struct thread_info *ti = current_thread_info();
        kfree(ti->step_state);
        ti->step_state = NULL;
}

/*
 * single_step_once() - entry point when single stepping has been triggered.
 * @regs: The machine register state
 *
 * When we arrive at this routine via a trampoline, the single step
 * engine copies the executing bundle to the single step buffer.
 * If the instruction is a conditional branch, then the target is
 * reset to one past the next instruction.  If the instruction
 * sets the lr, then that is noted.  If the instruction is a jump
 * or call, then the new target pc is preserved and the current
 * bundle instruction set to null.
 *
 * The necessary post-single-step rewriting information is stored in
 * the single_step_state structure.  We use data-segment values because
 * the stack will be rewound when we run the rewritten single-stepped
 * instruction.
 */
void single_step_once(struct pt_regs *regs)
{
        extern tilepro_bundle_bits __single_step_ill_insn;
        extern tilepro_bundle_bits __single_step_j_insn;
        extern tilepro_bundle_bits __single_step_addli_insn;
        extern tilepro_bundle_bits __single_step_auli_insn;
        struct thread_info *info = (void *)current_thread_info();
        struct single_step_state *state = info->step_state;
        int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
        tilepro_bundle_bits __user *buffer, *pc;
        tilepro_bundle_bits bundle;
        int temp_reg;
        int target_reg = TREG_LR;
        int err;
        enum mem_op mem_op = MEMOP_NONE;
        int size = 0, sign_ext = 0;     /* happy compiler */
        int align_ctl;

        align_ctl = unaligned_fixup;
        switch (task_thread_info(current)->align_ctl) {
        case PR_UNALIGN_NOPRINT:
                align_ctl = 1;
                break;
        case PR_UNALIGN_SIGBUS:
                align_ctl = 0;
                break;
        }

        asm(
"       .pushsection .rodata.single_step\n"
"       .align 8\n"
"       .globl  __single_step_ill_insn\n"
"__single_step_ill_insn:\n"
"       ill\n"
"       .globl  __single_step_addli_insn\n"
"__single_step_addli_insn:\n"
"       { nop; addli r0, zero, 0 }\n"
"       .globl  __single_step_auli_insn\n"
"__single_step_auli_insn:\n"
"       { nop; auli r0, r0, 0 }\n"
"       .globl  __single_step_j_insn\n"
"__single_step_j_insn:\n"
"       j .\n"
"       .popsection\n"
        );

        /*
         * Enable interrupts here to allow touching userspace and the like.
         * The callers expect this: do_trap() already has interrupts
         * enabled, and do_work_pending() handles functions that enable
         * interrupts internally.
         */
        local_irq_enable();

        if (state == NULL) {
                /* allocate the per-thread single-step state */
                state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL);
                if (state == NULL) {
                        pr_err("Out of kernel memory trying to single-step\n");
                        return;
                }

                /* allocate a cache line of writable, executable memory */
                buffer = (void __user *) vm_mmap(NULL, 0, 64,
                                                 PROT_EXEC | PROT_READ | PROT_WRITE,
                                                 MAP_PRIVATE | MAP_ANONYMOUS,
                                                 0);

                if (IS_ERR((void __force *)buffer)) {
                        kfree(state);
                        pr_err("Out of kernel pages trying to single-step\n");
                        return;
                }

                state->buffer = buffer;
                state->is_enabled = 0;

                info->step_state = state;

                /* Validate our stored instruction patterns */
                BUG_ON(get_Opcode_X1(__single_step_addli_insn) !=
                       ADDLI_OPCODE_X1);
                BUG_ON(get_Opcode_X1(__single_step_auli_insn) !=
                       AULI_OPCODE_X1);
                BUG_ON(get_SrcA_X1(__single_step_addli_insn) != TREG_ZERO);
                BUG_ON(get_Dest_X1(__single_step_addli_insn) != 0);
                BUG_ON(get_JOffLong_X1(__single_step_j_insn) != 0);
        }

        /*
         * If we are returning from a syscall, we still haven't hit the
         * "ill" for the swint1 instruction.  So back the PC up to be
         * pointing at the swint1, but we'll actually return directly
         * back to the "ill" so we come back in via SIGILL as if we
         * had "executed" the swint1 without ever being in kernel space.
         */
        if (regs->faultnum == INT_SWINT_1)
                regs->pc -= 8;

        pc = (tilepro_bundle_bits __user *)(regs->pc);
        if (get_user(bundle, pc) != 0) {
                pr_err("Couldn't read instruction at %p trying to step\n", pc);
                return;
        }

        /* We'll follow the instruction with 2 ill op bundles */
        state->orig_pc = (unsigned long)pc;
        state->next_pc = (unsigned long)(pc + 1);
        state->branch_next_pc = 0;
        state->update = 0;

        if (!(bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK)) {
                /* two wide, check for control flow */
                int opcode = get_Opcode_X1(bundle);

                switch (opcode) {
                /* branches */
                case BRANCH_OPCODE_X1:
                {
                        s32 offset = signExtend17(get_BrOff_X1(bundle));

                        /*
                         * For branches, we use a rewriting trick to let the
                         * hardware evaluate whether the branch is taken or
                         * untaken.  We record the target offset and then
                         * rewrite the branch instruction to target 1 insn
                         * ahead if the branch is taken.  We then follow the
                         * rewritten branch with two bundles, each containing
                         * an "ill" instruction.  The supervisor examines the
                         * pc after the single step code is executed, and if
                         * the pc is the first ill instruction, then the
                         * branch (if any) was not taken.  If the pc is the
                         * second ill instruction, then the branch was
                         * taken.  The new pc is computed for these cases, and
                         * inserted into the registers for the thread.  If
                         * the pc is the start of the single step code, then
                         * an exception or interrupt was taken before the
                         * code started processing, and the same "original"
                         * pc is restored.  This change, different from the
                         * original implementation, has the advantage of
                         * executing a single user instruction.
                         */
                        state->branch_next_pc = (unsigned long)(pc + offset);

                        /* rewrite branch offset to go forward one bundle */
                        bundle = set_BrOff_X1(bundle, 2);
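                        /*
                         * Resulting buffer layout:
                         *   [0] branch, retargeted two bundles ahead
                         *   [1] ill  <- pc stops here: branch not taken
                         *   [2] ill  <- pc stops here: branch taken
                         */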
                }
                break;

                /* jumps */
                case JALB_OPCODE_X1:
                case JALF_OPCODE_X1:
                        state->update = 1;
                        state->next_pc =
                                (unsigned long) (pc + get_JOffLong_X1(bundle));
                        break;

                case JB_OPCODE_X1:
                case JF_OPCODE_X1:
                        state->next_pc =
                                (unsigned long) (pc + get_JOffLong_X1(bundle));
                        bundle = nop_X1(bundle);
                        break;

                case SPECIAL_0_OPCODE_X1:
                        switch (get_RRROpcodeExtension_X1(bundle)) {
                        /* jump-register */
                        case JALRP_SPECIAL_0_OPCODE_X1:
                        case JALR_SPECIAL_0_OPCODE_X1:
                                state->update = 1;
                                state->next_pc =
                                        regs->regs[get_SrcA_X1(bundle)];
                                break;

                        case JRP_SPECIAL_0_OPCODE_X1:
                        case JR_SPECIAL_0_OPCODE_X1:
                                state->next_pc =
                                        regs->regs[get_SrcA_X1(bundle)];
                                bundle = nop_X1(bundle);
                                break;

                        case LNK_SPECIAL_0_OPCODE_X1:
                                state->update = 1;
                                target_reg = get_Dest_X1(bundle);
                                break;

                        /* stores */
                        case SH_SPECIAL_0_OPCODE_X1:
                                mem_op = MEMOP_STORE;
                                size = 2;
                                break;

                        case SW_SPECIAL_0_OPCODE_X1:
                                mem_op = MEMOP_STORE;
                                size = 4;
                                break;
                        }
                        break;

                /* loads and iret */
                case SHUN_0_OPCODE_X1:
                        if (get_UnShOpcodeExtension_X1(bundle) ==
                            UN_0_SHUN_0_OPCODE_X1) {
                                switch (get_UnOpcodeExtension_X1(bundle)) {
                                case LH_UN_0_SHUN_0_OPCODE_X1:
                                        mem_op = MEMOP_LOAD;
                                        size = 2;
                                        sign_ext = 1;
                                        break;

                                case LH_U_UN_0_SHUN_0_OPCODE_X1:
                                        mem_op = MEMOP_LOAD;
                                        size = 2;
                                        sign_ext = 0;
                                        break;

                                case LW_UN_0_SHUN_0_OPCODE_X1:
                                        mem_op = MEMOP_LOAD;
                                        size = 4;
                                        break;

                                case IRET_UN_0_SHUN_0_OPCODE_X1:
                                {
                                        unsigned long ex0_0 = __insn_mfspr(
                                                SPR_EX_CONTEXT_0_0);
                                        unsigned long ex0_1 = __insn_mfspr(
                                                SPR_EX_CONTEXT_0_1);
                                        /*
                                         * Special-case it if we're iret'ing
                                         * to PL0 again.  Otherwise just let
                                         * it run and it will generate SIGILL.
                                         */
                                        if (EX1_PL(ex0_1) == USER_PL) {
                                                state->next_pc = ex0_0;
                                                regs->ex1 = ex0_1;
                                                bundle = nop_X1(bundle);
                                        }
                                }
                                }
                        }
                        break;

                /* postincrement operations */
                case IMM_0_OPCODE_X1:
                        switch (get_ImmOpcodeExtension_X1(bundle)) {
                        case LWADD_IMM_0_OPCODE_X1:
                                mem_op = MEMOP_LOAD_POSTINCR;
                                size = 4;
                                break;

                        case LHADD_IMM_0_OPCODE_X1:
                                mem_op = MEMOP_LOAD_POSTINCR;
                                size = 2;
                                sign_ext = 1;
                                break;

                        case LHADD_U_IMM_0_OPCODE_X1:
                                mem_op = MEMOP_LOAD_POSTINCR;
                                size = 2;
                                sign_ext = 0;
                                break;

                        case SWADD_IMM_0_OPCODE_X1:
                                mem_op = MEMOP_STORE_POSTINCR;
                                size = 4;
                                break;

                        case SHADD_IMM_0_OPCODE_X1:
                                mem_op = MEMOP_STORE_POSTINCR;
                                size = 2;
                                break;

                        default:
                                break;
                        }
                        break;
                }

                if (state->update) {
                        /*
                         * Get an available register.  We start with a
                         * bitmask with 1's for available registers.
                         * We truncate to the low 32 registers since
                         * we are guaranteed to have set bits in the
                         * low 32 bits, then use ctz to pick the first.
                         */
                        u32 mask = (u32) ~((1ULL << get_Dest_X0(bundle)) |
                                           (1ULL << get_SrcA_X0(bundle)) |
                                           (1ULL << get_SrcB_X0(bundle)) |
                                           (1ULL << target_reg));
                        temp_reg = __builtin_ctz(mask);
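                        /*
                         * E.g., if the X0 slot reads r0/r1 and writes r2,
                         * bits 0-2 of the mask are clear and ctz picks r3
                         * as the scratch register.
                         */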
                        state->update_reg = temp_reg;
                        state->update_value = regs->regs[temp_reg];
                        regs->regs[temp_reg] = (unsigned long) (pc+1);
                        regs->flags |= PT_FLAGS_RESTORE_REGS;
                        bundle = move_X1(bundle, target_reg, temp_reg);
                }
        } else {
                int opcode = get_Opcode_Y2(bundle);

                switch (opcode) {
                /* loads */
                case LH_OPCODE_Y2:
                        mem_op = MEMOP_LOAD;
                        size = 2;
                        sign_ext = 1;
                        break;

                case LH_U_OPCODE_Y2:
                        mem_op = MEMOP_LOAD;
                        size = 2;
                        sign_ext = 0;
                        break;

                case LW_OPCODE_Y2:
                        mem_op = MEMOP_LOAD;
                        size = 4;
                        break;

                /* stores */
                case SH_OPCODE_Y2:
                        mem_op = MEMOP_STORE;
                        size = 2;
                        break;

                case SW_OPCODE_Y2:
                        mem_op = MEMOP_STORE;
                        size = 4;
                        break;
                }
        }

        /*
         * Check if we need to rewrite an unaligned load/store.
         * Returning zero is a special value meaning we generated a signal.
         */
        if (mem_op != MEMOP_NONE && align_ctl >= 0) {
                bundle = rewrite_load_store_unaligned(state, bundle, regs,
                                                      mem_op, size, sign_ext);
                if (bundle == 0)
                        return;
        }

        /* write the bundle to our execution area */
        buffer = state->buffer;
        err = __put_user(bundle, buffer++);

        /*
         * If we're really single-stepping, we take an INT_ILL after.
         * If we're just handling an unaligned access, we can just
         * jump directly back to where we were in user code.
         */
        if (is_single_step) {
                err |= __put_user(__single_step_ill_insn, buffer++);
                err |= __put_user(__single_step_ill_insn, buffer++);
        } else {
                long delta;

                if (state->update) {
                        /* We have some state to update; do it inline */
                        int ha16;
                        bundle = __single_step_addli_insn;
                        bundle |= create_Dest_X1(state->update_reg);
                        bundle |= create_Imm16_X1(state->update_value);
                        err |= __put_user(bundle, buffer++);
                        bundle = __single_step_auli_insn;
                        bundle |= create_Dest_X1(state->update_reg);
                        bundle |= create_SrcA_X1(state->update_reg);
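                        /*
                         * "High-adjusted" upper half: adding 0x8000 before
                         * shifting compensates for addli sign-extending its
                         * 16-bit immediate.  E.g., for 0x12348765, addli
                         * supplies 0xffff8765 and auli adds 0x1235 << 16,
                         * reconstituting 0x12348765.
                         */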
                        ha16 = (state->update_value + 0x8000) >> 16;
                        bundle |= create_Imm16_X1(ha16);
                        err |= __put_user(bundle, buffer++);
                        state->update = 0;
                }

                /* End with a jump back to the next instruction */
                delta = ((regs->pc + TILEPRO_BUNDLE_SIZE_IN_BYTES) -
                        (unsigned long)buffer) >>
                        TILEPRO_LOG2_BUNDLE_ALIGNMENT_IN_BYTES;
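                /*
                 * (The jump offset counts bundles, not bytes, so the
                 * byte delta is shifted down by the bundle alignment.)
                 */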
                bundle = __single_step_j_insn;
                bundle |= create_JOffLong_X1(delta);
                err |= __put_user(bundle, buffer++);
        }

        if (err) {
                pr_err("Fault when writing to single-step buffer\n");
                return;
        }

        /*
         * Flush the buffer.
         * We do a local flush only, since this is a thread-specific buffer.
         */
        __flush_icache_range((unsigned long)state->buffer,
                             (unsigned long)buffer);

        /* Indicate enabled */
        state->is_enabled = is_single_step;
        regs->pc = (unsigned long)state->buffer;

        /* Fault immediately if we are coming back from a syscall. */
        if (regs->faultnum == INT_SWINT_1)
                regs->pc += 8;
}

#else

static DEFINE_PER_CPU(unsigned long, ss_saved_pc);


/*
 * Called directly on the occasion of an interrupt.
 *
 * If the process doesn't have single step set, then we use this as an
 * opportunity to turn single step off.
 *
 * It has been mentioned that we could conditionally turn off single stepping
 * on each entry into the kernel and rely on single_step_once to turn it
 * on for the processes that matter (as we already do), but this
 * implementation is somewhat more efficient in that we muck with registers
 * once on a bum interrupt rather than on every entry into the kernel.
 *
 * If SINGLE_STEP_CONTROL_K has CANCELED set, then an interrupt occurred,
 * so we have to run through this process again before we can say that an
 * instruction has executed.
 *
 * swint will set CANCELED, but it's a legitimate instruction.  Fortunately
 * it changes the PC.  If it hasn't changed, then we know that the interrupt
 * wasn't generated by swint and we'll need to run this process again before
 * we can say an instruction has executed.
 *
 * If either CANCELED == 0 or the PC's changed, we send out SIGTRAPs and get
 * on with our lives.
 */

void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
{
        enum ctx_state prev_state = exception_enter();
        unsigned long *ss_pc = this_cpu_ptr(&ss_saved_pc);
        struct thread_info *info = (void *)current_thread_info();
        int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
        unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);

        if (is_single_step == 0) {
                __insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 0);

        } else if ((*ss_pc != regs->pc) ||
                   (!(control & SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK))) {

                control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
                control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
                __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
                send_sigtrap(current, regs);
        }
        exception_exit(prev_state);
}


/*
 * Called from need_singlestep.  Set up the control registers and the
 * enable register, then return.
 */
void single_step_once(struct pt_regs *regs)
{
        unsigned long *ss_pc = this_cpu_ptr(&ss_saved_pc);
        unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);

        *ss_pc = regs->pc;
        control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
        control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
        __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
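        /* Enable stepping for user-PL execution (the EN SPR apparently
         * takes one enable bit per protection level). */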
        __insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 1 << USER_PL);
}

void single_step_execve(void)
{
        /* Nothing */
}

#endif /* !__tilegx__ */